/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright 2009-2021 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#ifndef _VMWGFX_DRV_H_
#define _VMWGFX_DRV_H_

#include <linux/suspend.h>
#include <linux/sync_file.h>

#include <drm/drm_auth.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_hashtab.h>
#include <drm/drm_rect.h>

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_execbuf_util.h>

#include "ttm_object.h"

#include "vmwgfx_fence.h"
#include "vmwgfx_reg.h"
#include "vmwgfx_validation.h"

/*
 * FIXME: vmwgfx_drm.h needs to be last due to dependencies.
 * uapi headers should not depend on header files outside uapi/.
 */
#include <drm/vmwgfx_drm.h>
#define VMWGFX_DRIVER_NAME "vmwgfx"
#define VMWGFX_DRIVER_DATE "20210722"
#define VMWGFX_DRIVER_MAJOR 2
#define VMWGFX_DRIVER_MINOR 19
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_MAX_RELOCATIONS 2048
#define VMWGFX_MAX_VALIDATIONS 2048
#define VMWGFX_MAX_DISPLAYS 16
#define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768
#define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 1

#define VMWGFX_PCI_ID_SVGA2              0x0405
#define VMWGFX_PCI_ID_SVGA3              0x0406
/*
 * Perhaps we should have sysfs entries for these.
 */
#define VMWGFX_NUM_GB_CONTEXT 256
#define VMWGFX_NUM_GB_SHADER 20000
#define VMWGFX_NUM_GB_SURFACE 32768
#define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_MAX_DISPLAYS
#define VMWGFX_NUM_DXCONTEXT 256
#define VMWGFX_NUM_DXQUERY 512
#define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\
			VMWGFX_NUM_GB_SHADER +\
			VMWGFX_NUM_GB_SURFACE +\
			VMWGFX_NUM_GB_SCREEN_TARGET)
#define VMW_PL_GMR (TTM_PL_PRIV + 0)
#define VMW_PL_MOB (TTM_PL_PRIV + 1)
#define VMW_PL_SYSTEM (TTM_PL_PRIV + 2)

#define VMW_RES_CONTEXT ttm_driver_type0
#define VMW_RES_SURFACE ttm_driver_type1
#define VMW_RES_STREAM ttm_driver_type2
#define VMW_RES_FENCE ttm_driver_type3
#define VMW_RES_SHADER ttm_driver_type4

#define MKSSTAT_CAPACITY_LOG2 5U
#define MKSSTAT_CAPACITY (1U << MKSSTAT_CAPACITY_LOG2)
struct vmw_fpriv {
	struct ttm_object_file *tfile;
	bool gb_aware; /* user-space is guest-backed aware */
};
/**
 * struct vmw_buffer_object - TTM buffer object with vmwgfx additions
 * @base: The TTM buffer object
 * @res_tree: RB tree of resources using this buffer object as a backing MOB
 * @cpu_writers: Number of synccpu write grabs. Protected by reservation when
 * increased. May be decreased without reservation.
 * @dx_query_ctx: DX context if this buffer object is used as a DX query MOB
 * @map: Kmap object for semi-persistent mappings
 * @res_prios: Eviction priority counts for attached resources
 * @dirty: structure for user-space dirty-tracking
 */
struct vmw_buffer_object {
	struct ttm_buffer_object base;
	struct rb_root res_tree;
	atomic_t cpu_writers;
	/* Not ref-counted.  Protected by binding_mutex */
	struct vmw_resource *dx_query_ctx;
	/* Protected by reservation */
	struct ttm_bo_kmap_obj map;
	u32 res_prios[TTM_MAX_BO_PRIORITY];
	struct vmw_bo_dirty *dirty;
};
/**
 * struct vmw_validate_buffer - Carries validation info about buffers.
 *
 * @base: Validation info for TTM.
 * @hash: Hash entry for quick lookup of the TTM buffer object.
 *
 * This structure also contains driver-private validation info
 * on top of the info needed by TTM.
 */
struct vmw_validate_buffer {
	struct ttm_validate_buffer base;
	struct drm_hash_item hash;
	bool validate_as_mob;
};
/**
 * struct vmw_resource - base class for hardware resources
 *
 * @kref: For refcounting.
 * @dev_priv: Pointer to the device private for this resource. Immutable.
 * @id: Device id. Protected by @dev_priv::resource_lock.
 * @backup_size: Backup buffer size. Immutable.
 * @res_dirty: Resource contains data not yet in the backup buffer. Protected
 * by resource reserved.
 * @backup_dirty: Backup buffer contains data not yet in the HW resource.
 * Protected by resource reserved.
 * @coherent: Emulate coherency by tracking vm accesses.
 * @backup: The backup buffer if any. Protected by resource reserved.
 * @backup_offset: Offset into the backup buffer if any. Protected by resource
 * reserved. Note that only a few resource types can have a @backup_offset
 * different from zero.
 * @pin_count: The pin count for this resource. A pinned resource has a
 * pin-count greater than zero. It is not on the resource LRU lists and its
 * backup buffer is pinned. Hence it can't be evicted.
 * @func: Method vtable for this resource. Immutable.
 * @mob_node: Node for the MOB backup rbtree. Protected by @backup reserved.
 * @lru_head: List head for the LRU list. Protected by @dev_priv::resource_lock.
 * @binding_head: List head for the context binding list. Protected by
 * the @dev_priv::binding_mutex
 * @res_free: The resource destructor.
 * @hw_destroy: Callback to destroy the resource on the device, as part of
 * resource destruction.
 */
struct vmw_resource_dirty;
struct vmw_resource {
	struct kref kref;
	struct vmw_private *dev_priv;
	int id;
	unsigned long backup_size;
	u32 res_dirty : 1;
	u32 backup_dirty : 1;
	u32 coherent : 1;
	struct vmw_buffer_object *backup;
	unsigned long backup_offset;
	unsigned long pin_count;
	const struct vmw_res_func *func;
	struct rb_node mob_node;
	struct list_head lru_head;
	struct list_head binding_head;
	struct vmw_resource_dirty *dirty;
	void (*res_free) (struct vmw_resource *res);
	void (*hw_destroy) (struct vmw_resource *res);
};
/*
 * Resources that are managed using ioctls.
 */
enum vmw_res_type {
	vmw_res_context,
	vmw_res_surface,
	vmw_res_stream,
	vmw_res_shader,
	vmw_res_dx_context,
	vmw_res_cotable,
	vmw_res_view,
	vmw_res_streamoutput,
	vmw_res_max
};
/*
 * Resources that are managed using command streams.
 */
enum vmw_cmdbuf_res_type {
	vmw_cmdbuf_res_shader,
	vmw_cmdbuf_res_view,
	vmw_cmdbuf_res_streamoutput
};

struct vmw_cmdbuf_res_manager;
struct vmw_cursor_snooper {
	size_t age;
	uint32_t *image;
};

struct vmw_framebuffer;
struct vmw_surface_offset;
/**
 * struct vmw_surface_metadata - Metadata describing a surface.
 *
 * @flags: Device flags.
 * @format: Surface SVGA3D_x format.
 * @mip_levels: Mip level for each face. For GB first index is used only.
 * @multisample_count: Sample count.
 * @multisample_pattern: Sample patterns.
 * @quality_level: Quality level.
 * @autogen_filter: Filter for automatically generated mipmaps.
 * @array_size: Number of array elements for a 1D/2D texture. For cubemap
 *              texture number of faces * array_size. This should be 0 for pre
 *              SM4 devices.
 * @buffer_byte_stride: Buffer byte stride.
 * @num_sizes: Size of @sizes. For GB surface this should always be 1.
 * @base_size: Surface dimension.
 * @sizes: Array representing mip sizes. Legacy only.
 * @scanout: Whether this surface will be used for scanout.
 *
 * This tracks metadata for both legacy and guest-backed surfaces.
 */
struct vmw_surface_metadata {
	u64 flags;
	u32 format;
	u32 mip_levels[DRM_VMW_MAX_SURFACE_FACES];
	u32 multisample_count;
	u32 multisample_pattern;
	u32 quality_level;
	u32 autogen_filter;
	u32 array_size;
	u32 num_sizes;
	u32 buffer_byte_stride;
	struct drm_vmw_size base_size;
	struct drm_vmw_size *sizes;
	bool scanout;
};
/**
 * struct vmw_surface: Resource structure for a surface.
 *
 * @res: The base resource for this surface.
 * @metadata: Metadata for this surface resource.
 * @snooper: Cursor data. Legacy surface only.
 * @offsets: Legacy surface only.
 * @view_list: List of views bound to this surface.
 */
struct vmw_surface {
	struct vmw_resource res;
	struct vmw_surface_metadata metadata;
	struct vmw_cursor_snooper snooper;
	struct vmw_surface_offset *offsets;
	struct list_head view_list;
};
struct vmw_fifo_state {
	unsigned long reserved_size;
	u32 *dynamic_buffer;
	u32 *static_buffer;
	unsigned long static_buffer_size;
	bool using_bounce_buffer;
	uint32_t capabilities;
	struct mutex fifo_mutex;
	struct rw_semaphore rwsem;
};
/**
 * struct vmw_res_cache_entry - resource information cache entry
 * @handle: User-space handle of a resource.
 * @res: Non-ref-counted pointer to the resource.
 * @valid_handle: Whether the @handle member is valid.
 * @valid: Whether the entry is valid, which also implies that the execbuf
 * code holds a reference to the resource, and it's placed on the
 * validation list.
 *
 * Used to avoid frequent repeated user-space handle lookups of the
 * same resource.
 */
struct vmw_res_cache_entry {
	uint32_t handle;
	struct vmw_resource *res;
	void *private;
	unsigned short valid_handle;
	unsigned short valid;
};
/**
 * enum vmw_dma_map_mode - indicate how to perform TTM page dma mappings.
 */
enum vmw_dma_map_mode {
	vmw_dma_alloc_coherent, /* Use TTM coherent pages */
	vmw_dma_map_populate,   /* Unmap from DMA just after unpopulate */
	vmw_dma_map_bind,	/* Unmap from DMA just before unbind */
	vmw_dma_map_max
};
/**
 * struct vmw_sg_table - Scatter/gather table for binding, with additional
 * device-specific information.
 *
 * @sgt: Pointer to a struct sg_table with binding information
 * @num_regions: Number of regions with device-address contiguous pages
 */
struct vmw_sg_table {
	enum vmw_dma_map_mode mode;
	struct page **pages;
	const dma_addr_t *addrs;
	struct sg_table *sgt;
	unsigned long num_regions;
	unsigned long num_pages;
};
/**
 * struct vmw_piter - Page iterator that iterates over a list of pages
 * and DMA addresses that could be either a scatter-gather list or
 * arrays
 *
 * @pages: Array of page pointers to the pages.
 * @addrs: DMA addresses to the pages if coherent pages are used.
 * @iter: Scatter-gather page iterator. Current position in SG list.
 * @i: Current position in arrays.
 * @num_pages: Number of pages total.
 * @next: Function to advance the iterator. Returns false if past the list
 * of pages, true otherwise.
 * @dma_address: Function to return the DMA address of the current page.
 */
struct vmw_piter {
	struct page **pages;
	const dma_addr_t *addrs;
	struct sg_dma_page_iter iter;
	unsigned long i;
	unsigned long num_pages;
	bool (*next)(struct vmw_piter *);
	dma_addr_t (*dma_address)(struct vmw_piter *);
};
/*
 * enum vmw_display_unit_type - Describes the display unit
 */
enum vmw_display_unit_type {
	vmw_du_invalid = 0,
	vmw_du_legacy,
	vmw_du_screen_object,
	vmw_du_screen_target,
	vmw_du_max
};
struct vmw_validation_context;
struct vmw_ctx_validation_info;
/**
 * struct vmw_sw_context - Command submission context
 * @res_ht: Pointer hash table used to find validation duplicates
 * @kernel: Whether the command buffer originates from kernel code rather
 * than from user-space
 * @fp: If @kernel is false, points to the file of the client. Otherwise
 * NULL
 * @cmd_bounce: Command bounce buffer used for command validation before
 * copying to fifo space
 * @cmd_bounce_size: Current command bounce buffer size
 * @cur_query_bo: Current buffer object used as query result buffer
 * @bo_relocations: List of buffer object relocations
 * @res_relocations: List of resource relocations
 * @buf_start: Pointer to start of memory where command validation takes
 * place
 * @res_cache: Cache of recently looked up resources
 * @last_query_ctx: Last context that submitted a query
 * @needs_post_query_barrier: Whether a query barrier is needed after
 * command submission
 * @staged_bindings: Cached per-context binding tracker
 * @staged_bindings_inuse: Whether the cached per-context binding tracker
 * is in use
 * @staged_cmd_res: List of staged command buffer managed resources in this
 * command buffer
 * @ctx_list: List of context resources referenced in this command buffer
 * @dx_ctx_node: Validation metadata of the current DX context
 * @dx_query_mob: The MOB used for DX queries
 * @dx_query_ctx: The DX context used for the last DX query
 * @man: Pointer to the command buffer managed resource manager
 * @ctx: The validation context
 */
struct vmw_sw_context {
	struct drm_open_hash res_ht;
	bool res_ht_initialized;
	bool kernel;
	struct vmw_fpriv *fp;
	uint32_t *cmd_bounce;
	uint32_t cmd_bounce_size;
	struct vmw_buffer_object *cur_query_bo;
	struct list_head bo_relocations;
	struct list_head res_relocations;
	uint32_t *buf_start;
	struct vmw_res_cache_entry res_cache[vmw_res_max];
	struct vmw_resource *last_query_ctx;
	bool needs_post_query_barrier;
	struct vmw_ctx_binding_state *staged_bindings;
	bool staged_bindings_inuse;
	struct list_head staged_cmd_res;
	struct list_head ctx_list;
	struct vmw_ctx_validation_info *dx_ctx_node;
	struct vmw_buffer_object *dx_query_mob;
	struct vmw_resource *dx_query_ctx;
	struct vmw_cmdbuf_res_manager *man;
	struct vmw_validation_context *ctx;
};
struct vmw_legacy_display;
struct vmw_overlay;

struct vmw_vga_topology_state {
	uint32_t width;
	uint32_t height;
	uint32_t primary;
	uint32_t pos_x;
	uint32_t pos_y;
};
/*
 * struct vmw_otable - Guest Memory OBject table metadata
 *
 * @size: Size of the table (page-aligned).
 * @page_table: Pointer to a struct vmw_mob holding the page table.
 */
struct vmw_otable {
	unsigned long size;
	struct vmw_mob *page_table;
	bool enabled;
};
struct vmw_otable_batch {
	unsigned num_otables;
	struct vmw_otable *otables;
	struct vmw_resource *context;
	struct ttm_buffer_object *otable_bo;
};
enum {
	VMW_IRQTHREAD_FENCE,
	VMW_IRQTHREAD_CMDBUF,
	VMW_IRQTHREAD_MAX
};
/**
 * enum vmw_sm_type - Graphics context capability supported by device.
 * @VMW_SM_LEGACY: Pre DX context.
 * @VMW_SM_4: Context support up to SM4.
 * @VMW_SM_4_1: Context support up to SM4_1.
 * @VMW_SM_5: Context support up to SM5.
 * @VMW_SM_MAX: Should be the last.
 */
enum vmw_sm_type {
	VMW_SM_LEGACY = 0,
	VMW_SM_4,
	VMW_SM_4_1,
	VMW_SM_5,
	VMW_SM_MAX,
};
struct vmw_private {
	struct drm_device drm;
	struct ttm_device bdev;

	struct drm_vma_offset_manager vma_manager;
	u32 pci_id;
	resource_size_t io_start;
	resource_size_t vram_start;
	resource_size_t vram_size;
	resource_size_t max_primary_mem;
	u32 __iomem *rmmio;
	u32 *fifo_mem;
	resource_size_t fifo_mem_size;
	uint32_t fb_max_width;
	uint32_t fb_max_height;
	uint32_t texture_max_width;
	uint32_t texture_max_height;
	uint32_t stdu_max_width;
	uint32_t stdu_max_height;
	uint32_t initial_width;
	uint32_t initial_height;
	uint32_t capabilities;
	uint32_t capabilities2;
	uint32_t max_gmr_ids;
	uint32_t max_gmr_pages;
	uint32_t max_mob_pages;
	uint32_t max_mob_size;
	uint32_t memory_size;
	bool has_gmr;
	bool has_mob;
	spinlock_t hw_lock;
	bool assume_16bpp;

	enum vmw_sm_type sm_type;

	/*
	 * Framebuffer info.
	 */

	void *fb_info;
	enum vmw_display_unit_type active_display_unit;
	struct vmw_legacy_display *ldu_priv;
	struct vmw_overlay *overlay_priv;
	struct drm_property *hotplug_mode_update_property;
	struct drm_property *implicit_placement_property;
	spinlock_t cursor_lock;
	struct drm_atomic_state *suspend_state;

	/*
	 * Context and surface management.
	 */

	spinlock_t resource_lock;
	struct idr res_idr[vmw_res_max];

	/*
	 * A resource manager for kernel-only surfaces and
	 * contexts.
	 */

	struct ttm_object_device *tdev;

	/*
	 * Fencing and IRQs.
	 */

	atomic_t marker_seq;
	wait_queue_head_t fence_queue;
	wait_queue_head_t fifo_queue;
	spinlock_t waiter_lock;
	int fence_queue_waiters; /* Protected by waiter_lock */
	int goal_queue_waiters; /* Protected by waiter_lock */
	int cmdbuf_waiters; /* Protected by waiter_lock */
	int error_waiters; /* Protected by waiter_lock */
	int fifo_queue_waiters; /* Protected by waiter_lock */
	uint32_t last_read_seqno;
	struct vmw_fence_manager *fman;
	uint32_t irq_mask; /* Updates protected by waiter_lock */

	/*
	 * Device state
	 */

	uint32_t traces_state;
	uint32_t enable_state;
	uint32_t config_done_state;

	/**
	 * Execbuf
	 */
	/**
	 * Protected by the cmdbuf mutex.
	 */

	struct vmw_sw_context ctx;
	struct mutex cmdbuf_mutex;
	struct mutex binding_mutex;

	bool enable_fb;

	/**
	 * PM management.
	 */
	struct notifier_block pm_nb;
	bool refuse_hibernation;
	bool suspend_locked;

	atomic_t num_fifo_resources;

	/*
	 * Query processing. These members
	 * are protected by the cmdbuf mutex.
	 */

	struct vmw_buffer_object *dummy_query_bo;
	struct vmw_buffer_object *pinned_bo;
	uint32_t query_cid;
	uint32_t query_cid_valid;
	bool dummy_query_bo_pinned;

	/*
	 * Surface swapping. The "surface_lru" list is protected by the
	 * resource lock in order to be able to destroy a surface and take
	 * it off the lru atomically. "used_memory_size" is currently
	 * protected by the cmdbuf mutex for simplicity.
	 */

	struct list_head res_lru[vmw_res_max];
	uint32_t used_memory_size;

	/*
	 * DMA mapping stuff.
	 */
	enum vmw_dma_map_mode map_mode;

	/*
	 * Guest Backed stuff
	 */
	struct vmw_otable_batch otable_batch;

	struct vmw_fifo_state *fifo;
	struct vmw_cmdbuf_man *cman;
	DECLARE_BITMAP(irqthread_pending, VMW_IRQTHREAD_MAX);

	/* Validation memory reservation */
	struct vmw_validation_mem vvm;

	uint32 *devcaps;

	/*
	 * mksGuestStat instance-descriptor and pid arrays
	 */
	struct page *mksstat_user_pages[MKSSTAT_CAPACITY];
	atomic_t mksstat_user_pids[MKSSTAT_CAPACITY];

#if IS_ENABLED(CONFIG_DRM_VMWGFX_MKSSTATS)
	struct page *mksstat_kern_pages[MKSSTAT_CAPACITY];
	u8 mksstat_kern_top_timer[MKSSTAT_CAPACITY];
	atomic_t mksstat_kern_pids[MKSSTAT_CAPACITY];
#endif
};
static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
{
	return container_of(res, struct vmw_surface, res);
}
static inline struct vmw_private *vmw_priv(struct drm_device *dev)
{
	return (struct vmw_private *)dev->dev_private;
}
static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv)
{
	return (struct vmw_fpriv *)file_priv->driver_priv;
}
/*
 * SVGA v3 has mmio register access and lacks fifo cmds
 */
static inline bool vmw_is_svga_v3(const struct vmw_private *dev)
{
	return dev->pci_id == VMWGFX_PCI_ID_SVGA3;
}
/*
 * The locking here is fine-grained, so that it is performed once
 * for every read- and write operation. This is of course costly, but we
 * don't perform much register access in the timing critical paths anyway.
 * Instead we have the extra benefit of being sure that we don't forget
 * the hw lock around register accesses.
 */
static inline void vmw_write(struct vmw_private *dev_priv,
			     unsigned int offset, uint32_t value)
{
	if (vmw_is_svga_v3(dev_priv)) {
		iowrite32(value, dev_priv->rmmio + offset);
	} else {
		spin_lock(&dev_priv->hw_lock);
		outl(offset, dev_priv->io_start + SVGA_INDEX_PORT);
		outl(value, dev_priv->io_start + SVGA_VALUE_PORT);
		spin_unlock(&dev_priv->hw_lock);
	}
}
static inline uint32_t vmw_read(struct vmw_private *dev_priv,
				unsigned int offset)
{
	u32 val;

	if (vmw_is_svga_v3(dev_priv)) {
		val = ioread32(dev_priv->rmmio + offset);
	} else {
		spin_lock(&dev_priv->hw_lock);
		outl(offset, dev_priv->io_start + SVGA_INDEX_PORT);
		val = inl(dev_priv->io_start + SVGA_VALUE_PORT);
		spin_unlock(&dev_priv->hw_lock);
	}

	return val;
}
/**
 * has_sm4_context - Does the device support SM4 context.
 * @dev_priv: Device private.
 *
 * Return: True if the device supports SM4 contexts, false otherwise.
 */
static inline bool has_sm4_context(const struct vmw_private *dev_priv)
{
	return (dev_priv->sm_type >= VMW_SM_4);
}
/**
 * has_sm4_1_context - Does the device support SM4_1 context.
 * @dev_priv: Device private.
 *
 * Return: True if the device supports SM4_1 contexts, false otherwise.
 */
static inline bool has_sm4_1_context(const struct vmw_private *dev_priv)
{
	return (dev_priv->sm_type >= VMW_SM_4_1);
}
/**
 * has_sm5_context - Does the device support SM5 context.
 * @dev_priv: Device private.
 *
 * Return: True if the device supports SM5 contexts, false otherwise.
 */
static inline bool has_sm5_context(const struct vmw_private *dev_priv)
{
	return (dev_priv->sm_type >= VMW_SM_5);
}
extern void vmw_svga_enable(struct vmw_private *dev_priv);
extern void vmw_svga_disable(struct vmw_private *dev_priv);
/**
 * GMR utilities - vmwgfx_gmr.c
 */

extern int vmw_gmr_bind(struct vmw_private *dev_priv,
			const struct vmw_sg_table *vsgt,
			unsigned long num_pages,
			int gmr_id);
extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);
/**
 * Resource utilities - vmwgfx_resource.c
 */
struct vmw_user_resource_conv;

extern void vmw_resource_unreference(struct vmw_resource **p_res);
extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
extern struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res);
extern int vmw_resource_validate(struct vmw_resource *res, bool intr,
				 bool dirtying);
extern int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
				bool no_backup);
extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
				  struct ttm_object_file *tfile,
				  uint32_t handle,
				  struct vmw_surface **out_surf,
				  struct vmw_buffer_object **out_buf);
extern int vmw_user_resource_lookup_handle(
	struct vmw_private *dev_priv,
	struct ttm_object_file *tfile,
	uint32_t handle,
	const struct vmw_user_resource_conv *converter,
	struct vmw_resource **p_res);
extern struct vmw_resource *
vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv,
				      struct ttm_object_file *tfile,
				      uint32_t handle,
				      const struct vmw_user_resource_conv *
				      converter);
extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
				  struct ttm_object_file *tfile,
				  uint32_t *inout_id,
				  struct vmw_resource **out);
extern void vmw_resource_unreserve(struct vmw_resource *res,
				   bool dirty_set,
				   bool dirty,
				   bool switch_backup,
				   struct vmw_buffer_object *new_backup,
				   unsigned long new_backup_offset);
extern void vmw_query_move_notify(struct ttm_buffer_object *bo,
				  struct ttm_resource *old_mem,
				  struct ttm_resource *new_mem);
extern int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob);
extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
extern void vmw_resource_unbind_list(struct vmw_buffer_object *vbo);
void vmw_resource_mob_attach(struct vmw_resource *res);
void vmw_resource_mob_detach(struct vmw_resource *res);
void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start,
			       pgoff_t end);
int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
			pgoff_t end, pgoff_t *num_prefault);
/**
 * vmw_resource_mob_attached - Whether a resource currently has a mob attached
 * @res: The resource
 *
 * Return: true if the resource has a mob attached, false otherwise.
 */
static inline bool vmw_resource_mob_attached(const struct vmw_resource *res)
{
	return !RB_EMPTY_NODE(&res->mob_node);
}
/**
 * vmw_user_resource_noref_release - release a user resource pointer looked up
 * without reference
 */
static inline void vmw_user_resource_noref_release(void)
{
	ttm_base_object_noref_release();
}
/**
 * Buffer object helper functions - vmwgfx_bo.c
 */
extern int vmw_bo_pin_in_placement(struct vmw_private *vmw_priv,
				   struct vmw_buffer_object *bo,
				   struct ttm_placement *placement,
				   bool interruptible);
extern int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
			      struct vmw_buffer_object *buf,
			      bool interruptible);
extern int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
				     struct vmw_buffer_object *buf,
				     bool interruptible);
extern int vmw_bo_pin_in_start_of_vram(struct vmw_private *vmw_priv,
				       struct vmw_buffer_object *bo,
				       bool interruptible);
extern int vmw_bo_unpin(struct vmw_private *vmw_priv,
			struct vmw_buffer_object *bo,
			bool interruptible);
extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
				 SVGAGuestPtr *ptr);
extern void vmw_bo_pin_reserved(struct vmw_buffer_object *bo, bool pin);
extern void vmw_bo_bo_free(struct ttm_buffer_object *bo);
extern int vmw_bo_create_kernel(struct vmw_private *dev_priv,
				unsigned long size,
				struct ttm_placement *placement,
				struct ttm_buffer_object **p_bo);
extern int vmw_bo_init(struct vmw_private *dev_priv,
		       struct vmw_buffer_object *vmw_bo,
		       size_t size, struct ttm_placement *placement,
		       bool interruptible, bool pin,
		       void (*bo_free)(struct ttm_buffer_object *bo));
extern int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
				     struct ttm_object_file *tfile);
extern int vmw_user_bo_alloc(struct vmw_private *dev_priv,
			     struct ttm_object_file *tfile,
			     uint32_t size,
			     bool shareable,
			     uint32_t *handle,
			     struct vmw_buffer_object **p_dma_buf,
			     struct ttm_base_object **p_base);
extern int vmw_user_bo_reference(struct ttm_object_file *tfile,
				 struct vmw_buffer_object *dma_buf,
				 uint32_t *handle);
extern int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
extern int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
extern int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv);
extern int vmw_user_bo_lookup(struct ttm_object_file *tfile,
			      uint32_t id, struct vmw_buffer_object **out,
			      struct ttm_base_object **base);
extern void vmw_bo_fence_single(struct ttm_buffer_object *bo,
				struct vmw_fence_obj *fence);
extern void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo);
extern void vmw_bo_unmap(struct vmw_buffer_object *vbo);
extern void vmw_bo_move_notify(struct ttm_buffer_object *bo,
			       struct ttm_resource *mem);
extern void vmw_bo_swap_notify(struct ttm_buffer_object *bo);
extern struct vmw_buffer_object *
vmw_user_bo_noref_lookup(struct ttm_object_file *tfile, u32 handle);
/**
 * vmw_user_bo_noref_release - release a buffer object pointer looked up
 * without reference
 */
static inline void vmw_user_bo_noref_release(void)
{
	ttm_base_object_noref_release();
}
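/*
 * Usage sketch (illustrative only): a noref lookup holds no reference,
 * so it must be paired with vmw_user_bo_noref_release() once the
 * pointer is no longer used. The ERR_PTR-style error check is an
 * assumption of this sketch.
 *
 *	struct vmw_buffer_object *vbo =
 *		vmw_user_bo_noref_lookup(tfile, handle);
 *	if (IS_ERR(vbo))
 *		return PTR_ERR(vbo);
 *	... use vbo ...
 *	vmw_user_bo_noref_release();
 */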
/**
 * vmw_bo_prio_adjust - Adjust the buffer object eviction priority
 * according to attached resources
 * @vbo: The struct vmw_buffer_object
 */
static inline void vmw_bo_prio_adjust(struct vmw_buffer_object *vbo)
{
	int i = ARRAY_SIZE(vbo->res_prios);

	while (i--) {
		if (vbo->res_prios[i]) {
			vbo->base.priority = i;
			return;
		}
	}

	vbo->base.priority = 3;
}
/**
 * vmw_bo_prio_add - Notify a buffer object of a newly attached resource
 * eviction priority
 * @vbo: The struct vmw_buffer_object
 * @prio: The resource priority
 *
 * After being notified, the code assigns the highest resource eviction priority
 * to the backing buffer object (mob).
 */
static inline void vmw_bo_prio_add(struct vmw_buffer_object *vbo, int prio)
{
	if (vbo->res_prios[prio]++ == 0)
		vmw_bo_prio_adjust(vbo);
}
/**
 * vmw_bo_prio_del - Notify a buffer object of a resource with a certain
 * priority being removed
 * @vbo: The struct vmw_buffer_object
 * @prio: The resource priority
 *
 * After being notified, the code assigns the highest resource eviction priority
 * to the backing buffer object (mob).
 */
static inline void vmw_bo_prio_del(struct vmw_buffer_object *vbo, int prio)
{
	if (--vbo->res_prios[prio] == 0)
		vmw_bo_prio_adjust(vbo);
}
/**
 * Misc Ioctl functionality - vmwgfx_ioctl.c
 */

extern int vmw_getparam_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
extern int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
extern int vmw_present_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
extern int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file_priv);
/**
 * Fifo utilities - vmwgfx_fifo.c
 */

extern struct vmw_fifo_state *vmw_fifo_create(struct vmw_private *dev_priv);
extern void vmw_fifo_destroy(struct vmw_private *dev_priv);
extern bool vmw_cmd_supported(struct vmw_private *vmw);
extern void *
vmw_cmd_ctx_reserve(struct vmw_private *dev_priv, uint32_t bytes, int ctx_id);
extern void vmw_cmd_commit(struct vmw_private *dev_priv, uint32_t bytes);
extern void vmw_cmd_commit_flush(struct vmw_private *dev_priv, uint32_t bytes);
extern int vmw_cmd_send_fence(struct vmw_private *dev_priv, uint32_t *seqno);
extern bool vmw_supports_3d(struct vmw_private *dev_priv);
extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
extern int vmw_cmd_emit_dummy_query(struct vmw_private *dev_priv,
				    uint32_t cid);
extern int vmw_cmd_flush(struct vmw_private *dev_priv,
			 bool interruptible);

#define VMW_CMD_CTX_RESERVE(__priv, __bytes, __ctx_id)                        \
({                                                                            \
	vmw_cmd_ctx_reserve(__priv, __bytes, __ctx_id) ? : ({                 \
		DRM_ERROR("FIFO reserve failed at %s for %u bytes\n",         \
			  __func__, (unsigned int) __bytes);                  \
		NULL;                                                         \
	});                                                                   \
})

#define VMW_CMD_RESERVE(__priv, __bytes)                                      \
	VMW_CMD_CTX_RESERVE(__priv, __bytes, SVGA3D_INVALID_ID)
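/*
 * Usage sketch (illustrative only): the reserve/fill/commit pattern
 * used throughout the driver for FIFO command submission. The command
 * type shown is one example; "dev_priv" and "res" come from the
 * caller.
 *
 *	struct {
 *		SVGA3dCmdHeader header;
 *		SVGA3dCmdUpdateGBSurface body;
 *	} *cmd;
 *
 *	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
 *	if (unlikely(cmd == NULL))
 *		return -ENOMEM;
 *
 *	cmd->header.id = SVGA_3D_CMD_UPDATE_GB_SURFACE;
 *	cmd->header.size = sizeof(cmd->body);
 *	cmd->body.sid = res->id;
 *	vmw_cmd_commit(dev_priv, sizeof(*cmd));
 */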
/**
 * vmw_fifo_caps - Returns the capabilities of the FIFO command
 * queue or 0 if fifo memory isn't present.
 * @dev_priv: The device private context
 */
static inline uint32_t vmw_fifo_caps(const struct vmw_private *dev_priv)
{
	if (!dev_priv->fifo_mem || !dev_priv->fifo)
		return 0;
	return dev_priv->fifo->capabilities;
}
/**
 * vmw_is_cursor_bypass3_enabled - Returns TRUE iff Cursor Bypass 3
 * is enabled in the FIFO.
 * @dev_priv: The device private context
 */
static inline bool
vmw_is_cursor_bypass3_enabled(const struct vmw_private *dev_priv)
{
	return (vmw_fifo_caps(dev_priv) & SVGA_FIFO_CAP_CURSOR_BYPASS_3) != 0;
}
/**
 * TTM glue - vmwgfx_ttm_glue.c
 */

extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);

extern void vmw_validation_mem_init_ttm(struct vmw_private *dev_priv,
					size_t gran);
/**
 * TTM buffer object driver - vmwgfx_ttm_buffer.c
 */

extern const size_t vmw_tt_size;
extern struct ttm_placement vmw_vram_placement;
extern struct ttm_placement vmw_vram_sys_placement;
extern struct ttm_placement vmw_vram_gmr_placement;
extern struct ttm_placement vmw_sys_placement;
extern struct ttm_placement vmw_srf_placement;
extern struct ttm_placement vmw_mob_placement;
extern struct ttm_placement vmw_nonfixed_placement;
extern struct ttm_device_funcs vmw_bo_driver;
extern const struct vmw_sg_table *
vmw_bo_sg_table(struct ttm_buffer_object *bo);
extern int vmw_bo_create_and_populate(struct vmw_private *dev_priv,
				      unsigned long bo_size,
				      struct ttm_buffer_object **bo_p);

extern void vmw_piter_start(struct vmw_piter *viter,
			    const struct vmw_sg_table *vsgt,
			    unsigned long p_offs);
/**
 * vmw_piter_next - Advance the iterator one page.
 *
 * @viter: Pointer to the iterator to advance.
 *
 * Returns false if past the list of pages, true otherwise.
 */
static inline bool vmw_piter_next(struct vmw_piter *viter)
{
	return viter->next(viter);
}
/**
 * vmw_piter_dma_addr - Return the DMA address of the current page.
 *
 * @viter: Pointer to the iterator
 *
 * Returns the DMA address of the page pointed to by @viter.
 */
static inline dma_addr_t vmw_piter_dma_addr(struct vmw_piter *viter)
{
	return viter->dma_address(viter);
}
/**
 * vmw_piter_page - Return a pointer to the current page.
 *
 * @viter: Pointer to the iterator
 *
 * Returns a pointer to the page pointed to by @viter.
 */
static inline struct page *vmw_piter_page(struct vmw_piter *viter)
{
	return viter->pages[viter->i];
}
/**
 * Command submission - vmwgfx_execbuf.c
 */

extern int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
extern int vmw_execbuf_process(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       void __user *user_commands,
			       void *kernel_commands,
			       uint32_t command_size,
			       uint64_t throttle_us,
			       uint32_t dx_context_handle,
			       struct drm_vmw_fence_rep __user
			       *user_fence_rep,
			       struct vmw_fence_obj **out_fence,
			       uint32_t flags);
extern void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
					    struct vmw_fence_obj *fence);
extern void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv);

extern int vmw_execbuf_fence_commands(struct drm_file *file_priv,
				      struct vmw_private *dev_priv,
				      struct vmw_fence_obj **p_fence,
				      uint32_t *p_handle);
extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
					struct vmw_fpriv *vmw_fp,
					int ret,
					struct drm_vmw_fence_rep __user
					*user_fence_rep,
					struct vmw_fence_obj *fence,
					uint32_t fence_handle,
					int32_t out_fence_fd,
					struct sync_file *sync_file);
bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd);
/**
 * IRQs and waiting - vmwgfx_irq.c
 */

extern int vmw_irq_install(struct drm_device *dev, int irq);
extern void vmw_irq_uninstall(struct drm_device *dev);
extern bool vmw_seqno_passed(struct vmw_private *dev_priv,
			     uint32_t seqno);
extern int vmw_fallback_wait(struct vmw_private *dev_priv,
			     bool lazy,
			     bool fifo_idle,
			     uint32_t seqno,
			     bool interruptible,
			     unsigned long timeout);
extern void vmw_update_seqno(struct vmw_private *dev_priv);
extern void vmw_seqno_waiter_add(struct vmw_private *dev_priv);
extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
extern void vmw_goal_waiter_add(struct vmw_private *dev_priv);
extern void vmw_goal_waiter_remove(struct vmw_private *dev_priv);
extern void vmw_generic_waiter_add(struct vmw_private *dev_priv, u32 flag,
				   int *waiter_count);
extern void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
				      u32 flag, int *waiter_count);
/**
 * Kernel framebuffer - vmwgfx_fb.c
 */

#ifdef CONFIG_DRM_FBDEV_EMULATION
int vmw_fb_init(struct vmw_private *vmw_priv);
int vmw_fb_close(struct vmw_private *dev_priv);
int vmw_fb_off(struct vmw_private *vmw_priv);
int vmw_fb_on(struct vmw_private *vmw_priv);
#else
static inline int vmw_fb_init(struct vmw_private *vmw_priv)
{
	return 0;
}
static inline int vmw_fb_close(struct vmw_private *dev_priv)
{
	return 0;
}
static inline int vmw_fb_off(struct vmw_private *vmw_priv)
{
	return 0;
}
static inline int vmw_fb_on(struct vmw_private *vmw_priv)
{
	return 0;
}
#endif
/**
 * Kernel modesetting - vmwgfx_kms.c
 */

int vmw_kms_init(struct vmw_private *dev_priv);
int vmw_kms_close(struct vmw_private *dev_priv);
int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv);
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
			  struct ttm_object_file *tfile,
			  struct ttm_buffer_object *bo,
			  SVGA3dCmdHeader *header);
int vmw_kms_write_svga(struct vmw_private *vmw_priv,
		       unsigned width, unsigned height, unsigned pitch,
		       unsigned bpp, unsigned depth);
bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
				uint32_t pitch,
				uint32_t height);
u32 vmw_get_vblank_counter(struct drm_crtc *crtc);
int vmw_enable_vblank(struct drm_crtc *crtc);
void vmw_disable_vblank(struct drm_crtc *crtc);
int vmw_kms_present(struct vmw_private *dev_priv,
		    struct drm_file *file_priv,
		    struct vmw_framebuffer *vfb,
		    struct vmw_surface *surface,
		    uint32_t sid, int32_t destX, int32_t destY,
		    struct drm_vmw_rect *clips,
		    uint32_t num_clips);
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv);
int vmw_kms_suspend(struct drm_device *dev);
int vmw_kms_resume(struct drm_device *dev);
void vmw_kms_lost_device(struct drm_device *dev);

int vmw_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args);

int vmw_dumb_map_offset(struct drm_file *file_priv,
			struct drm_device *dev, uint32_t handle,
			uint64_t *offset);
int vmw_dumb_destroy(struct drm_file *file_priv,
		     struct drm_device *dev,
		     uint32_t handle);
extern int vmw_resource_pin(struct vmw_resource *res, bool interruptible);
extern void vmw_resource_unpin(struct vmw_resource *res);
extern enum vmw_res_type vmw_res_type(const struct vmw_resource *res);
/**
 * Overlay control - vmwgfx_overlay.c
 */

int vmw_overlay_init(struct vmw_private *dev_priv);
int vmw_overlay_close(struct vmw_private *dev_priv);
int vmw_overlay_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
int vmw_overlay_resume_all(struct vmw_private *dev_priv);
int vmw_overlay_pause_all(struct vmw_private *dev_priv);
int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out);
int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id);
int vmw_overlay_num_overlays(struct vmw_private *dev_priv);
int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);
/**
 * GMR Id manager
 */

int vmw_gmrid_man_init(struct vmw_private *dev_priv, int type);
void vmw_gmrid_man_fini(struct vmw_private *dev_priv, int type);
/**
 * System memory manager
 */
int vmw_sys_man_init(struct vmw_private *dev_priv);
void vmw_sys_man_fini(struct vmw_private *dev_priv);
/**
 * Prime - vmwgfx_prime.c
 */

extern const struct dma_buf_ops vmw_prime_dmabuf_ops;
extern int vmw_prime_fd_to_handle(struct drm_device *dev,
				  struct drm_file *file_priv,
				  int fd, u32 *handle);
extern int vmw_prime_handle_to_fd(struct drm_device *dev,
				  struct drm_file *file_priv,
				  uint32_t handle, uint32_t flags,
				  int *prime_fd);
/*
 * Memory OBject management - vmwgfx_mob.c
 */
struct vmw_mob;
extern int vmw_mob_bind(struct vmw_private *dev_priv, struct vmw_mob *mob,
			const struct vmw_sg_table *vsgt,
			unsigned long num_data_pages, int32_t mob_id);
extern void vmw_mob_unbind(struct vmw_private *dev_priv,
			   struct vmw_mob *mob);
extern void vmw_mob_destroy(struct vmw_mob *mob);
extern struct vmw_mob *vmw_mob_create(unsigned long data_pages);
extern int vmw_otables_setup(struct vmw_private *dev_priv);
extern void vmw_otables_takedown(struct vmw_private *dev_priv);
/*
 * Context management - vmwgfx_context.c
 */

extern const struct vmw_user_resource_conv *user_context_converter;

extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
					     struct drm_file *file_priv);
extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv);
extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
extern struct vmw_cmdbuf_res_manager *
vmw_context_res_man(struct vmw_resource *ctx);
extern struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
						SVGACOTableType cotable_type);
struct vmw_ctx_binding_state;
extern struct vmw_ctx_binding_state *
vmw_context_binding_state(struct vmw_resource *ctx);
extern void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
					  bool readback);
extern int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
				     struct vmw_buffer_object *mob);
extern struct vmw_buffer_object *
vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res);
/*
 * Surface management - vmwgfx_surface.c
 */

extern const struct vmw_user_resource_conv *user_surface_converter;

extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv);
extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file_priv);
extern int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file_priv);
extern int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
					  struct drm_file *file_priv);
int vmw_surface_gb_priv_define(struct drm_device *dev,
			       uint32_t user_accounting_size,
			       SVGA3dSurfaceAllFlags svga3d_flags,
			       SVGA3dSurfaceFormat format,
			       bool for_scanout,
			       uint32_t num_mip_levels,
			       uint32_t multisample_count,
			       uint32_t array_size,
			       struct drm_vmw_size size,
			       SVGA3dMSPattern multisample_pattern,
			       SVGA3dMSQualityLevel quality_level,
			       struct vmw_surface **srf_out);
extern int vmw_gb_surface_define_ext_ioctl(struct drm_device *dev,
					   void *data,
					   struct drm_file *file_priv);
extern int vmw_gb_surface_reference_ext_ioctl(struct drm_device *dev,
					      void *data,
					      struct drm_file *file_priv);

int vmw_gb_surface_define(struct vmw_private *dev_priv,
			  uint32_t user_accounting_size,
			  const struct vmw_surface_metadata *req,
			  struct vmw_surface **srf_out);
/*
 * Shader management - vmwgfx_shader.c
 */

extern const struct vmw_user_resource_conv *user_shader_converter;

extern int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file_priv);
extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int vmw_compat_shader_add(struct vmw_private *dev_priv,
				 struct vmw_cmdbuf_res_manager *man,
				 u32 user_key, const void *bytecode,
				 SVGA3dShaderType shader_type,
				 size_t size,
				 struct list_head *list);
extern int vmw_shader_remove(struct vmw_cmdbuf_res_manager *man,
			     u32 user_key, SVGA3dShaderType shader_type,
			     struct list_head *list);
extern int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man,
			     struct vmw_resource *ctx,
			     u32 user_key,
			     SVGA3dShaderType shader_type,
			     struct list_head *list);
extern void vmw_dx_shader_cotable_list_scrub(struct vmw_private *dev_priv,
					     struct list_head *list,
					     bool readback);
extern struct vmw_resource *
vmw_shader_lookup(struct vmw_cmdbuf_res_manager *man,
		  u32 user_key, SVGA3dShaderType shader_type);
/*
 * Streamoutput management
 */
struct vmw_resource *
vmw_dx_streamoutput_lookup(struct vmw_cmdbuf_res_manager *man,
			   u32 user_key);
int vmw_dx_streamoutput_add(struct vmw_cmdbuf_res_manager *man,
			    struct vmw_resource *ctx,
			    SVGA3dStreamOutputId user_key,
			    struct list_head *list);
void vmw_dx_streamoutput_set_size(struct vmw_resource *res, u32 size);
int vmw_dx_streamoutput_remove(struct vmw_cmdbuf_res_manager *man,
			       SVGA3dStreamOutputId user_key,
			       struct list_head *list);
void vmw_dx_streamoutput_cotable_list_scrub(struct vmw_private *dev_priv,
					    struct list_head *list,
					    bool readback);
/*
 * Command buffer managed resources - vmwgfx_cmdbuf_res.c
 */

extern struct vmw_cmdbuf_res_manager *
vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv);
extern void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man);
extern size_t vmw_cmdbuf_res_man_size(void);
extern struct vmw_resource *
vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man,
		      enum vmw_cmdbuf_res_type res_type,
		      u32 user_key);
extern void vmw_cmdbuf_res_revert(struct list_head *list);
extern void vmw_cmdbuf_res_commit(struct list_head *list);
extern int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
			      enum vmw_cmdbuf_res_type res_type,
			      u32 user_key,
			      struct vmw_resource *res,
			      struct list_head *list);
extern int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
				 enum vmw_cmdbuf_res_type res_type,
				 u32 user_key,
				 struct list_head *list,
				 struct vmw_resource **res);
/*
 * COTable management - vmwgfx_cotable.c
 */
extern const SVGACOTableType vmw_cotable_scrub_order[];
extern struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
					      struct vmw_resource *ctx,
					      u32 type);
extern int vmw_cotable_notify(struct vmw_resource *res, int id);
extern int vmw_cotable_scrub(struct vmw_resource *res, bool readback);
extern void vmw_cotable_add_resource(struct vmw_resource *ctx,
				     struct list_head *head);
/*
 * Command buffer management - vmwgfx_cmdbuf.c
 */
struct vmw_cmdbuf_man;
struct vmw_cmdbuf_header;

extern struct vmw_cmdbuf_man *
vmw_cmdbuf_man_create(struct vmw_private *dev_priv);
extern int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, size_t size);
extern void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man);
extern void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man);
extern int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
			   unsigned long timeout);
extern void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
				int ctx_id, bool interruptible,
				struct vmw_cmdbuf_header *header);
extern void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
			      struct vmw_cmdbuf_header *header,
			      bool flush);
extern void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
			      size_t size, bool interruptible,
			      struct vmw_cmdbuf_header **p_header);
extern void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header);
extern int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
				bool interruptible);
extern void vmw_cmdbuf_irqthread(struct vmw_cmdbuf_man *man);
/* CPU blit utilities - vmwgfx_blit.c */

/**
 * struct vmw_diff_cpy - CPU blit information structure
 *
 * @rect: The output bounding box rectangle.
 * @line: The current line of the blit.
 * @line_offset: Offset of the current line segment.
 * @cpp: Bytes per pixel (granularity information).
 * @do_cpy: Which memcpy function to use.
 */
struct vmw_diff_cpy {
	struct drm_rect rect;
	size_t line;
	size_t line_offset;
	int cpp;
	void (*do_cpy)(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src,
		       size_t n);
};

#define VMW_CPU_BLIT_INITIALIZER {	\
	.do_cpy = vmw_memcpy,		\
}

#define VMW_CPU_BLIT_DIFF_INITIALIZER(_cpp) {	  \
	.line = 0,				  \
	.line_offset = 0,			  \
	.cpp = _cpp,				  \
	.rect = { .x1 = INT_MAX/2,		  \
		  .y1 = INT_MAX/2,		  \
		  .x2 = INT_MIN/2,		  \
		  .y2 = INT_MIN/2		  \
	},					  \
	.do_cpy = vmw_diff_memcpy,		  \
}

void vmw_diff_memcpy(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src,
		     size_t n);

void vmw_memcpy(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src, size_t n);

int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
		    u32 dst_offset, u32 dst_stride,
		    struct ttm_buffer_object *src,
		    u32 src_offset, u32 src_stride,
		    u32 w, u32 h,
		    struct vmw_diff_cpy *diff);
/* Host messaging - vmwgfx_msg.c: */
int vmw_host_get_guestinfo(const char *guest_info_param,
			   char *buffer, size_t *length);
__printf(1, 2) int vmw_host_printf(const char *fmt, ...);
int vmw_msg_ioctl(struct drm_device *dev, void *data,
		  struct drm_file *file_priv);
/* Host mksGuestStats - vmwgfx_msg.c: */
int vmw_mksstat_get_kern_slot(pid_t pid, struct vmw_private *dev_priv);

int vmw_mksstat_reset_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv);
int vmw_mksstat_add_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv);
int vmw_mksstat_remove_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
int vmw_mksstat_remove_all(struct vmw_private *dev_priv);
/**
 * VMW_DEBUG_USER - Debug output for user-space debugging.
 *
 * @fmt: printf() like format string.
 *
 * This macro is for logging user-space error and debugging messages for e.g.
 * command buffer execution errors due to malformed commands, invalid context,
 * etc.
 */
#define VMW_DEBUG_USER(fmt, ...)                                              \
	DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__)
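/*
 * Example: VMW_DEBUG_USER("Invalid shader id %u.\n", cmd->body.shid);
 */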
/* Resource dirtying - vmwgfx_page_dirty.c */
void vmw_bo_dirty_scan(struct vmw_buffer_object *vbo);
int vmw_bo_dirty_add(struct vmw_buffer_object *vbo);
void vmw_bo_dirty_transfer_to_res(struct vmw_resource *res);
void vmw_bo_dirty_clear_res(struct vmw_resource *res);
void vmw_bo_dirty_release(struct vmw_buffer_object *vbo);
void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo,
			pgoff_t start, pgoff_t end);
vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf);
vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf);
/**
 * VMW_DEBUG_KMS - Debug output for kernel mode-setting
 *
 * This macro is for debugging vmwgfx mode-setting code.
 */
#define VMW_DEBUG_KMS(fmt, ...)                                               \
	DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__)
/**
 * Inline helper functions
 */

static inline void vmw_surface_unreference(struct vmw_surface **srf)
{
	struct vmw_surface *tmp_srf = *srf;
	struct vmw_resource *res = &tmp_srf->res;
	*srf = NULL;

	vmw_resource_unreference(&res);
}
static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf)
{
	(void) vmw_resource_reference(&srf->res);
	return srf;
}
static inline void vmw_bo_unreference(struct vmw_buffer_object **buf)
{
	struct vmw_buffer_object *tmp_buf = *buf;

	*buf = NULL;
	if (tmp_buf != NULL)
		ttm_bo_put(&tmp_buf->base);
}
static inline struct vmw_buffer_object *
vmw_bo_reference(struct vmw_buffer_object *buf)
{
	ttm_bo_get(&buf->base);
	return buf;
}
static inline struct ttm_mem_global *vmw_mem_glob(struct vmw_private *dev_priv)
{
	return &ttm_mem_glob;
}
static inline void vmw_fifo_resource_inc(struct vmw_private *dev_priv)
{
	atomic_inc(&dev_priv->num_fifo_resources);
}

static inline void vmw_fifo_resource_dec(struct vmw_private *dev_priv)
{
	atomic_dec(&dev_priv->num_fifo_resources);
}
/**
 * vmw_fifo_mem_read - Perform an MMIO read from the fifo memory
 *
 * @vmw: The device private
 * @fifo_reg: The fifo register to read from
 *
 * This function is intended to be equivalent to ioread32() on
 * memremap'd memory, but without byteswapping.
 */
static inline u32 vmw_fifo_mem_read(struct vmw_private *vmw, uint32 fifo_reg)
{
	BUG_ON(vmw_is_svga_v3(vmw));
	return READ_ONCE(*(vmw->fifo_mem + fifo_reg));
}
/**
 * vmw_fifo_mem_write - Perform an MMIO write to volatile memory
 *
 * @vmw: The device private
 * @fifo_reg: The fifo register to write to
 * @value: The value to write
 *
 * This function is intended to be equivalent to iowrite32 on
 * memremap'd memory, but without byteswapping.
 */
static inline void vmw_fifo_mem_write(struct vmw_private *vmw, u32 fifo_reg,
				      u32 value)
{
	BUG_ON(vmw_is_svga_v3(vmw));
	WRITE_ONCE(*(vmw->fifo_mem + fifo_reg), value);
}
static inline u32 vmw_fence_read(struct vmw_private *dev_priv)
{
	u32 fence;

	if (vmw_is_svga_v3(dev_priv))
		fence = vmw_read(dev_priv, SVGA_REG_FENCE);
	else
		fence = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_FENCE);

	return fence;
}
static inline void vmw_fence_write(struct vmw_private *dev_priv,
				   u32 fence)
{
	BUG_ON(vmw_is_svga_v3(dev_priv));
	vmw_fifo_mem_write(dev_priv, SVGA_FIFO_FENCE, fence);
}
static inline u32 vmw_irq_status_read(struct vmw_private *vmw)
{
	u32 status;

	if (vmw_is_svga_v3(vmw))
		status = vmw_read(vmw, SVGA_REG_IRQ_STATUS);
	else
		status = inl(vmw->io_start + SVGA_IRQSTATUS_PORT);

	return status;
}
static inline void vmw_irq_status_write(struct vmw_private *vmw,
					uint32 status)
{
	if (vmw_is_svga_v3(vmw))
		vmw_write(vmw, SVGA_REG_IRQ_STATUS, status);
	else
		outl(status, vmw->io_start + SVGA_IRQSTATUS_PORT);
}

#endif