]> git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/blame - drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
drm/ttm: Allow system memory as a busy placement.
[mirror_ubuntu-zesty-kernel.git] / drivers / gpu / drm / vmwgfx / vmwgfx_buffer.c
CommitLineData
fb1d9738
JB
1/**************************************************************************
2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#include "vmwgfx_drv.h"
29#include "ttm/ttm_bo_driver.h"
30#include "ttm/ttm_placement.h"
31
/* Allowed placement for ordinary, evictable VRAM buffers. */
static uint32_t vram_placement_flags = TTM_PL_FLAG_VRAM |
	TTM_PL_FLAG_CACHED;

/* VRAM placement for pinned buffers that must never be evicted. */
static uint32_t vram_ne_placement_flags = TTM_PL_FLAG_VRAM |
	TTM_PL_FLAG_CACHED |
	TTM_PL_FLAG_NO_EVICT;

/* Cached system memory; also the eviction target (see vmw_evict_flags). */
static uint32_t sys_placement_flags = TTM_PL_FLAG_SYSTEM |
	TTM_PL_FLAG_CACHED;
/*
 * Placement description for buffers living in VRAM.
 * fpfn/lpfn == 0 means no page-frame-number range restriction.
 */
struct ttm_placement vmw_vram_placement = {
	.fpfn = 0,
	.lpfn = 0,
	.num_placement = 1,
	.placement = &vram_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &vram_placement_flags
};
50
/*
 * Placement description for pinned (no-evict) VRAM buffers.
 * Same placement is used for the busy case.
 */
struct ttm_placement vmw_vram_ne_placement = {
	.fpfn = 0,
	.lpfn = 0,
	.num_placement = 1,
	.placement = &vram_ne_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &vram_ne_placement_flags
};
59
/*
 * Placement description for cached system-memory buffers; used as the
 * destination when buffers are evicted (see vmw_evict_flags).
 */
struct ttm_placement vmw_sys_placement = {
	.fpfn = 0,
	.lpfn = 0,
	.num_placement = 1,
	.placement = &sys_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};
68
/*
 * vmwgfx TTM backend. No per-backend state is needed beyond the
 * embedded struct ttm_backend, so this is a plain wrapper; it exists
 * so the object can be allocated/freed as a unit (see vmw_ttm_destroy).
 */
struct vmw_ttm_backend {
	struct ttm_backend backend;
};
72
/*
 * vmw_ttm_populate - ttm_backend_func::populate hook.
 *
 * No per-page setup is performed by this driver; the hook is a no-op
 * that always reports success.
 */
static int vmw_ttm_populate(struct ttm_backend *backend,
			    unsigned long num_pages, struct page **pages,
			    struct page *dummy_read_page)
{
	(void)backend;
	(void)num_pages;
	(void)pages;
	(void)dummy_read_page;

	return 0;
}
79
/*
 * vmw_ttm_bind - ttm_backend_func::bind hook.
 *
 * Nothing to bind for this device; always succeeds.
 */
static int vmw_ttm_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
{
	(void)backend;
	(void)bo_mem;

	return 0;
}
84
/*
 * vmw_ttm_unbind - ttm_backend_func::unbind hook.
 *
 * Nothing was bound, so nothing to undo; always succeeds.
 */
static int vmw_ttm_unbind(struct ttm_backend *backend)
{
	(void)backend;

	return 0;
}
89
/*
 * vmw_ttm_clear - ttm_backend_func::clear hook.
 *
 * Intentionally empty: this backend holds no bound state to clear.
 */
static void vmw_ttm_clear(struct ttm_backend *backend)
{
	(void)backend;
}
93
94static void vmw_ttm_destroy(struct ttm_backend *backend)
95{
96 struct vmw_ttm_backend *vmw_be =
97 container_of(backend, struct vmw_ttm_backend, backend);
98
99 kfree(vmw_be);
100}
101
/* Backend virtual-function table handed to TTM for vmwgfx buffers. */
static struct ttm_backend_func vmw_ttm_func = {
	.populate = vmw_ttm_populate,
	.clear = vmw_ttm_clear,
	.bind = vmw_ttm_bind,
	.unbind = vmw_ttm_unbind,
	.destroy = vmw_ttm_destroy,
};
109
110struct ttm_backend *vmw_ttm_backend_init(struct ttm_bo_device *bdev)
111{
112 struct vmw_ttm_backend *vmw_be;
113
114 vmw_be = kmalloc(sizeof(*vmw_be), GFP_KERNEL);
115 if (!vmw_be)
116 return NULL;
117
118 vmw_be->backend.func = &vmw_ttm_func;
119
120 return &vmw_be->backend;
121}
122
/*
 * vmw_invalidate_caches - TTM cache-invalidation hook.
 *
 * No caches to invalidate for this device; always succeeds.
 */
int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	(void)bdev;
	(void)flags;

	return 0;
}
127
/*
 * vmw_init_mem_type - TTM hook: describe one memory type to TTM.
 *
 * Supports TTM_PL_SYSTEM (plain cached system pages) and TTM_PL_VRAM
 * (the device's VRAM aperture, located via dev_priv->vram_start /
 * vram_size). Any other type is rejected with -EINVAL.
 */
int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
		      struct ttm_mem_type_manager *man)
{
	struct vmw_private *dev_priv =
		container_of(bdev, struct vmw_private, bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */

		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		/* "On-card" video ram */
		man->gpu_offset = 0;
		man->io_offset = dev_priv->vram_start;
		man->io_size = dev_priv->vram_size;
		/* Fixed aperture that needs an ioremap before CPU access. */
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
		    TTM_MEMTYPE_FLAG_NEEDS_IOREMAP | TTM_MEMTYPE_FLAG_MAPPABLE;
		man->io_addr = NULL;
		man->available_caching = TTM_PL_MASK_CACHING;
		/* Default to write-combined mappings for VRAM. */
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}
159
/*
 * vmw_evict_flags - TTM hook: choose where an evicted buffer goes.
 *
 * Evicted buffers are always placed in cached system memory,
 * regardless of the buffer object passed in.
 */
void vmw_evict_flags(struct ttm_buffer_object *bo,
		     struct ttm_placement *placement)
{
	*placement = vmw_sys_placement;
}
165
/**
 * FIXME: Proper access checks on buffers.
 */

static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	/* Access checking is not implemented yet; allow everything. */
	(void)bo;
	(void)filp;

	return 0;
}
174
effe1105
TH
175static void vmw_move_notify(struct ttm_buffer_object *bo,
176 struct ttm_mem_reg *new_mem)
177{
178 if (new_mem->mem_type != TTM_PL_SYSTEM)
179 vmw_dmabuf_gmr_unbind(bo);
180}
181
/*
 * vmw_swap_notify - TTM swap-out hook.
 *
 * Unconditionally drops the buffer's GMR binding before TTM swaps the
 * buffer out.
 */
static void vmw_swap_notify(struct ttm_buffer_object *bo)
{
	vmw_dmabuf_gmr_unbind(bo);
}
186
/**
 * FIXME: We're using the old vmware polling method to sync.
 * Do this with fences instead.
 */
/*
 * vmw_sync_obj_ref - TTM sync-object reference hook.
 *
 * Sync objects in this driver are fence sequence numbers smuggled
 * through a void pointer (see vmw_sync_obj_signaled), so "taking a
 * reference" is simply handing the value back.
 */
static void *vmw_sync_obj_ref(void *sync_obj)
{
	return sync_obj;
}
196
/*
 * vmw_sync_obj_unref - TTM sync-object unreference hook.
 *
 * There is no refcount to drop; just clear the caller's pointer.
 */
static void vmw_sync_obj_unref(void **sync_obj)
{
	*sync_obj = NULL;
}
201
/*
 * vmw_sync_obj_flush - TTM hook: kick the device so fences progress.
 *
 * Writes SVGA_SYNC_GENERIC to the device sync register under the hw
 * mutex. @sync_obj is unused; @sync_arg is the device's vmw_private.
 * Always returns 0.
 */
static int vmw_sync_obj_flush(void *sync_obj, void *sync_arg)
{
	struct vmw_private *dev_priv = (struct vmw_private *)sync_arg;

	mutex_lock(&dev_priv->hw_mutex);
	vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
	mutex_unlock(&dev_priv->hw_mutex);
	return 0;
}
211
/*
 * vmw_sync_obj_signaled - TTM hook: has this fence already signaled?
 *
 * @sync_obj encodes a 32-bit fence sequence number as a pointer value;
 * @sync_arg is the device's vmw_private.
 */
static bool vmw_sync_obj_signaled(void *sync_obj, void *sync_arg)
{
	struct vmw_private *dev_priv = (struct vmw_private *)sync_arg;
	uint32_t sequence = (unsigned long) sync_obj;

	return vmw_fence_signaled(dev_priv, sequence);
}
219
/*
 * vmw_sync_obj_wait - TTM hook: wait for the fence sequence encoded in
 * @sync_obj to signal, with a fixed 3-second timeout.
 *
 * NOTE(review): the @lazy and @interruptible arguments from TTM are
 * ignored here — both booleans passed to vmw_wait_fence are hard-coded
 * false. Confirm whether they should be forwarded instead.
 */
static int vmw_sync_obj_wait(void *sync_obj, void *sync_arg,
			     bool lazy, bool interruptible)
{
	struct vmw_private *dev_priv = (struct vmw_private *)sync_arg;
	uint32_t sequence = (unsigned long) sync_obj;

	return vmw_wait_fence(dev_priv, false, sequence, false, 3*HZ);
}
228
/* TTM driver callback table for the vmwgfx device. */
struct ttm_bo_driver vmw_bo_driver = {
	.create_ttm_backend_entry = vmw_ttm_backend_init,
	.invalidate_caches = vmw_invalidate_caches,
	.init_mem_type = vmw_init_mem_type,
	.evict_flags = vmw_evict_flags,
	.move = NULL,	/* no driver-specific move hook */
	.verify_access = vmw_verify_access,
	.sync_obj_signaled = vmw_sync_obj_signaled,
	.sync_obj_wait = vmw_sync_obj_wait,
	.sync_obj_flush = vmw_sync_obj_flush,
	.sync_obj_unref = vmw_sync_obj_unref,
	.sync_obj_ref = vmw_sync_obj_ref,
	.move_notify = vmw_move_notify,
	.swap_notify = vmw_swap_notify
};