drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "drmP.h"
#include "ttm/ttm_placement.h"

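/*
 * vmw_fifo_init - Allocate the bounce buffers and bring up the device FIFO.
 *
 * Saves the current ENABLE and CONFIG_DONE register state, enables the
 * device, initializes the MIN/MAX/NEXT_CMD/STOP/BUSY fields in FIFO
 * memory, reads back the FIFO capabilities and emits an initial fence.
 * Returns 0 on success or a negative error code.
 */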
int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t max;
	uint32_t min;
	uint32_t dummy;
	int ret;

	fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
	fifo->static_buffer = vmalloc(fifo->static_buffer_size);
	if (unlikely(fifo->static_buffer == NULL))
		return -ENOMEM;

	fifo->last_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
	fifo->last_data_size = 0;
	fifo->last_buffer_add = false;
	fifo->last_buffer = vmalloc(fifo->last_buffer_size);
	if (unlikely(fifo->last_buffer == NULL)) {
		ret = -ENOMEM;
		goto out_err;
	}

	fifo->dynamic_buffer = NULL;
	fifo->reserved_size = 0;
	fifo->using_bounce_buffer = false;

	init_rwsem(&fifo->rwsem);

	/*
	 * Allow mapping the first page read-only to user-space.
	 */

	DRM_INFO("width %d\n", vmw_read(dev_priv, SVGA_REG_WIDTH));
	DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
	DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));

	mutex_lock(&dev_priv->hw_mutex);
	dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
	dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
	vmw_write(dev_priv, SVGA_REG_ENABLE, 1);

	min = 4;
	if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)
		min = vmw_read(dev_priv, SVGA_REG_MEM_REGS);
	min <<= 2;

	if (min < PAGE_SIZE)
		min = PAGE_SIZE;

	iowrite32(min, fifo_mem + SVGA_FIFO_MIN);
	iowrite32(dev_priv->mmio_size, fifo_mem + SVGA_FIFO_MAX);
	wmb();
	iowrite32(min, fifo_mem + SVGA_FIFO_NEXT_CMD);
	iowrite32(min, fifo_mem + SVGA_FIFO_STOP);
	iowrite32(0, fifo_mem + SVGA_FIFO_BUSY);
	mb();

	vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
	mutex_unlock(&dev_priv->hw_mutex);

	max = ioread32(fifo_mem + SVGA_FIFO_MAX);
	min = ioread32(fifo_mem + SVGA_FIFO_MIN);
	fifo->capabilities = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES);

	DRM_INFO("Fifo max 0x%08x min 0x%08x cap 0x%08x\n",
		 (unsigned int) max,
		 (unsigned int) min,
		 (unsigned int) fifo->capabilities);

	dev_priv->fence_seq = (uint32_t) -100;
	dev_priv->last_read_sequence = (uint32_t) -100;
	iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);

	return vmw_fifo_send_fence(dev_priv, &dummy);
out_err:
	vfree(fifo->static_buffer);
	fifo->static_buffer = NULL;
	return ret;
}

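/*
 * vmw_fifo_ping_host - Wake the device to start processing FIFO commands.
 *
 * If the FIFO is not already marked busy, mark it busy and write the
 * given reason to the SYNC register so the host starts draining commands.
 */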
void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;

	mutex_lock(&dev_priv->hw_mutex);

	if (unlikely(ioread32(fifo_mem + SVGA_FIFO_BUSY) == 0)) {
		iowrite32(1, fifo_mem + SVGA_FIFO_BUSY);
		vmw_write(dev_priv, SVGA_REG_SYNC, reason);
	}

	mutex_unlock(&dev_priv->hw_mutex);
}

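/*
 * vmw_fifo_release - Tear down the FIFO set up by vmw_fifo_init().
 *
 * Waits for the device to go idle, saves the last read fence value,
 * restores the CONFIG_DONE and ENABLE register state captured at init
 * time, and frees all bounce buffers.
 */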
void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;

	mutex_lock(&dev_priv->hw_mutex);

	while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
		vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);

	dev_priv->last_read_sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE);

	vmw_write(dev_priv, SVGA_REG_CONFIG_DONE,
		  dev_priv->config_done_state);
	vmw_write(dev_priv, SVGA_REG_ENABLE,
		  dev_priv->enable_state);

	mutex_unlock(&dev_priv->hw_mutex);

	if (likely(fifo->last_buffer != NULL)) {
		vfree(fifo->last_buffer);
		fifo->last_buffer = NULL;
	}

	if (likely(fifo->static_buffer != NULL)) {
		vfree(fifo->static_buffer);
		fifo->static_buffer = NULL;
	}

	if (likely(fifo->dynamic_buffer != NULL)) {
		vfree(fifo->dynamic_buffer);
		fifo->dynamic_buffer = NULL;
	}
}

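/*
 * vmw_fifo_is_full - Check whether the FIFO currently has @bytes or less
 * of free space, i.e. whether a @bytes-sized command would not fit.
 */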
static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
	uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
	uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
	uint32_t stop = ioread32(fifo_mem + SVGA_FIFO_STOP);

	return ((max - next_cmd) + (stop - min) <= bytes);
}

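/*
 * vmw_fifo_wait_noirq - Poll for FIFO space when FIFO progress interrupts
 * are not available. Sleeps in one-jiffy steps until space frees up, the
 * timeout expires (-EBUSY) or a signal is received (-ERESTARTSYS).
 */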
static int vmw_fifo_wait_noirq(struct vmw_private *dev_priv,
			       uint32_t bytes, bool interruptible,
			       unsigned long timeout)
{
	int ret = 0;
	unsigned long end_jiffies = jiffies + timeout;
	DEFINE_WAIT(__wait);

	DRM_INFO("Fifo wait noirq.\n");

	for (;;) {
		prepare_to_wait(&dev_priv->fifo_queue, &__wait,
				(interruptible) ?
				TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
		if (!vmw_fifo_is_full(dev_priv, bytes))
			break;
		if (time_after_eq(jiffies, end_jiffies)) {
			ret = -EBUSY;
			DRM_ERROR("SVGA device lockup.\n");
			break;
		}
		schedule_timeout(1);
		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}
	finish_wait(&dev_priv->fifo_queue, &__wait);
	wake_up_all(&dev_priv->fifo_queue);
	DRM_INFO("Fifo noirq exit.\n");
	return ret;
}

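/*
 * vmw_fifo_wait - Wait until at least @bytes of FIFO space is available.
 *
 * Pings the host with SVGA_SYNC_FIFOFULL, then either polls (no IRQ
 * support) or enables the FIFO_PROGRESS interrupt and sleeps on the
 * fifo_queue waitqueue until space frees up or the timeout expires.
 */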
static int vmw_fifo_wait(struct vmw_private *dev_priv,
			 uint32_t bytes, bool interruptible,
			 unsigned long timeout)
{
	long ret = 1L;
	unsigned long irq_flags;

	if (likely(!vmw_fifo_is_full(dev_priv, bytes)))
		return 0;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_FIFOFULL);
	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
		return vmw_fifo_wait_noirq(dev_priv, bytes,
					   interruptible, timeout);

	mutex_lock(&dev_priv->hw_mutex);
	if (atomic_add_return(1, &dev_priv->fifo_queue_waiters) > 0) {
		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
		outl(SVGA_IRQFLAG_FIFO_PROGRESS,
		     dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
		vmw_write(dev_priv, SVGA_REG_IRQMASK,
			  vmw_read(dev_priv, SVGA_REG_IRQMASK) |
			  SVGA_IRQFLAG_FIFO_PROGRESS);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
	}
	mutex_unlock(&dev_priv->hw_mutex);

	if (interruptible)
		ret = wait_event_interruptible_timeout
		    (dev_priv->fifo_queue,
		     !vmw_fifo_is_full(dev_priv, bytes), timeout);
	else
		ret = wait_event_timeout
		    (dev_priv->fifo_queue,
		     !vmw_fifo_is_full(dev_priv, bytes), timeout);

	if (unlikely(ret == 0))
		ret = -EBUSY;
	else if (likely(ret > 0))
		ret = 0;

	mutex_lock(&dev_priv->hw_mutex);
	if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) {
		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
		vmw_write(dev_priv, SVGA_REG_IRQMASK,
			  vmw_read(dev_priv, SVGA_REG_IRQMASK) &
			  ~SVGA_IRQFLAG_FIFO_PROGRESS);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
	}
	mutex_unlock(&dev_priv->hw_mutex);

	return ret;
}

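/*
 * vmw_fifo_reserve - Reserve @bytes of space for a command in the FIFO.
 *
 * Takes the FIFO rwsem for writing and returns a pointer the caller can
 * write the command to: either directly into FIFO memory (announcing the
 * reservation through SVGA_FIFO_RESERVED when the device supports it), or
 * into a static or dynamically allocated bounce buffer that
 * vmw_fifo_commit() later copies into the FIFO. Returns NULL if the
 * request cannot be satisfied. Must be paired with vmw_fifo_commit().
 */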
void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t max;
	uint32_t min;
	uint32_t next_cmd;
	uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
	int ret;

	down_write(&fifo_state->rwsem);
	max = ioread32(fifo_mem + SVGA_FIFO_MAX);
	min = ioread32(fifo_mem + SVGA_FIFO_MIN);
	next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);

	if (unlikely(bytes >= (max - min)))
		goto out_err;

	BUG_ON(fifo_state->reserved_size != 0);
	BUG_ON(fifo_state->dynamic_buffer != NULL);

	fifo_state->reserved_size = bytes;

	while (1) {
		uint32_t stop = ioread32(fifo_mem + SVGA_FIFO_STOP);
		bool need_bounce = false;
		bool reserve_in_place = false;

		if (next_cmd >= stop) {
			if (likely((next_cmd + bytes < max ||
				    (next_cmd + bytes == max && stop > min))))
				reserve_in_place = true;

			else if (vmw_fifo_is_full(dev_priv, bytes)) {
				ret = vmw_fifo_wait(dev_priv, bytes,
						    false, 3 * HZ);
				if (unlikely(ret != 0))
					goto out_err;
			} else
				need_bounce = true;

		} else {

			if (likely((next_cmd + bytes < stop)))
				reserve_in_place = true;
			else {
				ret = vmw_fifo_wait(dev_priv, bytes,
						    false, 3 * HZ);
				if (unlikely(ret != 0))
					goto out_err;
			}
		}

		if (reserve_in_place) {
			if (reserveable || bytes <= sizeof(uint32_t)) {
				fifo_state->using_bounce_buffer = false;

				if (reserveable)
					iowrite32(bytes, fifo_mem +
						  SVGA_FIFO_RESERVED);
				return fifo_mem + (next_cmd >> 2);
			} else {
				need_bounce = true;
			}
		}

		if (need_bounce) {
			fifo_state->using_bounce_buffer = true;
			if (bytes < fifo_state->static_buffer_size)
				return fifo_state->static_buffer;
			else {
				fifo_state->dynamic_buffer = vmalloc(bytes);
				return fifo_state->dynamic_buffer;
			}
		}
	}
out_err:
	fifo_state->reserved_size = 0;
	up_write(&fifo_state->rwsem);
	return NULL;
}

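/*
 * vmw_fifo_res_copy - Commit a bounce buffer using the RESERVE capability.
 *
 * Announces the full size through SVGA_FIFO_RESERVED and copies the buffer
 * into FIFO memory in at most two chunks, wrapping from MAX back to MIN.
 */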
static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
			      __le32 __iomem *fifo_mem,
			      uint32_t next_cmd,
			      uint32_t max, uint32_t min, uint32_t bytes)
{
	uint32_t chunk_size = max - next_cmd;
	uint32_t rest;
	uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
	    fifo_state->dynamic_buffer : fifo_state->static_buffer;

	if (bytes < chunk_size)
		chunk_size = bytes;

	iowrite32(bytes, fifo_mem + SVGA_FIFO_RESERVED);
	mb();
	memcpy_toio(fifo_mem + (next_cmd >> 2), buffer, chunk_size);
	rest = bytes - chunk_size;
	if (rest)
		memcpy_toio(fifo_mem + (min >> 2), buffer + (chunk_size >> 2),
			    rest);
}

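/*
 * vmw_fifo_slow_copy - Commit a bounce buffer one 32-bit word at a time,
 * updating NEXT_CMD after every word, for devices without the RESERVE
 * capability.
 */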
static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
			       __le32 __iomem *fifo_mem,
			       uint32_t next_cmd,
			       uint32_t max, uint32_t min, uint32_t bytes)
{
	uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
	    fifo_state->dynamic_buffer : fifo_state->static_buffer;

	while (bytes > 0) {
		iowrite32(*buffer++, fifo_mem + (next_cmd >> 2));
		next_cmd += sizeof(uint32_t);
		if (unlikely(next_cmd == max))
			next_cmd = min;
		mb();
		iowrite32(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
		mb();
		bytes -= sizeof(uint32_t);
	}
}

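/*
 * vmw_fifo_commit - Commit @bytes of data previously reserved with
 * vmw_fifo_reserve().
 *
 * Copies out any bounce buffer, advances NEXT_CMD (wrapping at MAX),
 * clears SVGA_FIFO_RESERVED, pings the host and drops the FIFO rwsem.
 */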
void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
	uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
	uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
	bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;

	BUG_ON((bytes & 3) != 0);
	BUG_ON(bytes > fifo_state->reserved_size);

	fifo_state->reserved_size = 0;

	if (fifo_state->using_bounce_buffer) {
		if (reserveable)
			vmw_fifo_res_copy(fifo_state, fifo_mem,
					  next_cmd, max, min, bytes);
		else
			vmw_fifo_slow_copy(fifo_state, fifo_mem,
					   next_cmd, max, min, bytes);

		if (fifo_state->dynamic_buffer) {
			vfree(fifo_state->dynamic_buffer);
			fifo_state->dynamic_buffer = NULL;
		}

	}

	if (fifo_state->using_bounce_buffer || reserveable) {
		next_cmd += bytes;
		if (next_cmd >= max)
			next_cmd -= max - min;
		mb();
		iowrite32(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
	}

	if (reserveable)
		iowrite32(0, fifo_mem + SVGA_FIFO_RESERVED);
	mb();
	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
	up_write(&fifo_state->rwsem);
}

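/*
 * vmw_fifo_send_fence - Emit an SVGA_CMD_FENCE command and return the
 * sequence number used in @sequence.
 *
 * Zero is never used as a sequence number. If the device lacks the FENCE
 * capability the command is skipped and fences are emulated by the
 * interrupt code; if FIFO space cannot be reserved, a fallback wait is
 * performed and -ENOMEM is returned.
 */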
int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
	struct svga_fifo_cmd_fence *cmd_fence;
	void *fm;
	int ret = 0;
	uint32_t bytes = sizeof(__le32) + sizeof(*cmd_fence);

	fm = vmw_fifo_reserve(dev_priv, bytes);
	if (unlikely(fm == NULL)) {
		down_write(&fifo_state->rwsem);
		*sequence = dev_priv->fence_seq;
		up_write(&fifo_state->rwsem);
		ret = -ENOMEM;
		(void)vmw_fallback_wait(dev_priv, false, true, *sequence,
					false, 3*HZ);
		goto out_err;
	}

	do {
		*sequence = dev_priv->fence_seq++;
	} while (*sequence == 0);

	if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {

		/*
		 * Don't request hardware to send a fence. The
		 * waiting code in vmwgfx_irq.c will emulate this.
		 */

		vmw_fifo_commit(dev_priv, 0);
		return 0;
	}

	*(__le32 *) fm = cpu_to_le32(SVGA_CMD_FENCE);
	cmd_fence = (struct svga_fifo_cmd_fence *)
	    ((unsigned long)fm + sizeof(__le32));

	iowrite32(*sequence, &cmd_fence->fence);
	fifo_state->last_buffer_add = true;
	vmw_fifo_commit(dev_priv, bytes);
	fifo_state->last_buffer_add = false;

out_err:
	return ret;
}

/**
 * Map the first page of the FIFO read-only to user-space.
 */

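/*
 * Fault handler for the single-page FIFO mapping set up by
 * vmw_fifo_mmap(). Only the first page of the VMA is ever inserted;
 * faults on any other address raise SIGBUS.
 */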
static int vmw_fifo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	int ret;
	unsigned long address = (unsigned long)vmf->virtual_address;

	if (address != vma->vm_start)
		return VM_FAULT_SIGBUS;

	ret = vm_insert_pfn(vma, address, vma->vm_pgoff);
	if (likely(ret == -EBUSY || ret == 0))
		return VM_FAULT_NOPAGE;
	else if (ret == -ENOMEM)
		return VM_FAULT_OOM;

	return VM_FAULT_SIGBUS;
}

static struct vm_operations_struct vmw_fifo_vm_ops = {
	.fault = vmw_fifo_vm_fault,
	.open = NULL,
	.close = NULL
};

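/*
 * vmw_fifo_mmap - Map the first FIFO page read-only into user-space.
 *
 * Only a single-page mapping at the FIFO MMIO offset is accepted; write
 * permission is stripped, the mapping is made uncached, and the page is
 * inserted on first fault by vmw_fifo_vm_fault().
 */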
int vmw_fifo_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv;
	struct vmw_private *dev_priv;

	file_priv = (struct drm_file *)filp->private_data;
	dev_priv = vmw_priv(file_priv->minor->dev);

	if (vma->vm_pgoff != (dev_priv->mmio_start >> PAGE_SHIFT) ||
	    (vma->vm_end - vma->vm_start) != PAGE_SIZE)
		return -EINVAL;

	vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_SHARED;
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	vma->vm_page_prot = ttm_io_prot(TTM_PL_FLAG_UNCACHED,
					vma->vm_page_prot);
	vma->vm_ops = &vmw_fifo_vm_ops;
	return 0;
}