/**************************************************************************
 *
 * Copyright © 2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_bo_api.h>

#include "vmwgfx_drv.h"

/*
 * Size of inline command buffers. Try to make sure that a page size is a
 * multiple of the DMA pool allocation size.
 */
#define VMW_CMDBUF_INLINE_ALIGN    64
#define VMW_CMDBUF_INLINE_SIZE \
	(1024 - ALIGN(sizeof(SVGACBHeader), VMW_CMDBUF_INLINE_ALIGN))

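/*
 * Worked out (illustrative note, not from the original source): with
 * VMW_CMDBUF_INLINE_ALIGN == 64, the 64-byte-aligned SVGACBHeader plus
 * VMW_CMDBUF_INLINE_SIZE bytes of inline command space add up to exactly
 * 1024 bytes per struct vmw_cmdbuf_dheader below, so four such allocations
 * tile a 4096-byte page in the "dheaders" DMA pool without crossing page
 * boundaries.
 */
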
/**
 * struct vmw_cmdbuf_context - Command buffer context queues
 *
 * @submitted: List of command buffers that have been submitted to the
 * manager but not yet submitted to hardware.
 * @hw_submitted: List of command buffers submitted to hardware.
 * @preempted: List of preempted command buffers.
 * @num_hw_submitted: Number of buffers currently being processed by hardware.
 */
struct vmw_cmdbuf_context {
	struct list_head submitted;
	struct list_head hw_submitted;
	struct list_head preempted;
	unsigned num_hw_submitted;
};

/**
 * struct vmw_cmdbuf_man - Command buffer manager
 *
 * @cur_mutex: Mutex protecting the command buffer used for incremental small
 * kernel command submissions, @cur.
 * @space_mutex: Mutex to protect against starvation when we allocate
 * main pool buffer space.
 * @work: A struct work_struct implementing command buffer error handling.
 * Immutable.
 * @dev_priv: Pointer to the device private struct. Immutable.
 * @ctx: Array of command buffer context queues. The queues and the context
 * data are protected by @lock.
 * @error: List of command buffers that have caused device errors.
 * Protected by @lock.
 * @mm: Range manager for the command buffer space. Manager allocations and
 * frees are protected by @lock.
 * @cmd_space: Buffer object for the command buffer space, unless we were
 * able to make a contiguous coherent DMA memory allocation, @handle. Immutable.
 * @map_obj: Mapping state for @cmd_space. Immutable.
 * @map: Pointer to command buffer space. May be a mapped buffer object or
 * a contiguous coherent DMA memory allocation. Immutable.
 * @cur: Command buffer for small kernel command submissions. Protected by
 * the @cur_mutex.
 * @cur_pos: Space already used in @cur. Protected by @cur_mutex.
 * @default_size: Default size for the @cur command buffer. Immutable.
 * @max_hw_submitted: Max number of in-flight command buffers the device can
 * handle. Immutable.
 * @lock: Spinlock protecting command submission queues.
 * @headers: Pool of DMA memory for device command buffer headers.
 * Internal protection.
 * @dheaders: Pool of DMA memory for device command buffer headers with trailing
 * space for inline data. Internal protection.
 * @tasklet: Tasklet struct for irq processing. Immutable.
 * @alloc_queue: Wait queue for processes waiting to allocate command buffer
 * space.
 * @idle_queue: Wait queue for processes waiting for command buffer idle.
 * @irq_on: Whether the process function has requested irq to be turned on.
 * Protected by @lock.
 * @using_mob: Whether the command buffer space is a MOB or a contiguous DMA
 * allocation. Immutable.
 * @has_pool: Has a large pool of DMA memory which allows larger allocations.
 * Typically this is false only during bootstrap.
 * @handle: DMA address handle for the command buffer space if @using_mob is
 * false. Immutable.
 * @size: The size of the command buffer space. Immutable.
 */
struct vmw_cmdbuf_man {
	struct mutex cur_mutex;
	struct mutex space_mutex;
	struct work_struct work;
	struct vmw_private *dev_priv;
	struct vmw_cmdbuf_context ctx[SVGA_CB_CONTEXT_MAX];
	struct list_head error;
	struct drm_mm mm;
	struct ttm_buffer_object *cmd_space;
	struct ttm_bo_kmap_obj map_obj;
	u8 *map;
	struct vmw_cmdbuf_header *cur;
	size_t cur_pos;
	size_t default_size;
	unsigned max_hw_submitted;
	spinlock_t lock;
	struct dma_pool *headers;
	struct dma_pool *dheaders;
	struct tasklet_struct tasklet;
	wait_queue_head_t alloc_queue;
	wait_queue_head_t idle_queue;
	bool irq_on;
	bool using_mob;
	bool has_pool;
	dma_addr_t handle;
	size_t size;
};

/**
 * struct vmw_cmdbuf_header - Command buffer metadata
 *
 * @man: The command buffer manager.
 * @cb_header: Device command buffer header, allocated from a DMA pool.
 * @cb_context: The device command buffer context.
 * @list: List head for attaching to the manager lists.
 * @node: The range manager node.
 * @handle: The DMA address of @cb_header. Handed to the device on command
 * buffer submission.
 * @cmd: Pointer to the command buffer space of this buffer.
 * @size: Size of the command buffer space of this buffer.
 * @reserved: Reserved space of this buffer.
 * @inline_space: Whether inline command buffer space is used.
 */
struct vmw_cmdbuf_header {
	struct vmw_cmdbuf_man *man;
	SVGACBHeader *cb_header;
	SVGACBContext cb_context;
	struct list_head list;
	struct drm_mm_node node;
	dma_addr_t handle;
	u8 *cmd;
	size_t size;
	size_t reserved;
	bool inline_space;
};

/**
 * struct vmw_cmdbuf_dheader - Device command buffer header with inline
 * command buffer space.
 *
 * @cb_header: Device command buffer header.
 * @cmd: Inline command buffer space.
 */
struct vmw_cmdbuf_dheader {
	SVGACBHeader cb_header;
	u8 cmd[VMW_CMDBUF_INLINE_SIZE] __aligned(VMW_CMDBUF_INLINE_ALIGN);
};

/**
 * struct vmw_cmdbuf_alloc_info - Command buffer space allocation metadata
 *
 * @page_size: Size of requested command buffer space in pages.
 * @node: Pointer to the range manager node.
 * @done: True if this allocation has succeeded.
 */
struct vmw_cmdbuf_alloc_info {
	size_t page_size;
	struct drm_mm_node *node;
	bool done;
};

/* Loop over each context in the command buffer manager. */
#define for_each_cmdbuf_ctx(_man, _i, _ctx) \
	for (_i = 0, _ctx = &(_man)->ctx[0]; (_i) < SVGA_CB_CONTEXT_MAX; \
	     ++(_i), ++(_ctx))

static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, bool enable);


/**
 * vmw_cmdbuf_cur_lock - Helper to lock the cur_mutex.
 *
 * @man: The command buffer manager.
 * @interruptible: Whether to wait interruptible when locking.
 */
static int vmw_cmdbuf_cur_lock(struct vmw_cmdbuf_man *man, bool interruptible)
{
	if (interruptible) {
		if (mutex_lock_interruptible(&man->cur_mutex))
			return -ERESTARTSYS;
	} else {
		mutex_lock(&man->cur_mutex);
	}

	return 0;
}

/**
 * vmw_cmdbuf_cur_unlock - Helper to unlock the cur_mutex.
 *
 * @man: The command buffer manager.
 */
static void vmw_cmdbuf_cur_unlock(struct vmw_cmdbuf_man *man)
{
	mutex_unlock(&man->cur_mutex);
}

/**
 * vmw_cmdbuf_header_inline_free - Free a struct vmw_cmdbuf_header that has
 * been used for the device context with inline command buffers.
 * Need not be called locked.
 *
 * @header: Pointer to the header to free.
 */
static void vmw_cmdbuf_header_inline_free(struct vmw_cmdbuf_header *header)
{
	struct vmw_cmdbuf_dheader *dheader;

	if (WARN_ON_ONCE(!header->inline_space))
		return;

	dheader = container_of(header->cb_header, struct vmw_cmdbuf_dheader,
			       cb_header);
	dma_pool_free(header->man->dheaders, dheader, header->handle);
	kfree(header);
}

/**
 * __vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
 * associated structures.
 *
 * @header: Pointer to the header to free.
 *
 * For internal use. Must be called with man::lock held.
 */
static void __vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
{
	struct vmw_cmdbuf_man *man = header->man;

	lockdep_assert_held_once(&man->lock);

	if (header->inline_space) {
		vmw_cmdbuf_header_inline_free(header);
		return;
	}

	drm_mm_remove_node(&header->node);
	wake_up_all(&man->alloc_queue);
	if (header->cb_header)
		dma_pool_free(man->headers, header->cb_header,
			      header->handle);
	kfree(header);
}

/**
 * vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
 * associated structures.
 *
 * @header: Pointer to the header to free.
 */
void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
{
	struct vmw_cmdbuf_man *man = header->man;

	/* Avoid locking if inline_space */
	if (header->inline_space) {
		vmw_cmdbuf_header_inline_free(header);
		return;
	}
	spin_lock_bh(&man->lock);
	__vmw_cmdbuf_header_free(header);
	spin_unlock_bh(&man->lock);
}


/**
 * vmw_cmdbuf_header_submit - Submit a command buffer to hardware.
 *
 * @header: The header of the buffer to submit.
 */
static int vmw_cmdbuf_header_submit(struct vmw_cmdbuf_header *header)
{
	struct vmw_cmdbuf_man *man = header->man;
	u32 val;

	val = upper_32_bits(header->handle);
	vmw_write(man->dev_priv, SVGA_REG_COMMAND_HIGH, val);

	val = lower_32_bits(header->handle);
	val |= header->cb_context & SVGA_CB_CONTEXT_MASK;
	vmw_write(man->dev_priv, SVGA_REG_COMMAND_LOW, val);

	return header->cb_header->status;
}

/**
 * vmw_cmdbuf_ctx_init - Initialize a command buffer context.
 *
 * @ctx: The command buffer context to initialize.
 */
static void vmw_cmdbuf_ctx_init(struct vmw_cmdbuf_context *ctx)
{
	INIT_LIST_HEAD(&ctx->hw_submitted);
	INIT_LIST_HEAD(&ctx->submitted);
	INIT_LIST_HEAD(&ctx->preempted);
	ctx->num_hw_submitted = 0;
}

/**
 * vmw_cmdbuf_ctx_submit - Submit command buffers from a command buffer
 * context.
 *
 * @man: The command buffer manager.
 * @ctx: The command buffer context.
 *
 * Submits command buffers to hardware until there are no more command
 * buffers to submit or the hardware can't handle more command buffers.
 */
static void vmw_cmdbuf_ctx_submit(struct vmw_cmdbuf_man *man,
				  struct vmw_cmdbuf_context *ctx)
{
	while (ctx->num_hw_submitted < man->max_hw_submitted &&
	       !list_empty(&ctx->submitted)) {
		struct vmw_cmdbuf_header *entry;
		SVGACBStatus status;

		entry = list_first_entry(&ctx->submitted,
					 struct vmw_cmdbuf_header,
					 list);

		status = vmw_cmdbuf_header_submit(entry);

		/* This should never happen */
		if (WARN_ON_ONCE(status == SVGA_CB_STATUS_QUEUE_FULL)) {
			entry->cb_header->status = SVGA_CB_STATUS_NONE;
			break;
		}

		list_del(&entry->list);
		list_add_tail(&entry->list, &ctx->hw_submitted);
		ctx->num_hw_submitted++;
	}
}

/**
 * vmw_cmdbuf_ctx_process - Process a command buffer context.
 *
 * @man: The command buffer manager.
 * @ctx: The command buffer context.
 * @notempty: Incremented if the context still has command buffers queued
 * for submission when processing is done.
 *
 * Submit command buffers to hardware if possible, and process finished
 * buffers, typically by freeing them, but on preemption or error taking
 * appropriate action. Wake up waiters if appropriate.
 */
static void vmw_cmdbuf_ctx_process(struct vmw_cmdbuf_man *man,
				   struct vmw_cmdbuf_context *ctx,
				   int *notempty)
{
	struct vmw_cmdbuf_header *entry, *next;

	vmw_cmdbuf_ctx_submit(man, ctx);

	list_for_each_entry_safe(entry, next, &ctx->hw_submitted, list) {
		SVGACBStatus status = entry->cb_header->status;

		if (status == SVGA_CB_STATUS_NONE)
			break;

		list_del(&entry->list);
		wake_up_all(&man->idle_queue);
		ctx->num_hw_submitted--;
		switch (status) {
		case SVGA_CB_STATUS_COMPLETED:
			__vmw_cmdbuf_header_free(entry);
			break;
		case SVGA_CB_STATUS_COMMAND_ERROR:
		case SVGA_CB_STATUS_CB_HEADER_ERROR:
			list_add_tail(&entry->list, &man->error);
			schedule_work(&man->work);
			break;
		case SVGA_CB_STATUS_PREEMPTED:
			list_add(&entry->list, &ctx->preempted);
			break;
		default:
			WARN_ONCE(true, "Undefined command buffer status.\n");
			__vmw_cmdbuf_header_free(entry);
			break;
		}
	}

	vmw_cmdbuf_ctx_submit(man, ctx);
	if (!list_empty(&ctx->submitted))
		(*notempty)++;
}

/**
 * vmw_cmdbuf_man_process - Process all command buffer contexts and
 * switch on and off irqs as appropriate.
 *
 * @man: The command buffer manager.
 *
 * Calls vmw_cmdbuf_ctx_process() on all contexts. If any context has
 * command buffers left that are not submitted to hardware, make sure
 * IRQ handling is turned on. Otherwise, make sure it's turned off.
 */
static void vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man)
{
	int notempty;
	struct vmw_cmdbuf_context *ctx;
	int i;

retry:
	notempty = 0;
	for_each_cmdbuf_ctx(man, i, ctx)
		vmw_cmdbuf_ctx_process(man, ctx, &notempty);

	if (man->irq_on && !notempty) {
		vmw_generic_waiter_remove(man->dev_priv,
					  SVGA_IRQFLAG_COMMAND_BUFFER,
					  &man->dev_priv->cmdbuf_waiters);
		man->irq_on = false;
	} else if (!man->irq_on && notempty) {
		vmw_generic_waiter_add(man->dev_priv,
				       SVGA_IRQFLAG_COMMAND_BUFFER,
				       &man->dev_priv->cmdbuf_waiters);
		man->irq_on = true;

		/* Rerun in case we just missed an irq. */
		goto retry;
	}
}

/**
 * vmw_cmdbuf_ctx_add - Schedule a command buffer for submission on a
 * command buffer context
 *
 * @man: The command buffer manager.
 * @header: The header of the buffer to submit.
 * @cb_context: The command buffer context to use.
 *
 * This function adds @header to the "submitted" queue of the command
 * buffer context identified by @cb_context. It then calls the command buffer
 * manager processing to potentially submit the buffer to hardware.
 * @man->lock needs to be held when calling this function.
 */
static void vmw_cmdbuf_ctx_add(struct vmw_cmdbuf_man *man,
			       struct vmw_cmdbuf_header *header,
			       SVGACBContext cb_context)
{
	if (!(header->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT))
		header->cb_header->dxContext = 0;
	header->cb_context = cb_context;
	list_add_tail(&header->list, &man->ctx[cb_context].submitted);

	vmw_cmdbuf_man_process(man);
}

/**
 * vmw_cmdbuf_man_tasklet - The main part of the command buffer interrupt
 * handler implemented as a tasklet.
 *
 * @data: Tasklet closure. A pointer to the command buffer manager cast to
 * an unsigned long.
 *
 * The bottom half (tasklet) of the interrupt handler simply calls into the
 * command buffer processor to free finished buffers and submit any
 * queued buffers to hardware.
 */
static void vmw_cmdbuf_man_tasklet(unsigned long data)
{
	struct vmw_cmdbuf_man *man = (struct vmw_cmdbuf_man *) data;

	spin_lock(&man->lock);
	vmw_cmdbuf_man_process(man);
	spin_unlock(&man->lock);
}

/**
 * vmw_cmdbuf_work_func - The deferred work function that handles
 * command buffer errors.
 *
 * @work: The work func closure argument.
 *
 * Restarting the command buffer context after an error requires process
 * context, so it is deferred to this work function.
 */
static void vmw_cmdbuf_work_func(struct work_struct *work)
{
	struct vmw_cmdbuf_man *man =
		container_of(work, struct vmw_cmdbuf_man, work);
	struct vmw_cmdbuf_header *entry, *next;
	uint32_t dummy;
	bool restart = false;

	spin_lock_bh(&man->lock);
	list_for_each_entry_safe(entry, next, &man->error, list) {
		restart = true;
		DRM_ERROR("Command buffer error.\n");

		list_del(&entry->list);
		__vmw_cmdbuf_header_free(entry);
		wake_up_all(&man->idle_queue);
	}
	spin_unlock_bh(&man->lock);

	if (restart && vmw_cmdbuf_startstop(man, true))
		DRM_ERROR("Failed restarting command buffer context 0.\n");

	/* Send a new fence in case one was removed */
	vmw_fifo_send_fence(man->dev_priv, &dummy);
}

/**
 * vmw_cmdbuf_man_idle - Check whether the command buffer manager is idle.
 *
 * @man: The command buffer manager.
 * @check_preempted: Check also the preempted queue for pending command buffers.
 */
static bool vmw_cmdbuf_man_idle(struct vmw_cmdbuf_man *man,
				bool check_preempted)
{
	struct vmw_cmdbuf_context *ctx;
	bool idle = false;
	int i;

	spin_lock_bh(&man->lock);
	vmw_cmdbuf_man_process(man);
	for_each_cmdbuf_ctx(man, i, ctx) {
		if (!list_empty(&ctx->submitted) ||
		    !list_empty(&ctx->hw_submitted) ||
		    (check_preempted && !list_empty(&ctx->preempted)))
			goto out_unlock;
	}

	idle = list_empty(&man->error);

out_unlock:
	spin_unlock_bh(&man->lock);

	return idle;
}

/**
 * __vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
 * command submissions
 *
 * @man: The command buffer manager.
 *
 * Flushes the current command buffer without allocating a new one. A new one
 * is automatically allocated when needed. Call with @man->cur_mutex held.
 */
static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man)
{
	struct vmw_cmdbuf_header *cur = man->cur;

	WARN_ON(!mutex_is_locked(&man->cur_mutex));

	if (!cur)
		return;

	spin_lock_bh(&man->lock);
	if (man->cur_pos == 0) {
		__vmw_cmdbuf_header_free(cur);
		goto out_unlock;
	}

	man->cur->cb_header->length = man->cur_pos;
	vmw_cmdbuf_ctx_add(man, man->cur, SVGA_CB_CONTEXT_0);
out_unlock:
	spin_unlock_bh(&man->lock);
	man->cur = NULL;
	man->cur_pos = 0;
}

/**
 * vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
 * command submissions
 *
 * @man: The command buffer manager.
 * @interruptible: Whether to sleep interruptibly while waiting for the lock.
 *
 * Flushes the current command buffer without allocating a new one. A new one
 * is automatically allocated when needed.
 */
int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
			 bool interruptible)
{
	int ret = vmw_cmdbuf_cur_lock(man, interruptible);

	if (ret)
		return ret;

	__vmw_cmdbuf_cur_flush(man);
	vmw_cmdbuf_cur_unlock(man);

	return 0;
}

/**
 * vmw_cmdbuf_idle - Wait for command buffer manager idle.
 *
 * @man: The command buffer manager.
 * @interruptible: Sleep interruptible while waiting.
 * @timeout: Time out after this many ticks.
 *
 * Wait until the command buffer manager has processed all command buffers,
 * or until a timeout occurs. If a timeout occurs, the function will return
 * -EBUSY.
 */
int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
		    unsigned long timeout)
{
	int ret;

	ret = vmw_cmdbuf_cur_flush(man, interruptible);
	vmw_generic_waiter_add(man->dev_priv,
			       SVGA_IRQFLAG_COMMAND_BUFFER,
			       &man->dev_priv->cmdbuf_waiters);

	if (interruptible) {
		ret = wait_event_interruptible_timeout
			(man->idle_queue, vmw_cmdbuf_man_idle(man, true),
			 timeout);
	} else {
		ret = wait_event_timeout
			(man->idle_queue, vmw_cmdbuf_man_idle(man, true),
			 timeout);
	}
	vmw_generic_waiter_remove(man->dev_priv,
				  SVGA_IRQFLAG_COMMAND_BUFFER,
				  &man->dev_priv->cmdbuf_waiters);
	if (ret == 0) {
		if (!vmw_cmdbuf_man_idle(man, true))
			ret = -EBUSY;
		else
			ret = 0;
	}
	if (ret > 0)
		ret = 0;

	return ret;
}

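/*
 * Illustrative use (a sketch drawn from the teardown paths later in this
 * file, not part of the driver): callers that must drain the manager, such
 * as pool removal, wait non-interruptibly with a generous timeout and can
 * only log on failure:
 *
 *	if (vmw_cmdbuf_idle(man, false, 10 * HZ))
 *		DRM_ERROR("Failed to idle command buffer manager.\n");
 */
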
/**
 * vmw_cmdbuf_try_alloc - Try to allocate buffer space from the main pool.
 *
 * @man: The command buffer manager.
 * @info: Allocation info. Will hold the size on entry and allocated mm node
 * on successful return.
 *
 * Try to allocate buffer space from the main pool. Returns true on success,
 * false if the pool is currently out of space; in the latter case the caller
 * is expected to retry, typically from the allocation wait queue.
 */
static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
				 struct vmw_cmdbuf_alloc_info *info)
{
	int ret;

	if (info->done)
		return true;

	memset(info->node, 0, sizeof(*info->node));
	spin_lock_bh(&man->lock);
	ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
	if (ret) {
		vmw_cmdbuf_man_process(man);
		ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
	}

	spin_unlock_bh(&man->lock);
	info->done = !ret;

	return info->done;
}

/**
 * vmw_cmdbuf_alloc_space - Allocate buffer space from the main pool.
 *
 * @man: The command buffer manager.
 * @node: Pointer to pre-allocated range-manager node.
 * @size: The size of the allocation.
 * @interruptible: Whether to sleep interruptible while waiting for space.
 *
 * This function allocates buffer space from the main pool, and if no space
 * is currently available, it turns on IRQ handling and sleeps waiting for it
 * to become available.
 */
static int vmw_cmdbuf_alloc_space(struct vmw_cmdbuf_man *man,
				  struct drm_mm_node *node,
				  size_t size,
				  bool interruptible)
{
	struct vmw_cmdbuf_alloc_info info;

	info.page_size = PAGE_ALIGN(size) >> PAGE_SHIFT;
	info.node = node;
	info.done = false;

	/*
	 * To prevent starvation of large requests, only one allocating call
	 * at a time waiting for space.
	 */
	if (interruptible) {
		if (mutex_lock_interruptible(&man->space_mutex))
			return -ERESTARTSYS;
	} else {
		mutex_lock(&man->space_mutex);
	}

	/* Try to allocate space without waiting. */
	if (vmw_cmdbuf_try_alloc(man, &info))
		goto out_unlock;

	vmw_generic_waiter_add(man->dev_priv,
			       SVGA_IRQFLAG_COMMAND_BUFFER,
			       &man->dev_priv->cmdbuf_waiters);

	if (interruptible) {
		int ret;

		ret = wait_event_interruptible
			(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
		if (ret) {
			vmw_generic_waiter_remove
				(man->dev_priv, SVGA_IRQFLAG_COMMAND_BUFFER,
				 &man->dev_priv->cmdbuf_waiters);
			mutex_unlock(&man->space_mutex);
			return ret;
		}
	} else {
		wait_event(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
	}
	vmw_generic_waiter_remove(man->dev_priv,
				  SVGA_IRQFLAG_COMMAND_BUFFER,
				  &man->dev_priv->cmdbuf_waiters);

out_unlock:
	mutex_unlock(&man->space_mutex);

	return 0;
}

/**
 * vmw_cmdbuf_space_pool - Set up a command buffer header with command buffer
 * space from the main pool.
 *
 * @man: The command buffer manager.
 * @header: Pointer to the header to set up.
 * @size: The requested size of the buffer space.
 * @interruptible: Whether to sleep interruptible while waiting for space.
 */
static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
				 struct vmw_cmdbuf_header *header,
				 size_t size,
				 bool interruptible)
{
	SVGACBHeader *cb_hdr;
	size_t offset;
	int ret;

	if (!man->has_pool)
		return -ENOMEM;

	ret = vmw_cmdbuf_alloc_space(man, &header->node, size, interruptible);
	if (ret)
		return ret;

	header->cb_header = dma_pool_zalloc(man->headers, GFP_KERNEL,
					    &header->handle);
	if (!header->cb_header) {
		ret = -ENOMEM;
		goto out_no_cb_header;
	}

	header->size = header->node.size << PAGE_SHIFT;
	cb_hdr = header->cb_header;
	offset = header->node.start << PAGE_SHIFT;
	header->cmd = man->map + offset;
	if (man->using_mob) {
		cb_hdr->flags = SVGA_CB_FLAG_MOB;
		cb_hdr->ptr.mob.mobid = man->cmd_space->mem.start;
		cb_hdr->ptr.mob.mobOffset = offset;
	} else {
		cb_hdr->ptr.pa = (u64)man->handle + (u64)offset;
	}

	return 0;

out_no_cb_header:
	spin_lock_bh(&man->lock);
	drm_mm_remove_node(&header->node);
	spin_unlock_bh(&man->lock);

	return ret;
}

/**
 * vmw_cmdbuf_space_inline - Set up a command buffer header with
 * inline command buffer space.
 *
 * @man: The command buffer manager.
 * @header: Pointer to the header to set up.
 * @size: The requested size of the buffer space.
 */
static int vmw_cmdbuf_space_inline(struct vmw_cmdbuf_man *man,
				   struct vmw_cmdbuf_header *header,
				   int size)
{
	struct vmw_cmdbuf_dheader *dheader;
	SVGACBHeader *cb_hdr;

	if (WARN_ON_ONCE(size > VMW_CMDBUF_INLINE_SIZE))
		return -ENOMEM;

	dheader = dma_pool_zalloc(man->dheaders, GFP_KERNEL,
				  &header->handle);
	if (!dheader)
		return -ENOMEM;

	header->inline_space = true;
	header->size = VMW_CMDBUF_INLINE_SIZE;
	cb_hdr = &dheader->cb_header;
	header->cb_header = cb_hdr;
	header->cmd = dheader->cmd;
	cb_hdr->status = SVGA_CB_STATUS_NONE;
	cb_hdr->flags = SVGA_CB_FLAG_NONE;
	cb_hdr->ptr.pa = (u64)header->handle +
		(u64)offsetof(struct vmw_cmdbuf_dheader, cmd);

	return 0;
}

/**
 * vmw_cmdbuf_alloc - Allocate a command buffer header complete with
 * command buffer space.
 *
 * @man: The command buffer manager.
 * @size: The requested size of the buffer space.
 * @interruptible: Whether to sleep interruptible while waiting for space.
 * @p_header: Points to a header pointer to populate on successful return.
 *
 * Returns a pointer to command buffer space if successful. Otherwise
 * returns an error pointer. The header pointer returned in @p_header should
 * be used for upcoming calls to vmw_cmdbuf_reserve() and vmw_cmdbuf_commit().
 */
void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
		       size_t size, bool interruptible,
		       struct vmw_cmdbuf_header **p_header)
{
	struct vmw_cmdbuf_header *header;
	int ret = 0;

	*p_header = NULL;

	header = kzalloc(sizeof(*header), GFP_KERNEL);
	if (!header)
		return ERR_PTR(-ENOMEM);

	if (size <= VMW_CMDBUF_INLINE_SIZE)
		ret = vmw_cmdbuf_space_inline(man, header, size);
	else
		ret = vmw_cmdbuf_space_pool(man, header, size, interruptible);

	if (ret) {
		kfree(header);
		return ERR_PTR(ret);
	}

	header->man = man;
	INIT_LIST_HEAD(&header->list);
	header->cb_header->status = SVGA_CB_STATUS_NONE;
	*p_header = header;

	return header->cmd;
}

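/*
 * Illustrative usage sketch (not part of the driver; the command id and
 * argument below are hypothetical):
 *
 *	struct vmw_cmdbuf_header *header;
 *	u32 *cmd = vmw_cmdbuf_alloc(man, sizeof(u32) * 2, true, &header);
 *
 *	if (IS_ERR(cmd))
 *		return PTR_ERR(cmd);
 *	cmd[0] = SOME_SVGA_CMD_ID;
 *	cmd[1] = some_arg;
 *	vmw_cmdbuf_commit(man, sizeof(u32) * 2, header, false);
 *
 * After the commit, the header is owned by the manager, which frees it
 * once the device has processed the buffer.
 */
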
/**
 * vmw_cmdbuf_reserve_cur - Reserve space for commands in the current
 * command buffer.
 *
 * @man: The command buffer manager.
 * @size: The requested size of the commands.
 * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
 * @interruptible: Whether to sleep interruptible while waiting for space.
 *
 * Returns a pointer to command buffer space if successful. Otherwise
 * returns an error pointer.
 */
static void *vmw_cmdbuf_reserve_cur(struct vmw_cmdbuf_man *man,
				    size_t size,
				    int ctx_id,
				    bool interruptible)
{
	struct vmw_cmdbuf_header *cur;
	void *ret;

	if (vmw_cmdbuf_cur_lock(man, interruptible))
		return ERR_PTR(-ERESTARTSYS);

	cur = man->cur;
	if (cur && (size + man->cur_pos > cur->size ||
		    ((cur->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT) &&
		     ctx_id != cur->cb_header->dxContext)))
		__vmw_cmdbuf_cur_flush(man);

	if (!man->cur) {
		ret = vmw_cmdbuf_alloc(man,
				       max_t(size_t, size, man->default_size),
				       interruptible, &man->cur);
		if (IS_ERR(ret)) {
			vmw_cmdbuf_cur_unlock(man);
			return ret;
		}

		cur = man->cur;
	}

	if (ctx_id != SVGA3D_INVALID_ID) {
		cur->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
		cur->cb_header->dxContext = ctx_id;
	}

	cur->reserved = size;

	return (void *) (man->cur->cmd + man->cur_pos);
}

/**
 * vmw_cmdbuf_commit_cur - Commit commands in the current command buffer.
 *
 * @man: The command buffer manager.
 * @size: The size of the commands actually written.
 * @flush: Whether to flush the command buffer immediately.
 */
static void vmw_cmdbuf_commit_cur(struct vmw_cmdbuf_man *man,
				  size_t size, bool flush)
{
	struct vmw_cmdbuf_header *cur = man->cur;

	WARN_ON(!mutex_is_locked(&man->cur_mutex));

	WARN_ON(size > cur->reserved);
	man->cur_pos += size;
	if (!size)
		cur->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
	if (flush)
		__vmw_cmdbuf_cur_flush(man);
	vmw_cmdbuf_cur_unlock(man);
}

/**
 * vmw_cmdbuf_reserve - Reserve space for commands in a command buffer.
 *
 * @man: The command buffer manager.
 * @size: The requested size of the commands.
 * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
 * @interruptible: Whether to sleep interruptible while waiting for space.
 * @header: Header of the command buffer. NULL if the current command buffer
 * should be used.
 *
 * Returns a pointer to command buffer space if successful. Otherwise
 * returns an error pointer.
 */
void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
			 int ctx_id, bool interruptible,
			 struct vmw_cmdbuf_header *header)
{
	if (!header)
		return vmw_cmdbuf_reserve_cur(man, size, ctx_id, interruptible);

	if (size > header->size)
		return ERR_PTR(-EINVAL);

	if (ctx_id != SVGA3D_INVALID_ID) {
		header->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
		header->cb_header->dxContext = ctx_id;
	}

	header->reserved = size;
	return header->cmd;
}

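/*
 * Illustrative pairing sketch (not part of the driver): reserve and commit
 * come in pairs, whether operating on the current command buffer (header ==
 * NULL) or on a pre-allocated header:
 *
 *	void *cmd = vmw_cmdbuf_reserve(man, size, ctx_id, true, header);
 *
 *	if (IS_ERR(cmd))
 *		return PTR_ERR(cmd);
 *	// ... write at most size bytes of commands to cmd ...
 *	vmw_cmdbuf_commit(man, bytes_written, header, flush);
 *
 * Committing a size of zero clears the DX context flag that was set at
 * reserve time.
 */
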
/**
 * vmw_cmdbuf_commit - Commit commands in a command buffer.
 *
 * @man: The command buffer manager.
 * @size: The size of the commands actually written.
 * @header: Header of the command buffer. NULL if the current command buffer
 * should be used.
 * @flush: Whether to flush the command buffer immediately.
 */
void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
		       struct vmw_cmdbuf_header *header, bool flush)
{
	if (!header) {
		vmw_cmdbuf_commit_cur(man, size, flush);
		return;
	}

	(void) vmw_cmdbuf_cur_lock(man, false);
	__vmw_cmdbuf_cur_flush(man);
	WARN_ON(size > header->reserved);
	man->cur = header;
	man->cur_pos = size;
	if (!size)
		header->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
	if (flush)
		__vmw_cmdbuf_cur_flush(man);
	vmw_cmdbuf_cur_unlock(man);
}

/**
 * vmw_cmdbuf_tasklet_schedule - Schedule the interrupt handler bottom half.
 *
 * @man: The command buffer manager.
 */
void vmw_cmdbuf_tasklet_schedule(struct vmw_cmdbuf_man *man)
{
	if (!man)
		return;

	tasklet_schedule(&man->tasklet);
}

/**
 * vmw_cmdbuf_send_device_command - Send a command through the device context.
 *
 * @man: The command buffer manager.
 * @command: Pointer to the command to send.
 * @size: Size of the command.
 *
 * Synchronously sends a device context command.
 */
static int vmw_cmdbuf_send_device_command(struct vmw_cmdbuf_man *man,
					  const void *command,
					  size_t size)
{
	struct vmw_cmdbuf_header *header;
	int status;
	void *cmd = vmw_cmdbuf_alloc(man, size, false, &header);

	if (IS_ERR(cmd))
		return PTR_ERR(cmd);

	memcpy(cmd, command, size);
	header->cb_header->length = size;
	header->cb_context = SVGA_CB_CONTEXT_DEVICE;
	spin_lock_bh(&man->lock);
	status = vmw_cmdbuf_header_submit(header);
	spin_unlock_bh(&man->lock);
	vmw_cmdbuf_header_free(header);

	if (status != SVGA_CB_STATUS_COMPLETED) {
		DRM_ERROR("Device context command failed with status %d\n",
			  status);
		return -EINVAL;
	}

	return 0;
}

/**
 * vmw_cmdbuf_startstop - Send a start / stop command through the device
 * context.
 *
 * @man: The command buffer manager.
 * @enable: Whether to enable or disable the context.
 *
 * Synchronously sends a device start / stop context command.
 */
static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man,
				bool enable)
{
	struct {
		uint32 id;
		SVGADCCmdStartStop body;
	} __packed cmd;

	cmd.id = SVGA_DC_CMD_START_STOP_CONTEXT;
	cmd.body.enable = (enable) ? 1 : 0;
	cmd.body.context = SVGA_CB_CONTEXT_0;

	return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
}

/**
 * vmw_cmdbuf_set_pool_size - Set command buffer manager sizes
 *
 * @man: The command buffer manager.
 * @size: The size of the main space pool.
 * @default_size: The default size of the command buffer for small kernel
 * submissions.
 *
 * Set the size and allocate the main command buffer space pool,
 * as well as the default size of the command buffer for
 * small kernel submissions. If successful, this enables large command
 * submissions. Note that this function requires that rudimentary command
 * submission is already available and that the MOB memory manager is alive.
 * Returns 0 on success. Negative error code on failure.
 */
int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
			     size_t size, size_t default_size)
{
	struct vmw_private *dev_priv = man->dev_priv;
	bool dummy;
	int ret;

	if (man->has_pool)
		return -EINVAL;

	/* First, try to allocate a huge chunk of DMA memory */
	size = PAGE_ALIGN(size);
	man->map = dma_alloc_coherent(&dev_priv->dev->pdev->dev, size,
				      &man->handle, GFP_KERNEL);
	if (man->map) {
		man->using_mob = false;
	} else {
		/*
		 * DMA memory failed. If we can have command buffers in a
		 * MOB, try to use that instead. Note that this will
		 * actually call into the already enabled manager, when
		 * binding the MOB.
		 */
		if (!(dev_priv->capabilities & SVGA_CAP_DX))
			return -ENOMEM;

		ret = ttm_bo_create(&dev_priv->bdev, size, ttm_bo_type_device,
				    &vmw_mob_ne_placement, 0, false, NULL,
				    &man->cmd_space);
		if (ret)
			return ret;

		man->using_mob = true;
		ret = ttm_bo_kmap(man->cmd_space, 0, size >> PAGE_SHIFT,
				  &man->map_obj);
		if (ret)
			goto out_no_map;

		man->map = ttm_kmap_obj_virtual(&man->map_obj, &dummy);
	}

	man->size = size;
	drm_mm_init(&man->mm, 0, size >> PAGE_SHIFT);

	man->has_pool = true;

	/*
	 * For now, set the default size to VMW_CMDBUF_INLINE_SIZE to
	 * prevent deadlocks from happening when vmw_cmdbuf_space_pool()
	 * needs to wait for space and we block on further command
	 * submissions to be able to free up space.
	 */
	man->default_size = VMW_CMDBUF_INLINE_SIZE;
	DRM_INFO("Using command buffers with %s pool.\n",
		 (man->using_mob) ? "MOB" : "DMA");

	return 0;

out_no_map:
	if (man->using_mob)
		ttm_bo_unref(&man->cmd_space);

	return ret;
}

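/*
 * Illustrative bring-up order (a sketch of how the functions in this file
 * fit together, assuming a typical driver load path):
 *
 *	man = vmw_cmdbuf_man_create(dev_priv);	// inline submissions only
 *	if (IS_ERR(man))
 *		return PTR_ERR(man);
 *	ret = vmw_cmdbuf_set_pool_size(man, size, default_size);
 *	// ... large command submissions now enabled ...
 *
 * Teardown reverses the order: vmw_cmdbuf_remove_pool() before the MOB
 * memory manager goes away, then vmw_cmdbuf_man_destroy().
 */
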
/**
 * vmw_cmdbuf_man_create - Create a command buffer manager and enable it for
 * inline command buffer submissions only.
 *
 * @dev_priv: Pointer to device private structure.
 *
 * Returns a pointer to a command buffer manager on success, or an error
 * pointer on failure. The command buffer manager will be enabled for
 * submissions of size VMW_CMDBUF_INLINE_SIZE only.
 */
struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
{
	struct vmw_cmdbuf_man *man;
	struct vmw_cmdbuf_context *ctx;
	int i;
	int ret;

	if (!(dev_priv->capabilities & SVGA_CAP_COMMAND_BUFFERS))
		return ERR_PTR(-ENOSYS);

	man = kzalloc(sizeof(*man), GFP_KERNEL);
	if (!man)
		return ERR_PTR(-ENOMEM);

	man->headers = dma_pool_create("vmwgfx cmdbuf",
				       &dev_priv->dev->pdev->dev,
				       sizeof(SVGACBHeader),
				       64, PAGE_SIZE);
	if (!man->headers) {
		ret = -ENOMEM;
		goto out_no_pool;
	}

	man->dheaders = dma_pool_create("vmwgfx inline cmdbuf",
					&dev_priv->dev->pdev->dev,
					sizeof(struct vmw_cmdbuf_dheader),
					64, PAGE_SIZE);
	if (!man->dheaders) {
		ret = -ENOMEM;
		goto out_no_dpool;
	}

	for_each_cmdbuf_ctx(man, i, ctx)
		vmw_cmdbuf_ctx_init(ctx);

	INIT_LIST_HEAD(&man->error);
	spin_lock_init(&man->lock);
	mutex_init(&man->cur_mutex);
	mutex_init(&man->space_mutex);
	tasklet_init(&man->tasklet, vmw_cmdbuf_man_tasklet,
		     (unsigned long) man);
	man->default_size = VMW_CMDBUF_INLINE_SIZE;
	init_waitqueue_head(&man->alloc_queue);
	init_waitqueue_head(&man->idle_queue);
	man->dev_priv = dev_priv;
	man->max_hw_submitted = SVGA_CB_MAX_QUEUED_PER_CONTEXT - 1;
	INIT_WORK(&man->work, &vmw_cmdbuf_work_func);
	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ERROR,
			       &dev_priv->error_waiters);
	ret = vmw_cmdbuf_startstop(man, true);
	if (ret) {
		DRM_ERROR("Failed starting command buffer context 0.\n");
		vmw_cmdbuf_man_destroy(man);
		return ERR_PTR(ret);
	}

	return man;

out_no_dpool:
	dma_pool_destroy(man->headers);
out_no_pool:
	kfree(man);

	return ERR_PTR(ret);
}

/**
 * vmw_cmdbuf_remove_pool - Take down the main buffer space pool.
 *
 * @man: Pointer to a command buffer manager.
 *
 * This function removes the main buffer space pool, and should be called
 * before MOB memory management is removed. When this function has been called,
 * only small command buffer submissions of size VMW_CMDBUF_INLINE_SIZE or
 * less are allowed, and the default size of the command buffer for small kernel
 * submissions is also set to this size.
 */
void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man)
{
	if (!man->has_pool)
		return;

	man->has_pool = false;
	man->default_size = VMW_CMDBUF_INLINE_SIZE;
	(void) vmw_cmdbuf_idle(man, false, 10*HZ);
	if (man->using_mob) {
		(void) ttm_bo_kunmap(&man->map_obj);
		ttm_bo_unref(&man->cmd_space);
	} else {
		dma_free_coherent(&man->dev_priv->dev->pdev->dev,
				  man->size, man->map, man->handle);
	}
}

/**
 * vmw_cmdbuf_man_destroy - Take down a command buffer manager.
 *
 * @man: Pointer to a command buffer manager.
 *
 * This function idles and then destroys a command buffer manager.
 */
void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man)
{
	WARN_ON_ONCE(man->has_pool);
	(void) vmw_cmdbuf_idle(man, false, 10*HZ);
	if (vmw_cmdbuf_startstop(man, false))
		DRM_ERROR("Failed stopping command buffer context 0.\n");

	vmw_generic_waiter_remove(man->dev_priv, SVGA_IRQFLAG_ERROR,
				  &man->dev_priv->error_waiters);
	tasklet_kill(&man->tasklet);
	(void) cancel_work_sync(&man->work);
	dma_pool_destroy(man->dheaders);
	dma_pool_destroy(man->headers);
	mutex_destroy(&man->cur_mutex);
	mutex_destroy(&man->space_mutex);
	kfree(man);
}