u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu)
{
- struct etnaviv_cmdbuf *buffer = gpu->buffer;
+ struct etnaviv_cmdbuf *buffer = &gpu->buffer;
lockdep_assert_held(&gpu->lock);
u16 etnaviv_buffer_config_mmuv2(struct etnaviv_gpu *gpu, u32 mtlb_addr, u32 safe_addr)
{
- struct etnaviv_cmdbuf *buffer = gpu->buffer;
+ struct etnaviv_cmdbuf *buffer = &gpu->buffer;
lockdep_assert_held(&gpu->lock);
void etnaviv_buffer_end(struct etnaviv_gpu *gpu)
{
- struct etnaviv_cmdbuf *buffer = gpu->buffer;
+ struct etnaviv_cmdbuf *buffer = &gpu->buffer;
unsigned int waitlink_offset = buffer->user_size - 16;
u32 link_target, flush = 0;
/* Append a 'sync point' to the ring buffer. */
void etnaviv_sync_point_queue(struct etnaviv_gpu *gpu, unsigned int event)
{
- struct etnaviv_cmdbuf *buffer = gpu->buffer;
+ struct etnaviv_cmdbuf *buffer = &gpu->buffer;
unsigned int waitlink_offset = buffer->user_size - 16;
u32 dwords, target;
void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
unsigned int event, struct etnaviv_cmdbuf *cmdbuf)
{
- struct etnaviv_cmdbuf *buffer = gpu->buffer;
+ struct etnaviv_cmdbuf *buffer = &gpu->buffer;
unsigned int waitlink_offset = buffer->user_size - 16;
u32 return_target, return_dwords;
u32 link_target, link_dwords;
kfree(suballoc);
}
-struct etnaviv_cmdbuf *
-etnaviv_cmdbuf_new(struct etnaviv_cmdbuf_suballoc *suballoc, u32 size,
- size_t nr_bos)
+int etnaviv_cmdbuf_init(struct etnaviv_cmdbuf_suballoc *suballoc,
+ struct etnaviv_cmdbuf *cmdbuf, u32 size)
{
- struct etnaviv_cmdbuf *cmdbuf;
- size_t sz = size_vstruct(nr_bos, sizeof(cmdbuf->bo_map[0]),
- sizeof(*cmdbuf));
int granule_offs, order, ret;
- cmdbuf = kzalloc(sz, GFP_KERNEL);
- if (!cmdbuf)
- return NULL;
-
cmdbuf->suballoc = suballoc;
cmdbuf->size = size;
if (!ret) {
dev_err(suballoc->gpu->dev,
"Timeout waiting for cmdbuf space\n");
- return NULL;
+ return -ETIMEDOUT;
}
goto retry;
}
cmdbuf->suballoc_offset = granule_offs * SUBALLOC_GRANULE;
cmdbuf->vaddr = suballoc->vaddr + cmdbuf->suballoc_offset;
- return cmdbuf;
+ return 0;
}
void etnaviv_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf)
suballoc->free_space = 1;
mutex_unlock(&suballoc->lock);
wake_up_all(&suballoc->free_event);
- kfree(cmdbuf);
}
u32 etnaviv_cmdbuf_get_va(struct etnaviv_cmdbuf *buf)
void *vaddr;
u32 size;
u32 user_size;
- /* fence after which this buffer is to be disposed */
- struct dma_fence *fence;
- /* per GPU in-flight list */
- struct list_head node;
- /* BOs attached to this command buffer */
- unsigned int nr_bos;
- struct etnaviv_vram_mapping *bo_map[0];
};
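
The fields deleted above all tracked per-submission state; what stays on the command buffer is only the suballocator bookkeeping. Roughly, and assembled from the fields referenced elsewhere in this diff rather than copied verbatim from the header, the slimmed-down structure looks like this (the exact types of ctx and suballoc_offset are assumptions):

struct etnaviv_cmdbuf {
        /* suballocator this command buffer was carved out of */
        struct etnaviv_cmdbuf_suballoc *suballoc;
        /* owning user context, set from file->driver_priv in the submit path */
        struct etnaviv_file_private *ctx;
        /* position and mapping inside the suballocated region */
        int suballoc_offset;
        void *vaddr;
        u32 size;
        u32 user_size;
};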
struct etnaviv_cmdbuf_suballoc *
etnaviv_cmdbuf_suballoc_new(struct etnaviv_gpu * gpu);
void etnaviv_cmdbuf_suballoc_destroy(struct etnaviv_cmdbuf_suballoc *suballoc);
-struct etnaviv_cmdbuf *
-etnaviv_cmdbuf_new(struct etnaviv_cmdbuf_suballoc *suballoc, u32 size,
- size_t nr_bos);
+
+int etnaviv_cmdbuf_init(struct etnaviv_cmdbuf_suballoc *suballoc,
+ struct etnaviv_cmdbuf *cmdbuf, u32 size);
void etnaviv_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf);
u32 etnaviv_cmdbuf_get_va(struct etnaviv_cmdbuf *buf);
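
With etnaviv_cmdbuf_new() gone, owners embed a struct etnaviv_cmdbuf and pair etnaviv_cmdbuf_init() with etnaviv_cmdbuf_free(). A minimal caller-side sketch; foo_engine and its helpers are hypothetical, only the etnaviv_cmdbuf_* calls come from the declarations above:

/* hypothetical owner that embeds its command buffer */
struct foo_engine {
        struct etnaviv_cmdbuf ring;
};

static int foo_engine_init(struct foo_engine *engine,
                           struct etnaviv_cmdbuf_suballoc *suballoc)
{
        /* claims suballocator space; returns 0 on success or a negative errno */
        return etnaviv_cmdbuf_init(suballoc, &engine->ring, PAGE_SIZE);
}

static void foo_engine_fini(struct foo_engine *engine)
{
        /* releases the suballocator space; the embedded struct is not kfree()d */
        etnaviv_cmdbuf_free(&engine->ring);
}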
static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu, struct seq_file *m)
{
- struct etnaviv_cmdbuf *buf = gpu->buffer;
+ struct etnaviv_cmdbuf *buf = &gpu->buffer;
u32 size = buf->size;
u32 *ptr = buf->vaddr;
u32 i;
struct core_dump_iterator iter;
struct etnaviv_vram_mapping *vram;
struct etnaviv_gem_object *obj;
- struct etnaviv_cmdbuf *cmd;
+ struct etnaviv_gem_submit *submit;
unsigned int n_obj, n_bomap_pages;
size_t file_size, mmu_size;
__le64 *bomap, *bomap_start;
n_bomap_pages = 0;
file_size = ARRAY_SIZE(etnaviv_dump_registers) *
sizeof(struct etnaviv_dump_registers) +
- mmu_size + gpu->buffer->size;
+ mmu_size + gpu->buffer.size;
/* Add in the active command buffers */
- list_for_each_entry(cmd, &gpu->active_cmd_list, node) {
- file_size += cmd->size;
+ list_for_each_entry(submit, &gpu->active_submit_list, node) {
+ file_size += submit->cmdbuf.size;
n_obj++;
}
etnaviv_core_dump_registers(&iter, gpu);
etnaviv_core_dump_mmu(&iter, gpu, mmu_size);
- etnaviv_core_dump_mem(&iter, ETDUMP_BUF_RING, gpu->buffer->vaddr,
- gpu->buffer->size,
- etnaviv_cmdbuf_get_va(gpu->buffer));
-
- list_for_each_entry(cmd, &gpu->active_cmd_list, node)
- etnaviv_core_dump_mem(&iter, ETDUMP_BUF_CMD, cmd->vaddr,
- cmd->size, etnaviv_cmdbuf_get_va(cmd));
+ etnaviv_core_dump_mem(&iter, ETDUMP_BUF_RING, gpu->buffer.vaddr,
+ gpu->buffer.size,
+ etnaviv_cmdbuf_get_va(&gpu->buffer));
+
+ list_for_each_entry(submit, &gpu->active_submit_list, node)
+ etnaviv_core_dump_mem(&iter, ETDUMP_BUF_CMD,
+ submit->cmdbuf.vaddr, submit->cmdbuf.size,
+ etnaviv_cmdbuf_get_va(&submit->cmdbuf));
/* Reserve space for the bomap */
if (n_bomap_pages) {
#define __ETNAVIV_GEM_H__
#include <linux/reservation.h>
+#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
struct dma_fence;
struct kref refcount;
struct etnaviv_gpu *gpu;
struct dma_fence *out_fence, *in_fence;
+ struct list_head node; /* GPU active submit list */
+ struct etnaviv_cmdbuf cmdbuf;
u32 exec_state;
u32 flags;
unsigned int nr_pmrs;
container_of(kref, struct etnaviv_gem_submit, refcount);
unsigned i;
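+ /* the submit may be torn down before its cmdbuf was ever initialized */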
+ if (submit->cmdbuf.suballoc)
+ etnaviv_cmdbuf_free(&submit->cmdbuf);
+
for (i = 0; i < submit->nr_bos; i++) {
struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
struct drm_etnaviv_gem_submit_pmr *pmrs;
struct drm_etnaviv_gem_submit_bo *bos;
struct etnaviv_gem_submit *submit;
- struct etnaviv_cmdbuf *cmdbuf;
struct etnaviv_gpu *gpu;
struct sync_file *sync_file = NULL;
struct ww_acquire_ctx ticket;
relocs = kvmalloc_array(args->nr_relocs, sizeof(*relocs), GFP_KERNEL);
pmrs = kvmalloc_array(args->nr_pmrs, sizeof(*pmrs), GFP_KERNEL);
stream = kvmalloc_array(1, args->stream_size, GFP_KERNEL);
- cmdbuf = etnaviv_cmdbuf_new(gpu->cmdbuf_suballoc,
- ALIGN(args->stream_size, 8) + 8,
- args->nr_bos);
- if (!bos || !relocs || !pmrs || !stream || !cmdbuf) {
+ if (!bos || !relocs || !pmrs || !stream) {
ret = -ENOMEM;
goto err_submit_cmds;
}
- cmdbuf->ctx = file->driver_priv;
-
ret = copy_from_user(bos, u64_to_user_ptr(args->bos),
args->nr_bos * sizeof(*bos));
if (ret) {
goto err_submit_ww_acquire;
}
+ ret = etnaviv_cmdbuf_init(gpu->cmdbuf_suballoc, &submit->cmdbuf,
+ ALIGN(args->stream_size, 8) + 8);
+ if (ret)
+ goto err_submit_objects;
+
+ submit->cmdbuf.ctx = file->driver_priv;
submit->exec_state = args->exec_state;
submit->flags = args->flags;
if (ret)
goto err_submit_objects;
- memcpy(cmdbuf->vaddr, stream, args->stream_size);
- cmdbuf->user_size = ALIGN(args->stream_size, 8);
+ memcpy(submit->cmdbuf.vaddr, stream, args->stream_size);
+ submit->cmdbuf.user_size = ALIGN(args->stream_size, 8);
- ret = etnaviv_gpu_submit(gpu, submit, cmdbuf);
+ ret = etnaviv_gpu_submit(gpu, submit);
if (ret)
goto err_submit_objects;
submit_attach_object_fences(submit);
- cmdbuf = NULL;
-
if (args->flags & ETNA_SUBMIT_FENCE_FD_OUT) {
/*
* This can be improved: ideally we want to allocate the sync
err_submit_cmds:
if (ret && (out_fence_fd >= 0))
put_unused_fd(out_fence_fd);
- /* if we still own the cmdbuf */
- if (cmdbuf)
- etnaviv_cmdbuf_free(cmdbuf);
if (stream)
kvfree(stream);
if (bos)
prefetch = etnaviv_buffer_init(gpu);
gpu_write(gpu, VIVS_HI_INTR_ENBL, ~0U);
- etnaviv_gpu_start_fe(gpu, etnaviv_cmdbuf_get_va(gpu->buffer),
+ etnaviv_gpu_start_fe(gpu, etnaviv_cmdbuf_get_va(&gpu->buffer),
prefetch);
}
}
/* Create buffer: */
- gpu->buffer = etnaviv_cmdbuf_new(gpu->cmdbuf_suballoc, PAGE_SIZE, 0);
- if (!gpu->buffer) {
- ret = -ENOMEM;
+ ret = etnaviv_cmdbuf_init(gpu->cmdbuf_suballoc, &gpu->buffer,
+ PAGE_SIZE);
+ if (ret) {
dev_err(gpu->dev, "could not create command buffer\n");
goto destroy_iommu;
}
if (gpu->mmu->version == ETNAVIV_IOMMU_V1 &&
- etnaviv_cmdbuf_get_va(gpu->buffer) > 0x80000000) {
+ etnaviv_cmdbuf_get_va(&gpu->buffer) > 0x80000000) {
ret = -EINVAL;
dev_err(gpu->dev,
"command buffer outside valid memory window\n");
return 0;
free_buffer:
- etnaviv_cmdbuf_free(gpu->buffer);
- gpu->buffer = NULL;
+ etnaviv_cmdbuf_free(&gpu->buffer);
destroy_iommu:
etnaviv_iommu_destroy(gpu->mmu);
gpu->mmu = NULL;
struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
retire_work);
u32 fence = gpu->completed_fence;
- struct etnaviv_cmdbuf *cmdbuf, *tmp;
+ struct etnaviv_gem_submit *submit, *tmp;
unsigned int i;
mutex_lock(&gpu->lock);
- list_for_each_entry_safe(cmdbuf, tmp, &gpu->active_cmd_list, node) {
- if (!dma_fence_is_signaled(cmdbuf->fence))
+ list_for_each_entry_safe(submit, tmp, &gpu->active_submit_list, node) {
+ if (!dma_fence_is_signaled(submit->out_fence))
break;
- list_del(&cmdbuf->node);
- dma_fence_put(cmdbuf->fence);
+ list_del(&submit->node);
- for (i = 0; i < cmdbuf->nr_bos; i++) {
- struct etnaviv_vram_mapping *mapping = cmdbuf->bo_map[i];
- struct etnaviv_gem_object *etnaviv_obj = mapping->object;
-
- atomic_dec(&etnaviv_obj->gpu_active);
- /* drop the refcount taken in etnaviv_gpu_submit */
- etnaviv_gem_mapping_unreference(mapping);
- }
+ for (i = 0; i < submit->nr_bos; i++)
+ atomic_dec(&submit->bos[i].obj->gpu_active);
- etnaviv_cmdbuf_free(cmdbuf);
+ etnaviv_submit_put(submit);
/*
* We need to balance the runtime PM count caused by
* each submission. Upon submission, we increment
/* add bo's to gpu's ring, and kick gpu: */
int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
- struct etnaviv_gem_submit *submit, struct etnaviv_cmdbuf *cmdbuf)
+ struct etnaviv_gem_submit *submit)
{
- struct dma_fence *fence;
unsigned int i, nr_events = 1, event[3];
int ret;
mutex_lock(&gpu->lock);
- fence = etnaviv_gpu_fence_alloc(gpu);
- if (!fence) {
+ submit->out_fence = etnaviv_gpu_fence_alloc(gpu);
+ if (!submit->out_fence) {
for (i = 0; i < nr_events; i++)
event_free(gpu, event[i]);
goto out_unlock;
}
- gpu->event[event[0]].fence = fence;
- submit->out_fence = dma_fence_get(fence);
gpu->active_fence = submit->out_fence->seqno;
if (submit->nr_pmrs) {
etnaviv_sync_point_queue(gpu, event[1]);
}
- etnaviv_buffer_queue(gpu, submit->exec_state, event[0], cmdbuf);
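+ /* keep the submit alive while it is in flight; retire_worker drops this reference */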
+ kref_get(&submit->refcount);
+ gpu->event[event[0]].fence = submit->out_fence;
+ etnaviv_buffer_queue(gpu, submit->exec_state, event[0],
+ &submit->cmdbuf);
if (submit->nr_pmrs) {
gpu->event[event[2]].sync_point = &sync_point_perfmon_sample_post;
etnaviv_sync_point_queue(gpu, event[2]);
}
- cmdbuf->fence = fence;
- list_add_tail(&cmdbuf->node, &gpu->active_cmd_list);
+ list_add_tail(&submit->node, &gpu->active_submit_list);
/* We're committed to adding this command buffer, hold a PM reference */
pm_runtime_get_noresume(gpu->dev);
for (i = 0; i < submit->nr_bos; i++) {
struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
-
- /* Each cmdbuf takes a refcount on the mapping */
- etnaviv_gem_mapping_reference(submit->bos[i].mapping);
- cmdbuf->bo_map[i] = submit->bos[i].mapping;
atomic_inc(&etnaviv_obj->gpu_active);
}
- cmdbuf->nr_bos = submit->nr_bos;
hangcheck_timer_reset(gpu);
ret = 0;
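
Read together with the retire hunk earlier, the kref_get() above is what keeps the submit, and with it the embedded cmdbuf, alive while the GPU still executes it. A condensed sketch of the retire side under that scheme; foo_retire_finished is hypothetical, and locking plus the runtime PM balancing are omitted:

static void foo_retire_finished(struct etnaviv_gpu *gpu)
{
        struct etnaviv_gem_submit *submit, *tmp;

        list_for_each_entry_safe(submit, tmp, &gpu->active_submit_list, node) {
                if (!dma_fence_is_signaled(submit->out_fence))
                        break;

                list_del(&submit->node);
                /* balances the kref_get() taken at submit time; the final put
                 * runs the release function, which frees the embedded cmdbuf */
                etnaviv_submit_put(submit);
        }
}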
static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
{
- if (gpu->buffer) {
+ if (gpu->buffer.suballoc) {
/* Replace the last WAIT with END */
mutex_lock(&gpu->lock);
etnaviv_buffer_end(gpu);
gpu->fence_context = dma_fence_context_alloc(1);
spin_lock_init(&gpu->fence_spinlock);
- INIT_LIST_HEAD(&gpu->active_cmd_list);
+ INIT_LIST_HEAD(&gpu->active_submit_list);
INIT_WORK(&gpu->retire_work, retire_worker);
INIT_WORK(&gpu->sync_point_work, sync_point_worker);
INIT_WORK(&gpu->recover_work, recover_worker);
etnaviv_gpu_hw_suspend(gpu);
#endif
- if (gpu->buffer) {
- etnaviv_cmdbuf_free(gpu->buffer);
- gpu->buffer = NULL;
- }
+ if (gpu->buffer.suballoc)
+ etnaviv_cmdbuf_free(&gpu->buffer);
if (gpu->cmdbuf_suballoc) {
etnaviv_cmdbuf_suballoc_destroy(gpu->cmdbuf_suballoc);
return ret;
/* Re-initialise the basic hardware state */
- if (gpu->drm && gpu->buffer) {
+ if (gpu->drm && gpu->buffer.suballoc) {
ret = etnaviv_gpu_hw_resume(gpu);
if (ret) {
etnaviv_gpu_clk_disable(gpu);
#include <linux/clk.h>
#include <linux/regulator/consumer.h>
+#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
struct etnaviv_gem_submit;
struct workqueue_struct *wq;
/* 'ring'-buffer: */
- struct etnaviv_cmdbuf *buffer;
+ struct etnaviv_cmdbuf buffer;
int exec_state;
/* bus base address of memory */
spinlock_t event_spinlock;
- /* list of currently in-flight command buffers */
- struct list_head active_cmd_list;
+ /* list of currently in-flight submits */
+ struct list_head active_submit_list;
u32 idle_mask;
int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
struct etnaviv_gem_object *etnaviv_obj, struct timespec *timeout);
int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
- struct etnaviv_gem_submit *submit, struct etnaviv_cmdbuf *cmdbuf);
+ struct etnaviv_gem_submit *submit);
int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu);
void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu);
int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms);
prefetch = etnaviv_buffer_config_mmuv2(gpu,
(u32)etnaviv_domain->mtlb_dma,
(u32)etnaviv_domain->base.bad_page_dma);
- etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(gpu->buffer),
+ etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(&gpu->buffer),
prefetch);
etnaviv_gpu_wait_idle(gpu, 100);