/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */
#ifndef __AMDGPU_VM_H__
#define __AMDGPU_VM_H__

#include <linux/rbtree.h>

#include "gpu_scheduler.h"
#include "amdgpu_sync.h"
#include "amdgpu_ring.h"

struct amdgpu_bo_va;
struct amdgpu_job;
struct amdgpu_bo_list_entry;

/*
 * GPUVM handling
 */

/* maximum number of VMIDs */
#define AMDGPU_NUM_VM	16

/* Maximum number of PTEs the hardware can write with one command */
#define AMDGPU_VM_MAX_UPDATE_SIZE	0x3FFFF

/* number of entries in page table */
#define AMDGPU_VM_PTE_COUNT (1 << amdgpu_vm_block_size)

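/*
 * Illustrative sketch, not part of the original header: with this two-level
 * layout a GPU virtual address splits into a 4K page offset (low 12 bits),
 * a page table index (the next amdgpu_vm_block_size bits, i.e. the same
 * module parameter used by AMDGPU_VM_PTE_COUNT above) and a page directory
 * index (the remaining bits). The helpers below are hypothetical and only
 * meant to show the arithmetic.
 */
static inline unsigned amdgpu_vm_example_pde_index(uint64_t addr)
{
	/* which page directory entry covers this address */
	return (addr >> 12) >> amdgpu_vm_block_size;
}

static inline unsigned amdgpu_vm_example_pte_index(uint64_t addr)
{
	/* which PTE inside that page table maps this address */
	return (addr >> 12) & (AMDGPU_VM_PTE_COUNT - 1);
}
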
/* PTBs (Page Table Blocks) need to be aligned to 32K */
#define AMDGPU_VM_PTB_ALIGN_SIZE   32768

/* LOG2 number of contiguous pages for the fragment field */
#define AMDGPU_LOG2_PAGES_PER_FRAG 4

#define AMDGPU_PTE_VALID	(1 << 0)
#define AMDGPU_PTE_SYSTEM	(1 << 1)
#define AMDGPU_PTE_SNOOPED	(1 << 2)

/* VI only */
#define AMDGPU_PTE_EXECUTABLE	(1 << 4)

#define AMDGPU_PTE_READABLE	(1 << 5)
#define AMDGPU_PTE_WRITEABLE	(1 << 6)

#define AMDGPU_PTE_FRAG(x)	((x & 0x1f) << 7)

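/*
 * Illustrative example, not part of the original header: a typical
 * read/write GPU mapping combines the access bits above, e.g.
 *
 *	uint32_t flags = AMDGPU_PTE_VALID | AMDGPU_PTE_READABLE |
 *			 AMDGPU_PTE_WRITEABLE;
 *
 * while large, naturally aligned mappings can additionally encode the
 * fragment size, e.g. AMDGPU_PTE_FRAG(AMDGPU_LOG2_PAGES_PER_FRAG) for
 * 2^4 = 16 contiguous 4K pages (64K fragments).
 */
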
/* How to program VM fault handling */
#define AMDGPU_VM_FAULT_STOP_NEVER	0
#define AMDGPU_VM_FAULT_STOP_FIRST	1
#define AMDGPU_VM_FAULT_STOP_ALWAYS	2

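/*
 * Usage sketch (assumption, not taken from this header): these values are
 * expected to correspond to the amdgpu.vm_fault_stop module parameter, e.g.
 *
 *	modprobe amdgpu vm_fault_stop=2
 *
 * to stop on every VM fault while debugging page table problems.
 */
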
struct amdgpu_vm_pt {
	struct amdgpu_bo	*bo;
	uint64_t		addr;
};

struct amdgpu_vm {
	/* tree of virtual addresses mapped */
	struct rb_root		va;

	/* protecting invalidated */
	spinlock_t		status_lock;

	/* BOs moved, but not yet updated in the PT */
	struct list_head	invalidated;

	/* BOs cleared in the PT because of a move */
	struct list_head	cleared;

	/* BO mappings freed, but not yet updated in the PT */
	struct list_head	freed;

	/* contains the page directory */
	struct amdgpu_bo	*page_directory;
	unsigned		max_pde_used;
	struct dma_fence	*page_directory_fence;
	uint64_t		last_eviction_counter;

	/* array of page tables, one for each page directory entry */
	struct amdgpu_vm_pt	*page_tables;

	/* for id and flush management per ring */
	struct amdgpu_vm_id	*ids[AMDGPU_MAX_RINGS];

	/* protecting freed */
	spinlock_t		freed_lock;

	/* Scheduler entity for page table updates */
	struct amd_sched_entity	entity;

	/* client id */
	u64			client_id;
};

struct amdgpu_vm_id {
	struct list_head	list;
	struct dma_fence	*first;
	struct amdgpu_sync	active;
	struct dma_fence	*last_flush;
	atomic64_t		owner;

	uint64_t		pd_gpu_addr;
	/* last flushed PD/PT update */
	struct dma_fence	*flushed_updates;

	uint32_t		current_gpu_reset_count;

	uint32_t		gds_base;
	uint32_t		gds_size;
	uint32_t		gws_base;
	uint32_t		gws_size;
	uint32_t		oa_base;
	uint32_t		oa_size;
};

struct amdgpu_vm_manager {
	/* Handling of VMIDs */
	struct mutex				lock;
	unsigned				num_ids;
	struct list_head			ids_lru;
	struct amdgpu_vm_id			ids[AMDGPU_NUM_VM];

	/* Handling of VM fences */
	u64					fence_context;
	unsigned				seqno[AMDGPU_MAX_RINGS];

	uint32_t				max_pfn;
	/* vram base address for page table entry */
	u64					vram_base_offset;
	/* is vm enabled? */
	bool					enabled;
	/* vm pte handling */
	const struct amdgpu_vm_pte_funcs	*vm_pte_funcs;
	struct amdgpu_ring			*vm_pte_rings[AMDGPU_MAX_RINGS];
	unsigned				vm_pte_num_rings;
	atomic_t				vm_pte_next_ring;
	/* client id counter */
	atomic64_t				client_counter;
};

void amdgpu_vm_manager_init(struct amdgpu_device *adev);
void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
			 struct list_head *validated,
			 struct amdgpu_bo_list_entry *entry);
int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			      int (*callback)(void *p, struct amdgpu_bo *bo),
			      void *param);
void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
				  struct amdgpu_vm *vm);
int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
		      struct amdgpu_sync *sync, struct dma_fence *fence,
		      struct amdgpu_job *job);
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm);
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm);
int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			     struct amdgpu_sync *sync);
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
			struct amdgpu_bo_va *bo_va,
			bool clear);
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
			     struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
				       struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo *bo);
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
		     struct amdgpu_bo_va *bo_va,
		     uint64_t addr, uint64_t offset,
		     uint64_t size, uint32_t flags);
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
		       struct amdgpu_bo_va *bo_va,
		       uint64_t addr);
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
		      struct amdgpu_bo_va *bo_va);

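/*
 * Rough usage sketch, not part of the original header, of how these entry
 * points are typically driven by the rest of the driver: per-process VM
 * setup and per-BO mappings on one side, per-submission VMID and flush
 * handling on the other. Variable names are placeholders; error handling
 * and locking (page directory reservation, status_lock, freed_lock) are
 * omitted.
 *
 *	amdgpu_vm_init(adev, vm);
 *	bo_va = amdgpu_vm_bo_add(adev, vm, bo);
 *	amdgpu_vm_bo_map(adev, bo_va, gpu_addr, 0, size,
 *			 AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE);
 *	amdgpu_vm_update_page_directory(adev, vm);
 *	amdgpu_vm_bo_update(adev, bo_va, false);
 *	...
 *	amdgpu_vm_bo_unmap(adev, bo_va, gpu_addr);
 *	amdgpu_vm_clear_freed(adev, vm);
 *	amdgpu_vm_bo_rmv(adev, bo_va);
 *	amdgpu_vm_fini(adev, vm);
 *
 * On command submission amdgpu_vm_grab_id() assigns a VMID to the job and
 * amdgpu_vm_flush() emits the page table flush on the ring before the
 * command buffer itself runs.
 */
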
#endif