]>
Commit | Line | Data |
---|---|---|
bb990bb0 CK |
1 | /* |
2 | * Copyright 2016 Advanced Micro Devices, Inc. | |
3 | * | |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | |
5 | * copy of this software and associated documentation files (the "Software"), | |
6 | * to deal in the Software without restriction, including without limitation | |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
8 | * and/or sell copies of the Software, and to permit persons to whom the | |
9 | * Software is furnished to do so, subject to the following conditions: | |
10 | * | |
11 | * The above copyright notice and this permission notice shall be included in | |
12 | * all copies or substantial portions of the Software. | |
13 | * | |
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | |
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | |
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | |
20 | * OTHER DEALINGS IN THE SOFTWARE. | |
21 | * | |
22 | * Authors: Christian König | |
23 | */ | |
24 | ||
25 | #include <drm/drmP.h> | |
26 | #include "amdgpu.h" | |
27 | ||
/*
 * Per-manager state for the GTT domain.
 *
 * @mm:        DRM MM range allocator handing out GART address space
 * @lock:      protects @mm (drm_mm is not internally locked)
 * @available: number of pages not yet accounted to any allocation;
 *             tracked separately from @mm because nodes may exist
 *             without address space assigned yet
 */
struct amdgpu_gtt_mgr {
	struct drm_mm mm;
	spinlock_t lock;
	atomic64_t available;
};
33 | ||
34 | /** | |
35 | * amdgpu_gtt_mgr_init - init GTT manager and DRM MM | |
36 | * | |
37 | * @man: TTM memory type manager | |
38 | * @p_size: maximum size of GTT | |
39 | * | |
40 | * Allocate and initialize the GTT manager. | |
41 | */ | |
42 | static int amdgpu_gtt_mgr_init(struct ttm_mem_type_manager *man, | |
43 | unsigned long p_size) | |
44 | { | |
bb84284e | 45 | struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev); |
bb990bb0 | 46 | struct amdgpu_gtt_mgr *mgr; |
cc25188a | 47 | uint64_t start, size; |
bb990bb0 CK |
48 | |
49 | mgr = kzalloc(sizeof(*mgr), GFP_KERNEL); | |
50 | if (!mgr) | |
51 | return -ENOMEM; | |
52 | ||
cc25188a | 53 | start = AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS; |
bb84284e | 54 | size = (adev->mc.gart_size >> PAGE_SHIFT) - start; |
cc25188a | 55 | drm_mm_init(&mgr->mm, start, size); |
bb990bb0 | 56 | spin_lock_init(&mgr->lock); |
9255d77d | 57 | atomic64_set(&mgr->available, p_size); |
bb990bb0 CK |
58 | man->priv = mgr; |
59 | return 0; | |
60 | } | |
61 | ||
62 | /** | |
63 | * amdgpu_gtt_mgr_fini - free and destroy GTT manager | |
64 | * | |
65 | * @man: TTM memory type manager | |
66 | * | |
67 | * Destroy and free the GTT manager, returns -EBUSY if ranges are still | |
68 | * allocated inside it. | |
69 | */ | |
70 | static int amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager *man) | |
71 | { | |
72 | struct amdgpu_gtt_mgr *mgr = man->priv; | |
73 | ||
74 | spin_lock(&mgr->lock); | |
75 | if (!drm_mm_clean(&mgr->mm)) { | |
76 | spin_unlock(&mgr->lock); | |
77 | return -EBUSY; | |
78 | } | |
79 | ||
80 | drm_mm_takedown(&mgr->mm); | |
81 | spin_unlock(&mgr->lock); | |
82 | kfree(mgr); | |
83 | man->priv = NULL; | |
84 | return 0; | |
85 | } | |
86 | ||
98a7f88c CK |
87 | /** |
88 | * amdgpu_gtt_mgr_is_allocated - Check if mem has address space | |
89 | * | |
90 | * @mem: the mem object to check | |
91 | * | |
92 | * Check if a mem object has already address space allocated. | |
93 | */ | |
94 | bool amdgpu_gtt_mgr_is_allocated(struct ttm_mem_reg *mem) | |
95 | { | |
96 | struct drm_mm_node *node = mem->mm_node; | |
97 | ||
98 | return (node->start != AMDGPU_BO_INVALID_OFFSET); | |
99 | } | |
100 | ||
/**
 * amdgpu_gtt_mgr_alloc - allocate new ranges
 *
 * @man: TTM memory type manager
 * @tbo: TTM BO we need this range for
 * @place: placement flags and restrictions (may be NULL)
 * @mem: the resulting mem object
 *
 * Allocate the address space for a node.
 */
static int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
				struct ttm_buffer_object *tbo,
				const struct ttm_place *place,
				struct ttm_mem_reg *mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
	struct amdgpu_gtt_mgr *mgr = man->priv;
	struct drm_mm_node *node = mem->mm_node;
	enum drm_mm_insert_mode mode;
	unsigned long fpfn, lpfn;
	int r;

	/* Nothing to do if the node already got address space assigned. */
	if (amdgpu_gtt_mgr_is_allocated(mem))
		return 0;

	/* Without placement restrictions search the full GART range. */
	if (place)
		fpfn = place->fpfn;
	else
		fpfn = 0;

	if (place && place->lpfn)
		lpfn = place->lpfn;
	else
		lpfn = adev->gart.num_cpu_pages;

	/* TOPDOWN placements prefer addresses at the end of the range. */
	mode = DRM_MM_INSERT_BEST;
	if (place && place->flags & TTM_PL_FLAG_TOPDOWN)
		mode = DRM_MM_INSERT_HIGH;

	/* mgr->lock serializes all drm_mm access. */
	spin_lock(&mgr->lock);
	r = drm_mm_insert_node_in_range(&mgr->mm, node,
					mem->num_pages, mem->page_alignment, 0,
					fpfn, lpfn, mode);
	spin_unlock(&mgr->lock);

	/* On success publish the assigned GART offset to TTM. */
	if (!r)
		mem->start = node->start;

	return r;
}
151 | ||
/**
 * amdgpu_gtt_mgr_new - allocate a new node
 *
 * @man: TTM memory type manager
 * @tbo: TTM BO we need this range for
 * @place: placement flags and restrictions
 * @mem: the resulting mem object
 *
 * Dummy, allocate the node but no space for it yet.
 */
static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
			      struct ttm_buffer_object *tbo,
			      const struct ttm_place *place,
			      struct ttm_mem_reg *mem)
{
	struct amdgpu_gtt_mgr *mgr = man->priv;
	struct drm_mm_node *node;
	int r;

	/*
	 * Reserve the pages from the available counter.  Moves of a BO
	 * already resident in TT (neither &tbo->mem == mem nor a non-TT
	 * source) are exempt from the check so they cannot deadlock on
	 * a full GTT.  Returning 0 with mem->mm_node left NULL tells
	 * TTM there is no space — presumably TTM then tries eviction;
	 * NOTE(review): confirm against the TTM get_node contract.
	 */
	spin_lock(&mgr->lock);
	if ((&tbo->mem == mem || tbo->mem.mem_type != TTM_PL_TT) &&
	    atomic64_read(&mgr->available) < mem->num_pages) {
		spin_unlock(&mgr->lock);
		return 0;
	}
	atomic64_sub(mem->num_pages, &mgr->available);
	spin_unlock(&mgr->lock);

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node) {
		r = -ENOMEM;
		goto err_out;
	}

	/* Mark the node as having no GART address space yet. */
	node->start = AMDGPU_BO_INVALID_OFFSET;
	node->size = mem->num_pages;
	mem->mm_node = node;

	/*
	 * Only restricted placements need address space immediately;
	 * everything else gets it lazily via amdgpu_gtt_mgr_alloc().
	 */
	if (place->fpfn || place->lpfn || place->flags & TTM_PL_FLAG_TOPDOWN) {
		r = amdgpu_gtt_mgr_alloc(man, tbo, place, mem);
		if (unlikely(r)) {
			kfree(node);
			mem->mm_node = NULL;
			/* Report "no space" (r = 0) instead of a hard error. */
			r = 0;
			goto err_out;
		}
	} else {
		mem->start = node->start;
	}

	return 0;
err_out:
	/* Give the reserved pages back on any failure path. */
	atomic64_add(mem->num_pages, &mgr->available);

	return r;
}
208 | ||
/**
 * amdgpu_gtt_mgr_del - free ranges
 *
 * @man: TTM memory type manager
 * @mem: TTM memory object
 *
 * Free the allocated GTT again.
 */
static void amdgpu_gtt_mgr_del(struct ttm_mem_type_manager *man,
			       struct ttm_mem_reg *mem)
{
	struct amdgpu_gtt_mgr *mgr = man->priv;
	struct drm_mm_node *node = mem->mm_node;

	/* Nothing was allocated for this mem object. */
	if (!node)
		return;

	/* Only nodes with assigned address space live in the drm_mm. */
	spin_lock(&mgr->lock);
	if (node->start != AMDGPU_BO_INVALID_OFFSET)
		drm_mm_remove_node(node);
	spin_unlock(&mgr->lock);
	/* Return the pages to the available counter (see _new/_alloc). */
	atomic64_add(mem->num_pages, &mgr->available);

	kfree(node);
	mem->mm_node = NULL;
}
237 | ||
9255d77d CK |
238 | /** |
239 | * amdgpu_gtt_mgr_usage - return usage of GTT domain | |
240 | * | |
241 | * @man: TTM memory type manager | |
242 | * | |
243 | * Return how many bytes are used in the GTT domain | |
244 | */ | |
245 | uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man) | |
246 | { | |
247 | struct amdgpu_gtt_mgr *mgr = man->priv; | |
d2d7cc33 | 248 | s64 result = man->size - atomic64_read(&mgr->available); |
9255d77d | 249 | |
d2d7cc33 | 250 | return (result > 0 ? result : 0) * PAGE_SIZE; |
9255d77d CK |
251 | } |
252 | ||
/**
 * amdgpu_gtt_mgr_debug - dump GTT table
 *
 * @man: TTM memory type manager
 * @printer: DRM printer to use
 *
 * Dump the table content using printk.
 */
static void amdgpu_gtt_mgr_debug(struct ttm_mem_type_manager *man,
				 struct drm_printer *printer)
{
	struct amdgpu_gtt_mgr *mgr = man->priv;

	/* drm_mm walk must happen under the manager lock. */
	spin_lock(&mgr->lock);
	drm_mm_print(&mgr->mm, printer);
	spin_unlock(&mgr->lock);

	drm_printf(printer, "man size:%llu pages, gtt available:%lld pages, usage:%lluMB\n",
		   man->size, (u64)atomic64_read(&mgr->available),
		   amdgpu_gtt_mgr_usage(man) >> 20);
}
274 | ||
/* TTM memory type manager callbacks for the GTT domain. */
const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func = {
	.init = amdgpu_gtt_mgr_init,
	.takedown = amdgpu_gtt_mgr_fini,
	.get_node = amdgpu_gtt_mgr_new,
	.put_node = amdgpu_gtt_mgr_del,
	.debug = amdgpu_gtt_mgr_debug
};