/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */
28 struct amdgpu_vram_mgr
{
36 * amdgpu_vram_mgr_init - init VRAM manager and DRM MM
38 * @man: TTM memory type manager
39 * @p_size: maximum size of VRAM
41 * Allocate and initialize the VRAM manager.
43 static int amdgpu_vram_mgr_init(struct ttm_mem_type_manager
*man
,
46 struct amdgpu_vram_mgr
*mgr
;
48 mgr
= kzalloc(sizeof(*mgr
), GFP_KERNEL
);
52 drm_mm_init(&mgr
->mm
, 0, p_size
);
53 spin_lock_init(&mgr
->lock
);
59 * amdgpu_vram_mgr_fini - free and destroy VRAM manager
61 * @man: TTM memory type manager
63 * Destroy and free the VRAM manager, returns -EBUSY if ranges are still
64 * allocated inside it.
66 static int amdgpu_vram_mgr_fini(struct ttm_mem_type_manager
*man
)
68 struct amdgpu_vram_mgr
*mgr
= man
->priv
;
70 spin_lock(&mgr
->lock
);
71 if (!drm_mm_clean(&mgr
->mm
)) {
72 spin_unlock(&mgr
->lock
);
76 drm_mm_takedown(&mgr
->mm
);
77 spin_unlock(&mgr
->lock
);
84 * amdgpu_vram_mgr_vis_size - Calculate visible node size
86 * @adev: amdgpu device structure
87 * @node: MM node structure
89 * Calculate how many bytes of the MM node are inside visible VRAM
91 static u64
amdgpu_vram_mgr_vis_size(struct amdgpu_device
*adev
,
92 struct drm_mm_node
*node
)
94 uint64_t start
= node
->start
<< PAGE_SHIFT
;
95 uint64_t end
= (node
->size
+ node
->start
) << PAGE_SHIFT
;
97 if (start
>= adev
->mc
.visible_vram_size
)
100 return (end
> adev
->mc
.visible_vram_size
?
101 adev
->mc
.visible_vram_size
: end
) - start
;
105 * amdgpu_vram_mgr_new - allocate new ranges
107 * @man: TTM memory type manager
108 * @tbo: TTM BO we need this range for
109 * @place: placement flags and restrictions
110 * @mem: the resulting mem object
112 * Allocate VRAM for the given BO.
114 static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager
*man
,
115 struct ttm_buffer_object
*tbo
,
116 const struct ttm_place
*place
,
117 struct ttm_mem_reg
*mem
)
119 struct amdgpu_device
*adev
= amdgpu_ttm_adev(man
->bdev
);
120 struct amdgpu_vram_mgr
*mgr
= man
->priv
;
121 struct drm_mm
*mm
= &mgr
->mm
;
122 struct drm_mm_node
*nodes
;
123 enum drm_mm_insert_mode mode
;
124 unsigned long lpfn
, num_nodes
, pages_per_node
, pages_left
;
125 uint64_t usage
= 0, vis_usage
= 0;
133 if (place
->flags
& TTM_PL_FLAG_CONTIGUOUS
||
134 amdgpu_vram_page_split
== -1) {
135 pages_per_node
= ~0ul;
138 pages_per_node
= max((uint32_t)amdgpu_vram_page_split
,
139 mem
->page_alignment
);
140 num_nodes
= DIV_ROUND_UP(mem
->num_pages
, pages_per_node
);
143 nodes
= kcalloc(num_nodes
, sizeof(*nodes
), GFP_KERNEL
);
147 mode
= DRM_MM_INSERT_BEST
;
148 if (place
->flags
& TTM_PL_FLAG_TOPDOWN
)
149 mode
= DRM_MM_INSERT_HIGH
;
152 pages_left
= mem
->num_pages
;
154 spin_lock(&mgr
->lock
);
155 for (i
= 0; i
< num_nodes
; ++i
) {
156 unsigned long pages
= min(pages_left
, pages_per_node
);
157 uint32_t alignment
= mem
->page_alignment
;
160 if (pages
== pages_per_node
)
161 alignment
= pages_per_node
;
163 r
= drm_mm_insert_node_in_range(mm
, &nodes
[i
],
170 usage
+= nodes
[i
].size
<< PAGE_SHIFT
;
171 vis_usage
+= amdgpu_vram_mgr_vis_size(adev
, &nodes
[i
]);
173 /* Calculate a virtual BO start address to easily check if
174 * everything is CPU accessible.
176 start
= nodes
[i
].start
+ nodes
[i
].size
;
177 if (start
> mem
->num_pages
)
178 start
-= mem
->num_pages
;
181 mem
->start
= max(mem
->start
, start
);
184 spin_unlock(&mgr
->lock
);
186 atomic64_add(usage
, &mgr
->usage
);
187 atomic64_add(vis_usage
, &mgr
->vis_usage
);
189 mem
->mm_node
= nodes
;
195 drm_mm_remove_node(&nodes
[i
]);
196 spin_unlock(&mgr
->lock
);
199 return r
== -ENOSPC
? 0 : r
;
203 * amdgpu_vram_mgr_del - free ranges
205 * @man: TTM memory type manager
206 * @tbo: TTM BO we need this range for
207 * @place: placement flags and restrictions
208 * @mem: TTM memory object
210 * Free the allocated VRAM again.
212 static void amdgpu_vram_mgr_del(struct ttm_mem_type_manager
*man
,
213 struct ttm_mem_reg
*mem
)
215 struct amdgpu_device
*adev
= amdgpu_ttm_adev(man
->bdev
);
216 struct amdgpu_vram_mgr
*mgr
= man
->priv
;
217 struct drm_mm_node
*nodes
= mem
->mm_node
;
218 uint64_t usage
= 0, vis_usage
= 0;
219 unsigned pages
= mem
->num_pages
;
224 spin_lock(&mgr
->lock
);
226 pages
-= nodes
->size
;
227 drm_mm_remove_node(nodes
);
228 usage
+= nodes
->size
<< PAGE_SHIFT
;
229 vis_usage
+= amdgpu_vram_mgr_vis_size(adev
, nodes
);
232 spin_unlock(&mgr
->lock
);
234 atomic64_sub(usage
, &mgr
->usage
);
235 atomic64_sub(vis_usage
, &mgr
->vis_usage
);
242 * amdgpu_vram_mgr_usage - how many bytes are used in this domain
244 * @man: TTM memory type manager
246 * Returns how many bytes are used in this domain.
248 uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager
*man
)
250 struct amdgpu_vram_mgr
*mgr
= man
->priv
;
252 return atomic64_read(&mgr
->usage
);
256 * amdgpu_vram_mgr_vis_usage - how many bytes are used in the visible part
258 * @man: TTM memory type manager
260 * Returns how many bytes are used in the visible part of VRAM
262 uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager
*man
)
264 struct amdgpu_vram_mgr
*mgr
= man
->priv
;
266 return atomic64_read(&mgr
->vis_usage
);
270 * amdgpu_vram_mgr_debug - dump VRAM table
272 * @man: TTM memory type manager
273 * @printer: DRM printer to use
275 * Dump the table content using printk.
277 static void amdgpu_vram_mgr_debug(struct ttm_mem_type_manager
*man
,
278 struct drm_printer
*printer
)
280 struct amdgpu_vram_mgr
*mgr
= man
->priv
;
282 spin_lock(&mgr
->lock
);
283 drm_mm_print(&mgr
->mm
, printer
);
284 spin_unlock(&mgr
->lock
);
286 drm_printf(printer
, "man size:%llu pages, ram usage:%lluMB, vis usage:%lluMB\n",
287 man
->size
, amdgpu_vram_mgr_usage(man
) >> 20,
288 amdgpu_vram_mgr_vis_usage(man
) >> 20);
291 const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func
= {
292 .init
= amdgpu_vram_mgr_init
,
293 .takedown
= amdgpu_vram_mgr_fini
,
294 .get_node
= amdgpu_vram_mgr_new
,
295 .put_node
= amdgpu_vram_mgr_del
,
296 .debug
= amdgpu_vram_mgr_debug