/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/mmu_notifier.h>
#include <linux/interval_tree.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "amdgpu.h"

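/*
 * One amdgpu_mn is created per (device, mm_struct) pair and registered as
 * an MMU notifier on that mm.  Userptr ranges are tracked in the interval
 * tree "objects"; each tree node carries the list of BOs backed by that
 * address range.
 */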
struct amdgpu_mn {
	/* constant after initialisation */
	struct amdgpu_device	*adev;
	struct mm_struct	*mm;
	struct mmu_notifier	mn;

	/* only used on destruction */
	struct work_struct	work;

	/* protected by adev->mn_lock */
	struct hlist_node	node;

	/* objects protected by lock */
	struct mutex		lock;
	struct rb_root		objects;
};

struct amdgpu_mn_node {
	struct interval_tree_node	it;
	struct list_head		bos;
};

/**
 * amdgpu_mn_destroy - destroy the rmn
 *
 * @work: previously scheduled work item
 *
 * Lazily destroys the notifier from a work item.
 */
static void amdgpu_mn_destroy(struct work_struct *work)
{
	struct amdgpu_mn *rmn = container_of(work, struct amdgpu_mn, work);
	struct amdgpu_device *adev = rmn->adev;
	struct amdgpu_mn_node *node, *next_node;
	struct amdgpu_bo *bo, *next_bo;

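	/* take both locks so neither new lookups nor running invalidations
	 * can race with the teardown
	 */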
	mutex_lock(&adev->mn_lock);
	mutex_lock(&rmn->lock);
	hash_del(&rmn->node);
	rbtree_postorder_for_each_entry_safe(node, next_node, &rmn->objects,
					     it.rb) {
		list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) {
			bo->mn = NULL;
			list_del_init(&bo->mn_list);
		}
		kfree(node);
	}
	mutex_unlock(&rmn->lock);
	mutex_unlock(&adev->mn_lock);
	mmu_notifier_unregister_no_release(&rmn->mn, rmn->mm);
	kfree(rmn);
}

/**
 * amdgpu_mn_release - callback to notify about mm destruction
 *
 * @mn: our notifier
 * @mm: the mm this callback is about
 *
 * Schedule a work item to lazily destroy our notifier.
 */
static void amdgpu_mn_release(struct mmu_notifier *mn,
			      struct mm_struct *mm)
{
	struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
	INIT_WORK(&rmn->work, amdgpu_mn_destroy);
	schedule_work(&rmn->work);
}

/**
 * amdgpu_mn_invalidate_node - unmap all BOs of a node
 *
 * @node: the node with the BOs to unmap
 * @start: start of the affected address range
 * @end: end of the affected address range
 *
 * Block for all BOs of the node and unmap them by moving them
 * into the system domain again.
 */
static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
				      unsigned long start,
				      unsigned long end)
{
	struct amdgpu_bo *bo;
	long r;

	list_for_each_entry(bo, &node->bos, mn_list) {

		if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, end))
			continue;

		r = amdgpu_bo_reserve(bo, true);
		if (r) {
			DRM_ERROR("(%ld) failed to reserve user bo\n", r);
			continue;
		}

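		/* wait for all fences on the BO (shared and exclusive) before
		 * pulling it back into the CPU domain
		 */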
		r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
			true, false, MAX_SCHEDULE_TIMEOUT);
		if (r <= 0)
			DRM_ERROR("(%ld) failed to wait for user bo\n", r);

		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
		if (r)
			DRM_ERROR("(%ld) failed to validate user bo\n", r);

		amdgpu_bo_unreserve(bo);
	}
}

/**
 * amdgpu_mn_invalidate_page - callback to notify about mm change
 *
 * @mn: our notifier
 * @mm: the mm this callback is about
 * @address: address of the invalidated page
 *
 * Invalidation of a single page. Block for all BOs mapping it
 * and unmap them by moving them into the system domain again.
 */
static void amdgpu_mn_invalidate_page(struct mmu_notifier *mn,
				      struct mm_struct *mm,
				      unsigned long address)
{
	struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
	struct interval_tree_node *it;

	mutex_lock(&rmn->lock);

	it = interval_tree_iter_first(&rmn->objects, address, address);
	if (it) {
		struct amdgpu_mn_node *node;

		node = container_of(it, struct amdgpu_mn_node, it);
		amdgpu_mn_invalidate_node(node, address, address);
	}

	mutex_unlock(&rmn->lock);
}

/**
 * amdgpu_mn_invalidate_range_start - callback to notify about mm change
 *
 * @mn: our notifier
 * @mm: the mm this callback is about
 * @start: start of updated range
 * @end: end of updated range
 *
 * Block for all BOs between start and end to become idle and
 * unmap them by moving them into the system domain again.
 */
static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
					     struct mm_struct *mm,
					     unsigned long start,
					     unsigned long end)
{
	struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
	struct interval_tree_node *it;

	/* notification is exclusive, but interval is inclusive */
	end -= 1;

	mutex_lock(&rmn->lock);

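	/* walk every node overlapping [start, end] and unmap its BOs */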
	it = interval_tree_iter_first(&rmn->objects, start, end);
	while (it) {
		struct amdgpu_mn_node *node;

		node = container_of(it, struct amdgpu_mn_node, it);
		it = interval_tree_iter_next(it, start, end);

		amdgpu_mn_invalidate_node(node, start, end);
	}

	mutex_unlock(&rmn->lock);
}

static const struct mmu_notifier_ops amdgpu_mn_ops = {
	.release = amdgpu_mn_release,
	.invalidate_page = amdgpu_mn_invalidate_page,
	.invalidate_range_start = amdgpu_mn_invalidate_range_start,
};

/**
 * amdgpu_mn_get - create notifier context
 *
 * @adev: amdgpu device pointer
 *
 * Creates a notifier context for current->mm.
 */
static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
{
	struct mm_struct *mm = current->mm;
	struct amdgpu_mn *rmn;
	int r;

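	/* adev->mn_lock serializes the hash lookup/insert; mmap_sem must be
	 * held for write for __mmu_notifier_register()
	 */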
	mutex_lock(&adev->mn_lock);
	if (down_write_killable(&mm->mmap_sem)) {
		mutex_unlock(&adev->mn_lock);
		return ERR_PTR(-EINTR);
	}

	hash_for_each_possible(adev->mn_hash, rmn, node, (unsigned long)mm)
		if (rmn->mm == mm)
			goto release_locks;

	rmn = kzalloc(sizeof(*rmn), GFP_KERNEL);
	if (!rmn) {
		rmn = ERR_PTR(-ENOMEM);
		goto release_locks;
	}

	rmn->adev = adev;
	rmn->mm = mm;
	rmn->mn.ops = &amdgpu_mn_ops;
	mutex_init(&rmn->lock);
	rmn->objects = RB_ROOT;

	r = __mmu_notifier_register(&rmn->mn, mm);
	if (r)
		goto free_rmn;

	hash_add(adev->mn_hash, &rmn->node, (unsigned long)mm);

release_locks:
	up_write(&mm->mmap_sem);
	mutex_unlock(&adev->mn_lock);

	return rmn;

free_rmn:
	up_write(&mm->mmap_sem);
	mutex_unlock(&adev->mn_lock);
	kfree(rmn);

	return ERR_PTR(r);
}

/**
 * amdgpu_mn_register - register a BO for notifier updates
 *
 * @bo: amdgpu buffer object
 * @addr: userptr addr we should monitor
 *
 * Registers an MMU notifier for the given BO at the specified address.
 * Returns 0 on success, -ERRNO if anything goes wrong.
 */
int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
{
	unsigned long end = addr + amdgpu_bo_size(bo) - 1;
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_mn *rmn;
	struct amdgpu_mn_node *node = NULL;
	struct list_head bos;
	struct interval_tree_node *it;

	rmn = amdgpu_mn_get(adev);
	if (IS_ERR(rmn))
		return PTR_ERR(rmn);

	INIT_LIST_HEAD(&bos);

	mutex_lock(&rmn->lock);

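	/* merge all nodes overlapping [addr, end] into one range and collect
	 * their BOs; the last surviving node is reused below
	 */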
	while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) {
		kfree(node);
		node = container_of(it, struct amdgpu_mn_node, it);
		interval_tree_remove(&node->it, &rmn->objects);
		addr = min(it->start, addr);
		end = max(it->last, end);
		list_splice(&node->bos, &bos);
	}

	if (!node) {
		node = kmalloc(sizeof(struct amdgpu_mn_node), GFP_KERNEL);
		if (!node) {
			mutex_unlock(&rmn->lock);
			return -ENOMEM;
		}
	}

	bo->mn = rmn;

	node->it.start = addr;
	node->it.last = end;
	INIT_LIST_HEAD(&node->bos);
	list_splice(&bos, &node->bos);
	list_add(&bo->mn_list, &node->bos);

	interval_tree_insert(&node->it, &rmn->objects);

	mutex_unlock(&rmn->lock);

	return 0;
}

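/*
 * Rough usage sketch (not part of this file): a userptr creation path is
 * expected to pair these calls roughly as follows; "args->addr" and the
 * error label are placeholders for whatever the caller actually uses:
 *
 *	r = amdgpu_mn_register(bo, args->addr);
 *	if (r)
 *		goto release_object;
 *	...
 *	amdgpu_mn_unregister(bo);	(on free or error unwind)
 */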
/**
 * amdgpu_mn_unregister - unregister a BO for notifier updates
 *
 * @bo: amdgpu buffer object
 *
 * Remove any registration of MMU notifier updates from the buffer object.
 */
void amdgpu_mn_unregister(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_mn *rmn;
	struct list_head *head;

	mutex_lock(&adev->mn_lock);

	rmn = bo->mn;
	if (rmn == NULL) {
		mutex_unlock(&adev->mn_lock);
		return;
	}

	mutex_lock(&rmn->lock);

	/* save the next list entry for later */
	head = bo->mn_list.next;

	bo->mn = NULL;
	list_del_init(&bo->mn_list);

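	/* if this was the last BO on the node, drop the node from the tree */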
	if (list_empty(head)) {
		struct amdgpu_mn_node *node;
		node = container_of(head, struct amdgpu_mn_node, bos);
		interval_tree_remove(&node->it, &rmn->objects);
		kfree(node);
	}

	mutex_unlock(&rmn->lock);
	mutex_unlock(&adev->mn_lock);
}