/*
 * Copyright © 2012-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/mmu_context.h>
#include <linux/mmu_notifier.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>
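/*
 * One i915_mm_struct is kept per process mm_struct that owns userptr objects.
 * It is reference counted and, as explained in the comment in
 * i915_gem_userptr_init__mm_struct() below, its final release is deferred to
 * a worker so that the mm reference is never dropped while holding
 * struct_mutex.
 */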
struct i915_mm_struct {
	struct mm_struct *mm;
	struct drm_device *dev;
	struct i915_mmu_notifier *mn;
	struct hlist_node node;
	struct kref kref;
	struct work_struct work;
};
#if defined(CONFIG_MMU_NOTIFIER)
#include <linux/interval_tree.h>
struct i915_mmu_notifier {
	spinlock_t lock;
	struct hlist_node node;
	struct mmu_notifier mn;
	struct rb_root objects;
	struct list_head linear;
	unsigned long serial;
	bool has_linear;
};
struct i915_mmu_object {
	struct i915_mmu_notifier *mn;
	struct interval_tree_node it;
	struct list_head link;
	struct drm_i915_gem_object *obj;
	bool is_linear;
};
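/*
 * Cancel any pending gup worker for the object, unbind it and drop its
 * backing pages. Called from the mmu_notifier invalidation paths; returns
 * the end of the object's user range so the caller can continue its walk.
 */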
static unsigned long cancel_userptr(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	unsigned long end;

	mutex_lock(&dev->struct_mutex);
	/* Cancel any active worker and force us to re-evaluate gup */
	obj->userptr.work = NULL;

	if (obj->pages != NULL) {
		struct drm_i915_private *dev_priv = to_i915(dev);
		struct i915_vma *vma, *tmp;
		bool was_interruptible;

		was_interruptible = dev_priv->mm.interruptible;
		dev_priv->mm.interruptible = false;

		list_for_each_entry_safe(vma, tmp, &obj->vma_list, vma_link) {
			int ret = i915_vma_unbind(vma);
			WARN_ON(ret && ret != -EIO);
		}

		WARN_ON(i915_gem_object_put_pages(obj));

		dev_priv->mm.interruptible = was_interruptible;
	}

	end = obj->userptr.ptr + obj->base.size;

	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);

	return end;
}
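/*
 * Slow path used once overlapping userptr objects exist: walk the linear
 * list and cancel every object intersecting [start, end]. The spinlock is
 * dropped around cancel_userptr(), so the walk restarts whenever the
 * notifier serial shows the list changed underneath us.
 */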
static void *invalidate_range__linear(struct i915_mmu_notifier *mn,
				      struct mm_struct *mm,
				      unsigned long start,
				      unsigned long end)
{
	struct i915_mmu_object *mo;
	unsigned long serial;

restart:
	serial = mn->serial;
	list_for_each_entry(mo, &mn->linear, link) {
		struct drm_i915_gem_object *obj;

		if (mo->it.last < start || mo->it.start > end)
			continue;

		obj = mo->obj;

		if (!kref_get_unless_zero(&obj->base.refcount))
			continue;

		spin_unlock(&mn->lock);

		cancel_userptr(obj);

		spin_lock(&mn->lock);
		if (serial != mn->serial)
			goto restart;
	}

	return NULL;
}
static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
						       struct mm_struct *mm,
						       unsigned long start,
						       unsigned long end)
{
	struct i915_mmu_notifier *mn = container_of(_mn, struct i915_mmu_notifier, mn);
	struct interval_tree_node *it = NULL;
	unsigned long next = start;
	unsigned long serial = 0;

	end--; /* interval ranges are inclusive, but invalidate range is exclusive */
	while (next < end) {
		struct drm_i915_gem_object *obj = NULL;

		spin_lock(&mn->lock);
		if (mn->has_linear)
			it = invalidate_range__linear(mn, mm, start, end);
		else if (serial == mn->serial)
			it = interval_tree_iter_next(it, next, end);
		else
			it = interval_tree_iter_first(&mn->objects, start, end);
		if (it != NULL) {
			obj = container_of(it, struct i915_mmu_object, it)->obj;

			/* The mmu_object is released late when destroying the
			 * GEM object so it is entirely possible to gain a
			 * reference on an object in the process of being freed
			 * since our serialisation is via the spinlock and not
			 * the struct_mutex - and consequently use it after it
			 * is freed and then double free it.
			 */
			if (!kref_get_unless_zero(&obj->base.refcount)) {
				spin_unlock(&mn->lock);
				serial = 0;
				continue;
			}

			serial = mn->serial;
		}
		spin_unlock(&mn->lock);
		if (obj == NULL)
			return;

		next = cancel_userptr(obj);
	}
}
static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
	.invalidate_range_start = i915_gem_userptr_mn_invalidate_range_start,
};
static struct i915_mmu_notifier *
i915_mmu_notifier_create(struct mm_struct *mm)
{
	struct i915_mmu_notifier *mn;
	int ret;

	mn = kmalloc(sizeof(*mn), GFP_KERNEL);
	if (mn == NULL)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&mn->lock);
	mn->mn.ops = &i915_gem_userptr_notifier;
	mn->objects = RB_ROOT;
	mn->serial = 1;
	INIT_LIST_HEAD(&mn->linear);
	mn->has_linear = false;

	/* Protected by mmap_sem (write-lock) */
	ret = __mmu_notifier_register(&mn->mn, mm);
	if (ret) {
		kfree(mn);
		return ERR_PTR(ret);
	}

	return mn;
}
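/*
 * Bump the serial used to detect concurrent modification of the notifier
 * lists; 0 is skipped so that a stale or freshly initialised serial of 0
 * never compares equal.
 */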
static void __i915_mmu_notifier_update_serial(struct i915_mmu_notifier *mn)
{
	if (++mn->serial == 0)
		mn->serial = 1;
}
static int
i915_mmu_notifier_add(struct drm_device *dev,
		      struct i915_mmu_notifier *mn,
		      struct i915_mmu_object *mo)
{
	struct interval_tree_node *it;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	/* Make sure we drop the final active reference (and thereby
	 * remove the objects from the interval tree) before we do
	 * the check for overlapping objects.
	 */
	i915_gem_retire_requests(dev);

	spin_lock(&mn->lock);
	it = interval_tree_iter_first(&mn->objects,
				      mo->it.start, mo->it.last);
	if (it) {
		struct drm_i915_gem_object *obj;

		/* We only need to check the first object in the range as it
		 * either has cancelled gup work queued and we need to
		 * return back to the user to give time for the gup-workers
		 * to flush their object references upon which the object will
		 * be removed from the interval-tree, or the range is
		 * still in use by another client and the overlap is invalid.
		 *
		 * If we do have an overlap, we cannot use the interval tree
		 * for fast range invalidation.
		 */

		obj = container_of(it, struct i915_mmu_object, it)->obj;
		if (!obj->userptr.workers)
			mn->has_linear = mo->is_linear = true;
		else
			ret = -EAGAIN;
	} else
		interval_tree_insert(&mo->it, &mn->objects);

	if (ret == 0) {
		list_add(&mo->link, &mn->linear);
		__i915_mmu_notifier_update_serial(mn);
	}
	spin_unlock(&mn->lock);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
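/* True while any object on the linear list still requires the slow, linear invalidation walk. */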
static bool i915_mmu_notifier_has_linear(struct i915_mmu_notifier *mn)
{
	struct i915_mmu_object *mo;

	list_for_each_entry(mo, &mn->linear, link)
		if (mo->is_linear)
			return true;

	return false;
}
static void
i915_mmu_notifier_del(struct i915_mmu_notifier *mn,
		      struct i915_mmu_object *mo)
{
	spin_lock(&mn->lock);
	if (mo->is_linear) {
		list_del(&mo->link);
		mn->has_linear = i915_mmu_notifier_has_linear(mn);
	} else
		interval_tree_remove(&mo->it, &mn->objects);
	__i915_mmu_notifier_update_serial(mn);
	spin_unlock(&mn->lock);
}
static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
	struct i915_mmu_object *mo;

	mo = obj->userptr.mmu_object;
	if (mo == NULL)
		return;

	i915_mmu_notifier_del(mo->mn, mo);
	kfree(mo);

	obj->userptr.mmu_object = NULL;
}
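/*
 * Look up, or lazily create, the mmu_notifier for this mm. Creation is
 * serialised under both the mm's mmap_sem (write) and dev_priv->mm_lock.
 */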
static struct i915_mmu_notifier *
i915_mmu_notifier_find(struct i915_mm_struct *mm)
{
	struct i915_mmu_notifier *mn = mm->mn;

	if (mn)
		return mn;

	down_write(&mm->mm->mmap_sem);
	mutex_lock(&to_i915(mm->dev)->mm_lock);
	if ((mn = mm->mn) == NULL) {
		mn = i915_mmu_notifier_create(mm->mm);
		if (!IS_ERR(mn))
			mm->mn = mn;
	}
	mutex_unlock(&to_i915(mm->dev)->mm_lock);
	up_write(&mm->mm->mmap_sem);

	return mn;
}
static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
				    unsigned flags)
{
	struct i915_mmu_notifier *mn;
	struct i915_mmu_object *mo;
	int ret;

	if (flags & I915_USERPTR_UNSYNCHRONIZED)
		return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;

	if (WARN_ON(obj->userptr.mm == NULL))
		return -EINVAL;

	mn = i915_mmu_notifier_find(obj->userptr.mm);
	if (IS_ERR(mn))
		return PTR_ERR(mn);

	mo = kzalloc(sizeof(*mo), GFP_KERNEL);
	if (mo == NULL)
		return -ENOMEM;

	mo->mn = mn;
	mo->it.start = obj->userptr.ptr;
	mo->it.last = mo->it.start + obj->base.size - 1;
	mo->obj = obj;

	ret = i915_mmu_notifier_add(obj->base.dev, mn, mo);
	if (ret) {
		kfree(mo);
		return ret;
	}

	obj->userptr.mmu_object = mo;
	return 0;
}
static void
i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
		       struct mm_struct *mm)
{
	if (mn == NULL)
		return;

	mmu_notifier_unregister(&mn->mn, mm);
	kfree(mn);
}
#else

static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
}

static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
				    unsigned flags)
{
	if ((flags & I915_USERPTR_UNSYNCHRONIZED) == 0)
		return -ENODEV;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	return 0;
}

static void
i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
		       struct mm_struct *mm)
{
}

#endif
static struct i915_mm_struct *
__i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real)
{
	struct i915_mm_struct *mm;

	/* Protected by dev_priv->mm_lock */
	hash_for_each_possible(dev_priv->mm_structs, mm, node, (unsigned long)real)
		if (mm->mm == real)
			return mm;

	return NULL;
}
static int
i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct i915_mm_struct *mm;
	int ret = 0;

	/* During release of the GEM object we hold the struct_mutex. This
	 * precludes us from calling mmput() at that time as that may be
	 * the last reference and so call exit_mmap(). exit_mmap() will
	 * attempt to reap the vma, and if we were holding a GTT mmap
	 * would then call drm_gem_vm_close() and attempt to reacquire
	 * the struct mutex. So in order to avoid that recursion, we have
	 * to defer releasing the mm reference until after we drop the
	 * struct_mutex, i.e. we need to schedule a worker to do the clean
	 * up.
	 */
	mutex_lock(&dev_priv->mm_lock);
	mm = __i915_mm_struct_find(dev_priv, current->mm);
	if (mm == NULL) {
		mm = kmalloc(sizeof(*mm), GFP_KERNEL);
		if (mm == NULL) {
			ret = -ENOMEM;
			goto out;
		}

		kref_init(&mm->kref);
		mm->dev = obj->base.dev;

		mm->mm = current->mm;
		atomic_inc(&current->mm->mm_count);

		mm->mn = NULL;

		/* Protected by dev_priv->mm_lock */
		hash_add(dev_priv->mm_structs,
			 &mm->node, (unsigned long)mm->mm);
	} else
		kref_get(&mm->kref);

	obj->userptr.mm = mm;
out:
	mutex_unlock(&dev_priv->mm_lock);
	return ret;
}
static void
__i915_mm_struct_free__worker(struct work_struct *work)
{
	struct i915_mm_struct *mm = container_of(work, typeof(*mm), work);
	i915_mmu_notifier_free(mm->mn, mm->mm);
	mmdrop(mm->mm);
	kfree(mm);
}
static void
__i915_mm_struct_free(struct kref *kref)
{
	struct i915_mm_struct *mm = container_of(kref, typeof(*mm), kref);

	/* Protected by dev_priv->mm_lock */
	hash_del(&mm->node);
	mutex_unlock(&to_i915(mm->dev)->mm_lock);

	INIT_WORK(&mm->work, __i915_mm_struct_free__worker);
	schedule_work(&mm->work);
}
static void
i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj)
{
	if (obj->userptr.mm == NULL)
		return;

	kref_put_mutex(&obj->userptr.mm->kref,
		       __i915_mm_struct_free,
		       &to_i915(obj->base.dev)->mm_lock);
	obj->userptr.mm = NULL;
}
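/* Bookkeeping for a deferred get_user_pages() request run from a workqueue. */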
struct get_pages_work {
	struct work_struct work;
	struct drm_i915_gem_object *obj;
	struct task_struct *task;
};
#if IS_ENABLED(CONFIG_SWIOTLB)
#define swiotlb_active()	swiotlb_nr_tbl()
#else
#define swiotlb_active()	0
#endif
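/*
 * Build the sg_table for the pinned user pages. With swiotlb active we keep
 * one page per scatterlist entry instead of letting
 * sg_alloc_table_from_pages() coalesce contiguous pages, presumably so each
 * segment stays within the bounce-buffer limits.
 */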
static int
st_set_pages(struct sg_table **st, struct page **pvec, int num_pages)
{
	struct scatterlist *sg;
	int ret, n;

	*st = kmalloc(sizeof(**st), GFP_KERNEL);
	if (*st == NULL)
		return -ENOMEM;

	if (swiotlb_active()) {
		ret = sg_alloc_table(*st, num_pages, GFP_KERNEL);
		if (ret)
			goto err;

		for_each_sg((*st)->sgl, sg, num_pages, n)
			sg_set_page(sg, pvec[n], PAGE_SIZE, 0);
	} else {
		ret = sg_alloc_table_from_pages(*st, pvec, num_pages,
						0, num_pages << PAGE_SHIFT,
						GFP_KERNEL);
		if (ret)
			goto err;
	}

	return 0;

err:
	kfree(*st);
	*st = NULL;
	return ret;
}
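/*
 * Worker that pins the user pages with get_user_pages() under mmap_sem and
 * then, under struct_mutex, either installs the resulting sg_table on the
 * object or records the error in obj->userptr.work.
 */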
static void
__i915_gem_userptr_get_pages_worker(struct work_struct *_work)
{
	struct get_pages_work *work = container_of(_work, typeof(*work), work);
	struct drm_i915_gem_object *obj = work->obj;
	struct drm_device *dev = obj->base.dev;
	const int num_pages = obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	int pinned, ret;

	ret = -ENOMEM;
	pinned = 0;

	pvec = kmalloc(num_pages*sizeof(struct page *),
		       GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	if (pvec == NULL)
		pvec = drm_malloc_ab(num_pages, sizeof(struct page *));
	if (pvec != NULL) {
		struct mm_struct *mm = obj->userptr.mm->mm;

		down_read(&mm->mmap_sem);
		while (pinned < num_pages) {
			ret = get_user_pages(work->task, mm,
					     obj->userptr.ptr + pinned * PAGE_SIZE,
					     num_pages - pinned,
					     !obj->userptr.read_only, 0,
					     pvec + pinned, NULL);
			if (ret < 0)
				break;

			pinned += ret;
		}
		up_read(&mm->mmap_sem);
	}

	mutex_lock(&dev->struct_mutex);
	if (obj->userptr.work != &work->work) {
		ret = 0;
	} else if (pinned == num_pages) {
		ret = st_set_pages(&obj->pages, pvec, num_pages);
		if (ret == 0) {
			list_add_tail(&obj->global_list, &to_i915(dev)->mm.unbound_list);
			pinned = 0;
		}
	}

	obj->userptr.work = ERR_PTR(ret);
	obj->userptr.workers--;
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);

	release_pages(pvec, pinned, 0);
	drm_free_large(pvec);

	put_task_struct(work->task);
	kfree(work);
}
static int
i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
	const int num_pages = obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	int pinned, ret;

	/* If userspace should engineer that these pages are replaced in
	 * the vma between us binding this page into the GTT and completion
	 * of rendering... Their loss. If they change the mapping of their
	 * pages they need to create a new bo to point to the new vma.
	 *
	 * However, that still leaves open the possibility of the vma
	 * being copied upon fork. Which falls under the same userspace
	 * synchronisation issue as a regular bo, except that this time
	 * the process may not be expecting that a particular piece of
	 * memory is tied to the GPU.
	 *
	 * Fortunately, we can hook into the mmu_notifier in order to
	 * discard the page references prior to anything nasty happening
	 * to the vma (discard or cloning) which should prevent the more
	 * egregious cases from causing harm.
	 */

	pvec = NULL;
	pinned = 0;
	if (obj->userptr.mm->mm == current->mm) {
		pvec = kmalloc(num_pages*sizeof(struct page *),
			       GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
		if (pvec == NULL) {
			pvec = drm_malloc_ab(num_pages, sizeof(struct page *));
			if (pvec == NULL)
				return -ENOMEM;
		}

		pinned = __get_user_pages_fast(obj->userptr.ptr, num_pages,
					       !obj->userptr.read_only, pvec);
	}
	if (pinned < num_pages) {
		if (pinned < 0) {
			ret = pinned;
			pinned = 0;
		} else {
			/* Spawn a worker so that we can acquire the
			 * user pages without holding our mutex. Access
			 * to the user pages requires mmap_sem, and we have
			 * a strict lock ordering of mmap_sem, struct_mutex -
			 * we already hold struct_mutex here and so cannot
			 * call gup without encountering a lock inversion.
			 *
			 * Userspace will keep on repeating the operation
			 * (thanks to EAGAIN) until either we hit the fast
			 * path or the worker completes. If the worker is
			 * cancelled or superseded, the task is still run
			 * but the results ignored. (This leads to
			 * complications that we may have a stray object
			 * refcount that we need to be wary of when
			 * checking for existing objects during creation.)
			 * If the worker encounters an error, it reports
			 * that error back to this function through
			 * obj->userptr.work = ERR_PTR.
			 */
			ret = -EAGAIN;
			if (obj->userptr.work == NULL &&
			    obj->userptr.workers < I915_GEM_USERPTR_MAX_WORKERS) {
				struct get_pages_work *work;

				work = kmalloc(sizeof(*work), GFP_KERNEL);
				if (work != NULL) {
					obj->userptr.work = &work->work;
					obj->userptr.workers++;

					work->obj = obj;
					drm_gem_object_reference(&obj->base);

					work->task = current;
					get_task_struct(work->task);

					INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker);
					schedule_work(&work->work);
				} else
					ret = -ENOMEM;
			} else {
				if (IS_ERR(obj->userptr.work)) {
					ret = PTR_ERR(obj->userptr.work);
					obj->userptr.work = NULL;
				}
			}
		}
	} else {
		ret = st_set_pages(&obj->pages, pvec, num_pages);
		if (ret == 0) {
			obj->userptr.work = NULL;
			pinned = 0;
		}
	}

	release_pages(pvec, pinned, 0);
	drm_free_large(pvec);
	return ret;
}
static void
i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
{
	struct sg_page_iter sg_iter;

	BUG_ON(obj->userptr.work != NULL);

	if (obj->madv != I915_MADV_WILLNEED)
		obj->dirty = 0;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
		struct page *page = sg_page_iter_page(&sg_iter);

		if (obj->dirty)
			set_page_dirty(page);

		mark_page_accessed(page);
		page_cache_release(page);
	}
	obj->dirty = 0;

	sg_free_table(obj->pages);
	kfree(obj->pages);
}
static void
i915_gem_userptr_release(struct drm_i915_gem_object *obj)
{
	i915_gem_userptr_release__mmu_notifier(obj);
	i915_gem_userptr_release__mm_struct(obj);
}
static int
i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
{
	if (obj->userptr.mmu_object)
		return 0;

	return i915_gem_userptr_init__mmu_notifier(obj, 0);
}
static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
	.dmabuf_export = i915_gem_userptr_dmabuf_export,
	.get_pages = i915_gem_userptr_get_pages,
	.put_pages = i915_gem_userptr_put_pages,
	.release = i915_gem_userptr_release,
};
/**
 * Creates a new mm object that wraps some normal memory from the process
 * context - user memory.
 *
 * We impose several restrictions upon the memory being mapped
 * into the GPU.
 * 1. It must be page aligned (both start/end addresses, i.e. ptr and size).
 * 2. It must be normal system memory, not a pointer into another map of IO
 *    space (e.g. it must not be a GTT mmapping of another object).
 * 3. We only allow a bo as large as we could in theory map into the GTT,
 *    that is we limit the size to the total size of the GTT.
 * 4. The bo is marked as being snoopable. The backing pages are left
 *    accessible directly by the CPU, but reads and writes by the GPU may
 *    incur the cost of a snoop (unless you have an LLC architecture).
 *
 * Synchronisation between multiple users and the GPU is left to userspace
 * through the normal set-domain-ioctl. The kernel will enforce that the
 * GPU relinquishes the VMA before it is returned back to the system
 * i.e. upon free(), munmap() or process termination. However, the userspace
 * malloc() library may not immediately relinquish the VMA after free() and
 * instead reuse it whilst the GPU is still reading and writing to the VMA.
 * Caveat emptor.
 *
 * Also note, that the object created here is not currently a "first class"
 * object, in that several ioctls are banned. These are the CPU access
 * ioctls: mmap(), pwrite and pread. In practice, you are expected to use
 * direct access via your pointer rather than use those ioctls.
 *
 * If you think this is a good interface to use to pass GPU memory between
 * drivers, please use dma-buf instead. In fact, wherever possible use
 * dma-buf instead.
 */
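/*
 * A minimal userspace sketch of driving this ioctl (illustrative only;
 * assumes libdrm's drmIoctl() and an already-open device fd, and elides
 * error handling):
 *
 *	struct drm_i915_gem_userptr arg = {};
 *	void *ptr;
 *
 *	posix_memalign(&ptr, 4096, size);	// ptr and size page aligned
 *	arg.user_ptr = (__u64)(uintptr_t)ptr;
 *	arg.user_size = size;
 *	arg.flags = 0;
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &arg) == 0)
 *		use_gem_handle(arg.handle);	// hypothetical consumer of the new handle
 */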
int
i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_userptr *args = data;
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	if (args->flags & ~(I915_USERPTR_READ_ONLY |
			    I915_USERPTR_UNSYNCHRONIZED))
		return -EINVAL;

	if (offset_in_page(args->user_ptr | args->user_size))
		return -EINVAL;

	if (args->user_size > dev_priv->gtt.base.total)
		return -E2BIG;

	if (!access_ok(args->flags & I915_USERPTR_READ_ONLY ? VERIFY_READ : VERIFY_WRITE,
		       (char __user *)(unsigned long)args->user_ptr, args->user_size))
		return -EFAULT;

	if (args->flags & I915_USERPTR_READ_ONLY) {
		/* On almost all of the current hw, we cannot tell the GPU that a
		 * page is readonly, so this is just a placeholder in the uAPI.
		 */
		return -ENODEV;
	}

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL)
		return -ENOMEM;

	drm_gem_private_object_init(dev, &obj->base, args->user_size);
	i915_gem_object_init(obj, &i915_gem_userptr_ops);
	obj->cache_level = I915_CACHE_LLC;
	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;

	obj->userptr.ptr = args->user_ptr;
	obj->userptr.read_only = !!(args->flags & I915_USERPTR_READ_ONLY);

	/* And keep a pointer to the current->mm for resolving the user pages
	 * at binding. This means that we need to hook into the mmu_notifier
	 * in order to detect if the mmu is destroyed.
	 */
	ret = i915_gem_userptr_init__mm_struct(obj);
	if (ret == 0)
		ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags);
	if (ret == 0)
		ret = drm_gem_handle_create(file, &obj->base, &handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(&obj->base);
	if (ret)
		return ret;

	args->handle = handle;
	return 0;
}
int
i915_gem_init_userptr(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	mutex_init(&dev_priv->mm_lock);
	hash_init(dev_priv->mm_structs);
	return 0;
}