/**
 * \file drm_vm.c
 * Memory mapping for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#include <linux/export.h>
#if defined(__ia64__)
#include <linux/efi.h>
#include <linux/slab.h>
#endif

static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);

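/*
 * Compute the page protection for an I/O map (registers, frame buffer):
 * start from the vma's default protection and mark the pages uncached or
 * write-combined as each architecture requires.
 */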
static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
{
	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__i386__) || defined(__x86_64__)
	if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
		pgprot_val(tmp) |= _PAGE_PCD;
		pgprot_val(tmp) &= ~_PAGE_PWT;
	}
#elif defined(__powerpc__)
	pgprot_val(tmp) |= _PAGE_NO_CACHE;
	if (map_type == _DRM_REGISTERS)
		pgprot_val(tmp) |= _PAGE_GUARDED;
#elif defined(__ia64__)
	if (efi_range_is_wc(vma->vm_start, vma->vm_end -
			    vma->vm_start))
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#elif defined(__sparc__) || defined(__arm__)
	tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}

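/*
 * Compute the page protection for DMA and scatter-gather maps; only
 * non-coherent powerpc needs the pages marked uncached here.
 */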
static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
{
	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
	tmp |= _PAGE_NO_CACHE;
#endif
	return tmp;
}

/**
 * \c fault method for AGP virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Find the right map and if it's AGP memory find the real physical page to
 * map, get the page, increment the use count and return it.
 */
#if __OS_HAS_AGP
static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_local_map *map = NULL;
	struct drm_map_list *r_list;
	struct drm_hash_item *hash;

	/*
	 * Find the right map
	 */
	if (!drm_core_has_AGP(dev))
		goto vm_fault_error;

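	/*
	 * This fault path only matters when the CPU cannot access the AGP
	 * aperture directly (cant_use_aperture); otherwise the aperture was
	 * remapped up front in drm_mmap_locked() and no faults occur here.
	 */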
	if (!dev->agp || !dev->agp->cant_use_aperture)
		goto vm_fault_error;

	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
		goto vm_fault_error;

	r_list = drm_hash_entry(hash, struct drm_map_list, hash);
	map = r_list->map;

	if (map && map->type == _DRM_AGP) {
		/*
		 * Using vm_pgoff as a selector forces us to use this unusual
		 * addressing scheme.
		 */
		resource_size_t offset = (unsigned long)vmf->virtual_address -
			vma->vm_start;
		resource_size_t baddr = map->offset + offset;
		struct drm_agp_mem *agpmem;
		struct page *page;

#ifdef __alpha__
		/*
		 * Adjust to a bus-relative address
		 */
		baddr -= dev->hose->mem_space->start;
#endif

		/*
		 * It's AGP memory - find the real physical page to map
		 */
		list_for_each_entry(agpmem, &dev->agp->memory, head) {
			if (agpmem->bound <= baddr &&
			    agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
				break;
		}

		if (&agpmem->head == &dev->agp->memory)
			goto vm_fault_error;

		/*
		 * Get the page, inc the use count, and return it
		 */
		offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
		page = agpmem->memory->pages[offset];
		get_page(page);
		vmf->page = page;

		DRM_DEBUG
		    ("baddr = 0x%llx page = 0x%p, offset = 0x%llx, count=%d\n",
		     (unsigned long long)baddr,
		     agpmem->memory->pages[offset],
		     (unsigned long long)offset,
		     page_count(page));
		return 0;
	}
vm_fault_error:
	return VM_FAULT_SIGBUS;	/* Disallow mremap */
}
#else				/* __OS_HAS_AGP */
static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
#endif				/* __OS_HAS_AGP */

/**
 * \c fault method for shared virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Get the mapping, find the real physical page to map, get the page, and
 * return it.
 */
static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_local_map *map = vma->vm_private_data;
	unsigned long offset;
	unsigned long i;
	struct page *page;

	if (!map)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = (unsigned long)vmf->virtual_address - vma->vm_start;
	i = (unsigned long)map->handle + offset;
	page = vmalloc_to_page((void *)i);
	if (!page)
		return VM_FAULT_SIGBUS;
	get_page(page);
	vmf->page = page;

	DRM_DEBUG("shm_fault 0x%lx\n", offset);
	return 0;
}

/**
 * \c close method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Deletes map information if we are the last
 * person to close a mapping and it's not in the global maplist.
 */
static void drm_vm_shm_close(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_vma_entry *pt, *temp;
	struct drm_local_map *map;
	struct drm_map_list *r_list;
	int found_maps = 0;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_dec(&dev->vma_count);

	map = vma->vm_private_data;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
		if (pt->vma->vm_private_data == map)
			found_maps++;
		if (pt->vma == vma) {
			list_del(&pt->head);
			kfree(pt);
		}
	}

	/* We were the only map that was found */
	if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
		/* Check to see if we are in the maplist, if we are not, then
		 * we delete this mapping's information.
		 */
		found_maps = 0;
		list_for_each_entry(r_list, &dev->maplist, head) {
			if (r_list->map == map)
				found_maps++;
		}

		if (!found_maps) {
			drm_dma_handle_t dmah;

			switch (map->type) {
			case _DRM_REGISTERS:
			case _DRM_FRAME_BUFFER:
				if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
					int retcode;
					retcode = mtrr_del(map->mtrr,
							   map->offset,
							   map->size);
					DRM_DEBUG("mtrr_del = %d\n", retcode);
				}
				iounmap(map->handle);
				break;
			case _DRM_SHM:
				vfree(map->handle);
				break;
			case _DRM_AGP:
			case _DRM_SCATTER_GATHER:
				break;
			case _DRM_CONSISTENT:
				dmah.vaddr = map->handle;
				dmah.busaddr = map->offset;
				dmah.size = map->size;
				__drm_pci_free(dev, &dmah);
				break;
			case _DRM_GEM:
				DRM_ERROR("tried to rmmap GEM object\n");
				break;
			}
			kfree(map);
		}
	}
	mutex_unlock(&dev->struct_mutex);
}

/**
 * \c fault method for DMA virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
 */
static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_device_dma *dma = dev->dma;
	unsigned long offset;
	unsigned long page_nr;
	struct page *page;

	if (!dma)
		return VM_FAULT_SIGBUS;	/* Error */
	if (!dma->pagelist)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = (unsigned long)vmf->virtual_address - vma->vm_start;	/* vm_[pg]off[set] should be 0 */
	page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
	page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK))));

	get_page(page);
	vmf->page = page;

	DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
	return 0;
}

/**
 * \c fault method for scatter-gather virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
 */
static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_local_map *map = vma->vm_private_data;
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_sg_mem *entry = dev->sg;
	unsigned long offset;
	unsigned long map_offset;
	unsigned long page_offset;
	struct page *page;

	if (!entry)
		return VM_FAULT_SIGBUS;	/* Error */
	if (!entry->pagelist)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = (unsigned long)vmf->virtual_address - vma->vm_start;
	map_offset = map->offset - (unsigned long)dev->sg->virtual;
	page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
	page = entry->pagelist[page_offset];
	get_page(page);
	vmf->page = page;

	return 0;
}

static int drm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_fault(vma, vmf);
}

static int drm_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_shm_fault(vma, vmf);
}

static int drm_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_dma_fault(vma, vmf);
}

static int drm_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_sg_fault(vma, vmf);
}

/** AGP virtual memory operations */
static const struct vm_operations_struct drm_vm_ops = {
	.fault = drm_vm_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Shared virtual memory operations */
static const struct vm_operations_struct drm_vm_shm_ops = {
	.fault = drm_vm_shm_fault,
	.open = drm_vm_open,
	.close = drm_vm_shm_close,
};

/** DMA virtual memory operations */
static const struct vm_operations_struct drm_vm_dma_ops = {
	.fault = drm_vm_dma_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Scatter-gather virtual memory operations */
static const struct vm_operations_struct drm_vm_sg_ops = {
	.fault = drm_vm_sg_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

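/*
 * drm_mmap_locked() below picks one of the tables above based on map->type:
 * _DRM_AGP and register/framebuffer maps use drm_vm_ops, _DRM_SHM and
 * _DRM_CONSISTENT use drm_vm_shm_ops, scatter-gather maps use drm_vm_sg_ops,
 * and offset-zero DMA mappings use drm_vm_dma_ops.
 */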
/**
 * \c open method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Create a new drm_vma_entry structure as the \p vma private data entry and
 * add it to drm_device::vmalist.
 */
void drm_vm_open_locked(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_vma_entry *vma_entry;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_inc(&dev->vma_count);

	vma_entry = kmalloc(sizeof(*vma_entry), GFP_KERNEL);
	if (vma_entry) {
		vma_entry->vma = vma;
		vma_entry->pid = current->pid;
		list_add(&vma_entry->head, &dev->vmalist);
	}
}

static void drm_vm_open(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_open_locked(vma);
	mutex_unlock(&dev->struct_mutex);
}

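/*
 * Unlink and free the drm_vma_entry for \p vma; the caller must already hold
 * dev->struct_mutex (drm_vm_close() below is the locking wrapper).
 */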
void drm_vm_close_locked(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_vma_entry *pt, *temp;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_dec(&dev->vma_count);

	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
		if (pt->vma == vma) {
			list_del(&pt->head);
			kfree(pt);
			break;
		}
	}
}

/**
 * \c close method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
 * free it.
 */
static void drm_vm_close(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_close_locked(vma);
	mutex_unlock(&dev->struct_mutex);
}

/**
 * mmap DMA memory.
 *
 * \param file_priv DRM file private.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Sets the virtual memory area operations structure to vm_dma_ops, the file
 * pointer, and calls vm_open().
 */
static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev;
	struct drm_device_dma *dma;
	unsigned long length = vma->vm_end - vma->vm_start;

	dev = priv->minor->dev;
	dma = dev->dma;
	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	/* Length must match exact page count */
	if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
		return -EINVAL;
	}

	if (!capable(CAP_SYS_ADMIN) &&
	    (dma->flags & _DRM_DMA_USE_PCI_RO)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly. With more thought
		   we could move this up higher and use
		   `protection_map' instead. */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	vma->vm_ops = &drm_vm_dma_ops;

	vma->vm_flags |= VM_RESERVED;	/* Don't swap */
	vma->vm_flags |= VM_DONTEXPAND;

	drm_vm_open_locked(vma);
	return 0;
}

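/*
 * Base offset that must be added to register/framebuffer map offsets before
 * remapping; only Alpha needs a non-zero value (the hose's dense memory base).
 */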
static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
{
#ifdef __alpha__
	return dev->hose->dense_mem_base;
#else
	return 0;
#endif
}

/**
 * mmap a DRM memory map.
 *
 * \param file_priv DRM file private.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * If the virtual memory area has no offset associated with it then it's a DMA
 * area, so calls mmap_dma(). Otherwise searches the map in drm_device::maplist,
 * checks that the restricted flag is not set, sets the virtual memory operations
 * according to the mapping type and remaps the pages. Finally sets the file
 * pointer and calls vm_open().
 */
int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_local_map *map = NULL;
	resource_size_t offset = 0;
	struct drm_hash_item *hash;

	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	if (!priv->authenticated)
		return -EACCES;

	/* We check for "dma". On Apple's UniNorth, it's valid to have
	 * the AGP mapped at physical address 0
	 * --BenH.
	 */
	if (!vma->vm_pgoff
#if __OS_HAS_AGP
	    && (!dev->agp
		|| dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
	    )
		return drm_mmap_dma(filp, vma);

	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
		DRM_ERROR("Could not find map\n");
		return -EINVAL;
	}

	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
	if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
		return -EPERM;

	/* Check for valid size. */
	if (map->size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly. With more thought
		   we could move this up higher and use
		   `protection_map' instead. */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

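	/*
	 * Hook the vma up according to the map type: register, frame buffer
	 * and consistent maps are remapped here in one go, while SHM,
	 * scatter-gather and non-aperture AGP maps have their pages filled
	 * in on demand by the fault handlers above.
	 */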
	switch (map->type) {
#if !defined(__arm__)
	case _DRM_AGP:
		if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) {
			/*
			 * On some platforms we can't talk to bus dma address from the CPU, so for
			 * memory of type DRM_AGP, we'll deal with sorting out the real physical
			 * pages and mappings in fault()
			 */
#if defined(__powerpc__)
			pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
#endif
			vma->vm_ops = &drm_vm_ops;
			break;
		}
		/* fall through to _DRM_FRAME_BUFFER... */
#endif
	case _DRM_FRAME_BUFFER:
	case _DRM_REGISTERS:
		offset = drm_core_get_reg_ofs(dev);
		vma->vm_flags |= VM_IO;	/* not in core dump */
		vma->vm_page_prot = drm_io_prot(map->type, vma);
#if !defined(__arm__)
		if (io_remap_pfn_range(vma, vma->vm_start,
				       (map->offset + offset) >> PAGE_SHIFT,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot))
			return -EAGAIN;
#else
		if (remap_pfn_range(vma, vma->vm_start,
				    (map->offset + offset) >> PAGE_SHIFT,
				    vma->vm_end - vma->vm_start,
				    vma->vm_page_prot))
			return -EAGAIN;
#endif

		DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
			  " offset = 0x%llx\n",
			  map->type,
			  vma->vm_start, vma->vm_end, (unsigned long long)(map->offset + offset));

		vma->vm_ops = &drm_vm_ops;
		break;
	case _DRM_CONSISTENT:
		/* Consistent memory is really like shared memory. But
		 * it's allocated in a different way, so avoid fault */
		if (remap_pfn_range(vma, vma->vm_start,
				    page_to_pfn(virt_to_page(map->handle)),
				    vma->vm_end - vma->vm_start, vma->vm_page_prot))
			return -EAGAIN;
		vma->vm_page_prot = drm_dma_prot(map->type, vma);
		/* fall through to _DRM_SHM */
	case _DRM_SHM:
		vma->vm_ops = &drm_vm_shm_ops;
		vma->vm_private_data = (void *)map;
		/* Don't let this area swap. Change when
		   DRM_KERNEL advisory is supported. */
		vma->vm_flags |= VM_RESERVED;
		break;
	case _DRM_SCATTER_GATHER:
		vma->vm_ops = &drm_vm_sg_ops;
		vma->vm_private_data = (void *)map;
		vma->vm_flags |= VM_RESERVED;
		vma->vm_page_prot = drm_dma_prot(map->type, vma);
		break;
	default:
		return -EINVAL;	/* This should never happen. */
	}
	vma->vm_flags |= VM_RESERVED;	/* Don't swap */
	vma->vm_flags |= VM_DONTEXPAND;

	drm_vm_open_locked(vma);
	return 0;
}

int drm_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	int ret;

	if (drm_device_is_unplugged(dev))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	ret = drm_mmap_locked(filp, vma);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_mmap);
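
/*
 * Usage sketch (illustrative only, not part of the code above): a legacy
 * userspace client takes the map offset it obtained from the driver's map
 * setup and hands it to mmap() on the DRM device node; that offset selects
 * the map via dev->map_hash in drm_mmap_locked() above. The names drm_fd,
 * map_offset and map_size are assumed to come from the caller's earlier
 * setup (e.g. libdrm's drmMap() wraps this same path).
 *
 *	void *ptr = mmap(NULL, map_size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, drm_fd, map_offset);
 */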