/**
 * \file drm_bufs.c
 * Generic buffer template
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/export.h>
#include <asm/shmparam.h>
#include <drm/drmP.h>

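/**
 * Search drm_device::maplist for an existing map compatible with \p map.
 *
 * \param dev DRM device.
 * \param map local map to look up.
 * \return the matching map list entry, or NULL if no compatible map exists.
 */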
static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
						  struct drm_local_map *map)
{
	struct drm_map_list *entry;
	list_for_each_entry(entry, &dev->maplist, head) {
		/*
		 * Because the kernel-userspace ABI is fixed at a 32-bit offset
		 * while PCI resources may live above that, we only compare the
		 * lower 32 bits of the map offset for maps of type
		 * _DRM_FRAME_BUFFER or _DRM_REGISTERS.
		 * It is assumed that if a driver has more than one resource
		 * of each type, the lower 32 bits are different.
		 */
		if (!entry->map ||
		    map->type != entry->map->type ||
		    entry->master != dev->primary->master)
			continue;
		switch (map->type) {
		case _DRM_SHM:
			if (map->flags != _DRM_CONTAINS_LOCK)
				break;
			return entry;
		case _DRM_REGISTERS:
		case _DRM_FRAME_BUFFER:
			if ((entry->map->offset & 0xffffffff) ==
			    (map->offset & 0xffffffff))
				return entry;
		default: /* Make gcc happy */
			;
		}
		if (entry->map->offset == map->offset)
			return entry;
	}

	return NULL;
}

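/**
 * Compute a unique 32-bit hash handle for a map.
 *
 * A non-hashed handle simply reuses the page-shifted user token.  When that
 * collides, or the token does not fit in 32 bits, a handle is picked from
 * the DRM_MAP_HASH_OFFSET range instead; for SHM maps the SHMLBA-relevant
 * low bits of the token are preserved to avoid cache aliasing (see the
 * comment in the body).
 *
 * \param dev DRM device.
 * \param hash hash item to fill in with the chosen key.
 * \param user_token token that userspace will later pass to mmap().
 * \param hashed_handle force use of a hashed handle.
 * \param shm non-zero for _DRM_SHM maps.
 * \return zero on success or a negative errno on failure.
 */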
static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
			  unsigned long user_token, int hashed_handle, int shm)
{
	int use_hashed_handle, shift;
	unsigned long add;

#if (BITS_PER_LONG == 64)
	use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
#elif (BITS_PER_LONG == 32)
	use_hashed_handle = hashed_handle;
#else
#error Unsupported long size. Neither 64 nor 32 bits.
#endif

	if (!use_hashed_handle) {
		int ret;
		hash->key = user_token >> PAGE_SHIFT;
		ret = drm_ht_insert_item(&dev->map_hash, hash);
		if (ret != -EINVAL)
			return ret;
	}

	shift = 0;
	add = DRM_MAP_HASH_OFFSET >> PAGE_SHIFT;
	if (shm && (SHMLBA > PAGE_SIZE)) {
		int bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1;

		/* For shared memory, we have to preserve the SHMLBA
		 * bits of the eventual vma->vm_pgoff value during
		 * mmap().  Otherwise we run into cache aliasing problems
		 * on some platforms.  On these platforms, the pgoff of
		 * a mmap() request is used to pick a suitable virtual
		 * address for the mmap() region such that it will not
		 * cause cache aliasing problems.
		 *
		 * Therefore, make sure the SHMLBA relevant bits of the
		 * hash value we use are equal to those in the original
		 * kernel virtual address.
		 */
		shift = bits;
		add |= ((user_token >> PAGE_SHIFT) & ((1UL << bits) - 1UL));
	}

	return drm_ht_just_insert_please(&dev->map_hash, hash,
					 user_token, 32 - PAGE_SHIFT - 3,
					 shift, add);
}

/**
 * Core function to create a range of memory available for mapping by a
 * non-root process.
 *
 * Adjusts the memory offset to its absolute value according to the mapping
 * type.  Adds the map to the map list drm_device::maplist.  Adds MTRRs where
 * applicable and if supported by the kernel.
 */
static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
			   unsigned int size, enum drm_map_type type,
			   enum drm_map_flags flags,
			   struct drm_map_list ** maplist)
{
	struct drm_local_map *map;
	struct drm_map_list *list;
	drm_dma_handle_t *dmah;
	unsigned long user_token;
	int ret;

	map = kmalloc(sizeof(*map), GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	map->offset = offset;
	map->size = size;
	map->flags = flags;
	map->type = type;

	/* Only allow shared memory to be removable since we only keep enough
	 * bookkeeping information about shared memory to allow for removal
	 * when processes fork.
	 */
	if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
		kfree(map);
		return -EINVAL;
	}
	DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
		  (unsigned long long)map->offset, map->size, map->type);

	/* page-align _DRM_SHM maps.  They are allocated here so there is no
	 * security hole created by that, and it works around various broken
	 * drivers that use a non-aligned quantity to map the SAREA. --BenH
	 */
	if (map->type == _DRM_SHM)
		map->size = PAGE_ALIGN(map->size);

	if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
		kfree(map);
		return -EINVAL;
	}
	map->mtrr = -1;
	map->handle = NULL;

	switch (map->type) {
	case _DRM_REGISTERS:
	case _DRM_FRAME_BUFFER:
#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__arm__)
		if (map->offset + (map->size - 1) < map->offset ||
		    map->offset < virt_to_phys(high_memory)) {
			kfree(map);
			return -EINVAL;
		}
#endif
		/* Some drivers preinitialize some maps, without the X Server
		 * needing to be aware of it.  Therefore, we just return success
		 * when the server tries to create a duplicate map.
		 */
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size,
					  list->map->size);
				list->map->size = map->size;
			}

			kfree(map);
			*maplist = list;
			return 0;
		}

		if (map->type == _DRM_FRAME_BUFFER ||
		    (map->flags & _DRM_WRITE_COMBINING)) {
			map->mtrr =
				arch_phys_wc_add(map->offset, map->size);
		}
		if (map->type == _DRM_REGISTERS) {
			if (map->flags & _DRM_WRITE_COMBINING)
				map->handle = ioremap_wc(map->offset,
							 map->size);
			else
				map->handle = ioremap(map->offset, map->size);
			if (!map->handle) {
				kfree(map);
				return -ENOMEM;
			}
		}

		break;
	case _DRM_SHM:
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size, list->map->size);
				list->map->size = map->size;
			}

			kfree(map);
			*maplist = list;
			return 0;
		}
		map->handle = vmalloc_user(map->size);
		DRM_DEBUG("%lu %d %p\n",
			  map->size, order_base_2(map->size), map->handle);
		if (!map->handle) {
			kfree(map);
			return -ENOMEM;
		}
		map->offset = (unsigned long)map->handle;
		if (map->flags & _DRM_CONTAINS_LOCK) {
			/* Prevent a 2nd X Server from creating a 2nd lock */
			if (dev->primary->master->lock.hw_lock != NULL) {
				vfree(map->handle);
				kfree(map);
				return -EBUSY;
			}
			dev->sigdata.lock = dev->primary->master->lock.hw_lock = map->handle;	/* Pointer to lock */
		}
		break;
	case _DRM_AGP: {
		struct drm_agp_mem *entry;
		int valid = 0;

		if (!dev->agp) {
			kfree(map);
			return -EINVAL;
		}
#ifdef __alpha__
		map->offset += dev->hose->mem_space->start;
#endif
		/* In some cases (i810 driver), user space may have already
		 * added the AGP base itself, because dev->agp->base previously
		 * only got set during AGP enable.  So, only add the base
		 * address if the map's offset isn't already within the
		 * aperture.
		 */
		if (map->offset < dev->agp->base ||
		    map->offset > dev->agp->base +
		    dev->agp->agp_info.aper_size * 1024 * 1024 - 1) {
			map->offset += dev->agp->base;
		}
		map->mtrr = dev->agp->agp_mtrr;	/* for getmap */

		/* This assumes the DRM is in total control of AGP space.
		 * It's not always the case as AGP can be in the control
		 * of user space (i.e. i810 driver).  So this loop will get
		 * skipped and we double check that dev->agp->memory is
		 * actually set as well as being invalid before EPERM'ing.
		 */
		list_for_each_entry(entry, &dev->agp->memory, head) {
			if ((map->offset >= entry->bound) &&
			    (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
				valid = 1;
				break;
			}
		}
		if (!list_empty(&dev->agp->memory) && !valid) {
			kfree(map);
			return -EPERM;
		}
		DRM_DEBUG("AGP offset = 0x%08llx, size = 0x%08lx\n",
			  (unsigned long long)map->offset, map->size);

		break;
	}
	case _DRM_SCATTER_GATHER:
		if (!dev->sg) {
			kfree(map);
			return -EINVAL;
		}
		map->offset += (unsigned long)dev->sg->virtual;
		break;
	case _DRM_CONSISTENT:
		/* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G.
		 * As we're limiting the address to 2^32-1 (or less),
		 * casting it down to 32 bits is no problem, but we
		 * need to point to a 64bit variable first. */
		dmah = drm_pci_alloc(dev, map->size, map->size);
		if (!dmah) {
			kfree(map);
			return -ENOMEM;
		}
		map->handle = dmah->vaddr;
		map->offset = (unsigned long)dmah->busaddr;
		kfree(dmah);
		break;
	default:
		kfree(map);
		return -EINVAL;
	}

	list = kzalloc(sizeof(*list), GFP_KERNEL);
	if (!list) {
		if (map->type == _DRM_REGISTERS)
			iounmap(map->handle);
		kfree(map);
		return -EINVAL;
	}
	list->map = map;

	mutex_lock(&dev->struct_mutex);
	list_add(&list->head, &dev->maplist);

	/* Assign a 32-bit handle */
	/* We do it here so that dev->struct_mutex protects the increment */
	user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
		map->offset;
	ret = drm_map_handle(dev, &list->hash, user_token, 0,
			     (map->type == _DRM_SHM));
	if (ret) {
		if (map->type == _DRM_REGISTERS)
			iounmap(map->handle);
		kfree(map);
		kfree(list);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	list->user_token = list->hash.key << PAGE_SHIFT;
	mutex_unlock(&dev->struct_mutex);

	if (!(map->flags & _DRM_DRIVER))
		list->master = dev->primary->master;
	*maplist = list;
	return 0;
}

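/**
 * Create a mapping usable by non-root processes and return the local map.
 *
 * Thin wrapper around drm_addmap_core() that hands back the drm_local_map
 * instead of the whole map list entry.
 *
 * \param dev DRM device.
 * \param offset physical (or type-relative) offset of the region.
 * \param size size of the region in bytes.
 * \param type memory type (registers, frame buffer, SHM, AGP, SG, ...).
 * \param flags map flags.
 * \param map_ptr filled in with the new map on success.
 * \return zero on success or a negative errno on failure.
 *
 * A minimal, hypothetical driver-side sketch, mapping BAR 0 of a PCI device
 * as register space -- the exact resource and flags depend on the driver:
 *
 *	struct drm_local_map *map;
 *	int ret = drm_addmap(dev, pci_resource_start(dev->pdev, 0),
 *			     pci_resource_len(dev->pdev, 0),
 *			     _DRM_REGISTERS, _DRM_READ_ONLY, &map);
 */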
int drm_addmap(struct drm_device * dev, resource_size_t offset,
	       unsigned int size, enum drm_map_type type,
	       enum drm_map_flags flags, struct drm_local_map ** map_ptr)
{
	struct drm_map_list *list;
	int rc;

	rc = drm_addmap_core(dev, offset, size, type, flags, &list);
	if (!rc)
		*map_ptr = list->map;
	return rc;
}

EXPORT_SYMBOL(drm_addmap);

/**
 * Ioctl to specify a range of memory that is available for mapping by a
 * non-root process.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_map structure.
 * \return zero on success or a negative value on error.
 *
 */
int drm_addmap_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_map *map = data;
	struct drm_map_list *maplist;
	int err;

	if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM))
		return -EPERM;

	err = drm_addmap_core(dev, map->offset, map->size, map->type,
			      map->flags, &maplist);

	if (err)
		return err;

	/* avoid a warning on 64-bit: this casting isn't very nice, but the
	 * API is set, so it's too late to change it */
	map->handle = (void *)(unsigned long)maplist->user_token;

	/*
	 * It appears that there are no users of this value whatsoever --
	 * drmAddMap just discards it.  Let's not encourage its use.
	 * (Keeping drm_addmap_core's returned mtrr value would be wrong --
	 * it's not a real mtrr index anymore.)
	 */
	map->mtrr = -1;

	return 0;
}

/**
 * Remove a private map from the list and deallocate resources if the mapping
 * isn't in use.
 *
 * Searches for the map on drm_device::maplist, removes it from the list,
 * checks whether it is being used, and frees any associated resources
 * (such as MTRRs) if it is not in use.
 *
 * \sa drm_addmap
 */
int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
{
	struct drm_map_list *r_list = NULL, *list_t;
	drm_dma_handle_t dmah;
	int found = 0;
	struct drm_master *master;

	/* Find the list entry for the map and remove it */
	list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
		if (r_list->map == map) {
			master = r_list->master;
			list_del(&r_list->head);
			drm_ht_remove_key(&dev->map_hash,
					  r_list->user_token >> PAGE_SHIFT);
			kfree(r_list);
			found = 1;
			break;
		}
	}

	if (!found)
		return -EINVAL;

	switch (map->type) {
	case _DRM_REGISTERS:
		iounmap(map->handle);
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		arch_phys_wc_del(map->mtrr);
		break;
	case _DRM_SHM:
		vfree(map->handle);
		if (master) {
			if (dev->sigdata.lock == master->lock.hw_lock)
				dev->sigdata.lock = NULL;
			master->lock.hw_lock = NULL;	/* SHM removed */
			master->lock.file_priv = NULL;
			wake_up_interruptible_all(&master->lock.lock_queue);
		}
		break;
	case _DRM_AGP:
	case _DRM_SCATTER_GATHER:
		break;
	case _DRM_CONSISTENT:
		dmah.vaddr = map->handle;
		dmah.busaddr = map->offset;
		dmah.size = map->size;
		__drm_pci_free(dev, &dmah);
		break;
	}
	kfree(map);

	return 0;
}
EXPORT_SYMBOL(drm_rmmap_locked);

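/**
 * Remove a map, taking drm_device::struct_mutex around drm_rmmap_locked().
 *
 * \param dev DRM device.
 * \param map map to remove.
 * \return zero on success or a negative errno on failure.
 */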
int drm_rmmap(struct drm_device *dev, struct drm_local_map *map)
{
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = drm_rmmap_locked(dev, map);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_rmmap);

/* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
 * the last close of the device, and this is necessary for cleanup when things
 * exit uncleanly.  Therefore, having userland manually remove mappings seems
 * like a pointless exercise since they're going away anyway.
 *
 * One use case might be after addmap is allowed for normal users for SHM and
 * gets used by drivers that the server doesn't need to care about.  This seems
 * unlikely.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a struct drm_map structure.
 * \return zero on success or a negative value on error.
 */
int drm_rmmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_map *request = data;
	struct drm_local_map *map = NULL;
	struct drm_map_list *r_list;
	int ret;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(r_list, &dev->maplist, head) {
		if (r_list->map &&
		    r_list->user_token == (unsigned long)request->handle &&
		    r_list->map->flags & _DRM_REMOVABLE) {
			map = r_list->map;
			break;
		}
	}

	/* The list has wrapped around to the head pointer, or it's empty and
	 * we didn't find anything.
	 */
	if (list_empty(&dev->maplist) || !map) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	/* Register and framebuffer maps are permanent */
	if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
		mutex_unlock(&dev->struct_mutex);
		return 0;
	}

	ret = drm_rmmap_locked(dev, map);

	mutex_unlock(&dev->struct_mutex);

	return ret;
}

/**
 * Cleanup after an error on one of the addbufs() functions.
 *
 * \param dev DRM device.
 * \param entry buffer entry where the error occurred.
 *
 * Frees any pages and buffers associated with the given entry.
 */
static void drm_cleanup_buf_error(struct drm_device * dev,
				  struct drm_buf_entry * entry)
{
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			if (entry->seglist[i]) {
				drm_pci_free(dev, entry->seglist[i]);
			}
		}
		kfree(entry->seglist);

		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			kfree(entry->buflist[i].dev_private);
		}
		kfree(entry->buflist);

		entry->buf_count = 0;
	}
}

#if __OS_HAS_AGP
/**
 * Add AGP buffers for DMA transfers.
 *
 * \param dev struct drm_device to which the buffers are to be added.
 * \param request pointer to a struct drm_buf_desc describing the request.
 * \return zero on success or a negative number on failure.
 *
 * After some sanity checks, creates a drm_buf structure for each buffer and
 * reallocates the buffer list of the same size order to accommodate the new
 * buffers.
 */
int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_agp_mem *agp_entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i, valid;
	struct drm_buf **temp_buflist;

	if (!dma)
		return -EINVAL;

	count = request->count;
	order = order_base_2(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = dev->agp->base + request->agp_start;

	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: %lx\n", agp_offset);
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;

	/* Make sure buffers are located in AGP memory that we own */
	valid = 0;
	list_for_each_entry(agp_entry, &dev->agp->memory, head) {
		if ((agp_offset >= agp_entry->bound) &&
		    (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
			valid = 1;
			break;
		}
	}
	if (!list_empty(&dev->agp->memory) && !valid) {
		DRM_DEBUG("zone invalid\n");
		return -EINVAL;
	}
	spin_lock(&dev->buf_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->buf_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kzalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) *
				sizeof(*dma->buflist), GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_AGP;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
EXPORT_SYMBOL(drm_addbufs_agp);
#endif /* __OS_HAS_AGP */

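/**
 * Add consistent PCI memory buffers for DMA transfers.
 *
 * Allocates physically consistent segments with drm_pci_alloc() and carves
 * them into buffers of the requested size order, extending the device's
 * buffer and page lists.
 *
 * \param dev struct drm_device to which the buffers are to be added.
 * \param request pointer to a struct drm_buf_desc describing the request.
 * \return zero on success or a negative number on failure.
 */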
int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
{
	struct drm_device_dma *dma = dev->dma;
	int count;
	int order;
	int size;
	int total;
	int page_order;
	struct drm_buf_entry *entry;
	drm_dma_handle_t *dmah;
	struct drm_buf *buf;
	int alignment;
	unsigned long offset;
	int i;
	int byte_count;
	int page_count;
	unsigned long *temp_pagelist;
	struct drm_buf **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = order_base_2(request->size);
	size = 1 << order;

	DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
		  request->count, request->size, size, order);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	spin_lock(&dev->buf_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->buf_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kzalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	entry->seglist = kzalloc(count * sizeof(*entry->seglist), GFP_KERNEL);
	if (!entry->seglist) {
		kfree(entry->buflist);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	/* Keep the original pagelist until we know all the allocations
	 * have succeeded
	 */
	temp_pagelist = kmalloc((dma->page_count + (count << page_order)) *
				sizeof(*dma->pagelist), GFP_KERNEL);
	if (!temp_pagelist) {
		kfree(entry->buflist);
		kfree(entry->seglist);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memcpy(temp_pagelist,
	       dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
	DRM_DEBUG("pagelist: %d entries\n",
		  dma->page_count + (count << page_order));

	entry->buf_size = size;
	entry->page_order = page_order;
	byte_count = 0;
	page_count = 0;

	while (entry->buf_count < count) {

		dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000);

		if (!dmah) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			entry->seg_count = count;
			drm_cleanup_buf_error(dev, entry);
			kfree(temp_pagelist);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		entry->seglist[entry->seg_count++] = dmah;
		for (i = 0; i < (1 << page_order); i++) {
			DRM_DEBUG("page %d @ 0x%08lx\n",
				  dma->page_count + page_count,
				  (unsigned long)dmah->vaddr + PAGE_SIZE * i);
			temp_pagelist[dma->page_count + page_count++]
				= (unsigned long)dmah->vaddr + PAGE_SIZE * i;
		}
		for (offset = 0;
		     offset + size <= total && entry->buf_count < count;
		     offset += alignment, ++entry->buf_count) {
			buf = &entry->buflist[entry->buf_count];
			buf->idx = dma->buf_count + entry->buf_count;
			buf->total = alignment;
			buf->order = order;
			buf->used = 0;
			buf->offset = (dma->byte_count + byte_count + offset);
			buf->address = (void *)(dmah->vaddr + offset);
			buf->bus_address = dmah->busaddr + offset;
			buf->next = NULL;
			buf->waiting = 0;
			buf->pending = 0;
			buf->file_priv = NULL;

			buf->dev_priv_size = dev->driver->dev_priv_size;
			buf->dev_private = kzalloc(buf->dev_priv_size,
						   GFP_KERNEL);
			if (!buf->dev_private) {
				/* Set count correctly so we free the proper amount. */
				entry->buf_count = count;
				entry->seg_count = count;
				drm_cleanup_buf_error(dev, entry);
				kfree(temp_pagelist);
				mutex_unlock(&dev->struct_mutex);
				atomic_dec(&dev->buf_alloc);
				return -ENOMEM;
			}

			DRM_DEBUG("buffer %d @ %p\n",
				  entry->buf_count, buf->address);
		}
		byte_count += PAGE_SIZE << page_order;
	}

	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) *
				sizeof(*dma->buflist), GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		kfree(temp_pagelist);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	/* No allocations failed, so now we can replace the original pagelist
	 * with the new one.
	 */
	if (dma->page_count) {
		kfree(dma->pagelist);
	}
	dma->pagelist = temp_pagelist;

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	if (request->flags & _DRM_PCI_BUFFER_RO)
		dma->flags = _DRM_DMA_USE_PCI_RO;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
EXPORT_SYMBOL(drm_addbufs_pci);

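/**
 * Add scatter-gather buffers for DMA transfers.
 *
 * Like drm_addbufs_agp(), but the buffers are carved out of the device's
 * previously allocated scatter-gather area (dev->sg) rather than the AGP
 * aperture.
 *
 * \param dev struct drm_device to which the buffers are to be added.
 * \param request pointer to a struct drm_buf_desc describing the request.
 * \return zero on success or a negative number on failure.
 */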
static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	struct drm_buf **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_SG))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = order_base_2(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;

	spin_lock(&dev->buf_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->buf_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kzalloc(count * sizeof(*entry->buflist),
				 GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset
					+ (unsigned long)dev->sg->virtual);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) *
				sizeof(*dma->buflist), GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_SG;

	atomic_dec(&dev->buf_alloc);
	return 0;
}

/**
 * Add buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a struct drm_buf_desc request.
 * \return zero on success or a negative number on failure.
 *
 * According to the memory type specified in drm_buf_desc::flags and the
 * build options, it dispatches the call either to addbufs_agp(),
 * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
 * PCI memory respectively.
 */
int drm_addbufs(struct drm_device *dev, void *data,
		struct drm_file *file_priv)
{
	struct drm_buf_desc *request = data;
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

#if __OS_HAS_AGP
	if (request->flags & _DRM_AGP_BUFFER)
		ret = drm_addbufs_agp(dev, request);
	else
#endif
	if (request->flags & _DRM_SG_BUFFER)
		ret = drm_addbufs_sg(dev, request);
	else if (request->flags & _DRM_FB_BUFFER)
		ret = -EINVAL;
	else
		ret = drm_addbufs_pci(dev, request);

	return ret;
}

/**
 * Get information about the buffer mappings.
 *
 * This was originally meant for debugging purposes, or for use by a
 * sophisticated client library to determine how best to use the available
 * buffers (e.g., large buffers can be used for image transfer).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_info structure.
 * \return zero on success or a negative number on failure.
 *
 * Increments drm_device::buf_use while holding the drm_device::buf_lock
 * lock, preventing allocation of more buffers after this call.  Information
 * about each requested buffer is then copied into user space.
 */
int drm_infobufs(struct drm_device *dev, void *data,
		 struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_info *request = data;
	int i;
	int count;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->buf_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	++dev->buf_use;		/* Can't allocate more after this call */
	spin_unlock(&dev->buf_lock);

	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
		if (dma->bufs[i].buf_count)
			++count;
	}

	DRM_DEBUG("count = %d\n", count);

	if (request->count >= count) {
		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
			if (dma->bufs[i].buf_count) {
				struct drm_buf_desc __user *to =
				    &request->list[count];
				struct drm_buf_entry *from = &dma->bufs[i];
				if (copy_to_user(&to->count,
						 &from->buf_count,
						 sizeof(from->buf_count)) ||
				    copy_to_user(&to->size,
						 &from->buf_size,
						 sizeof(from->buf_size)) ||
				    copy_to_user(&to->low_mark,
						 &from->low_mark,
						 sizeof(from->low_mark)) ||
				    copy_to_user(&to->high_mark,
						 &from->high_mark,
						 sizeof(from->high_mark)))
					return -EFAULT;

				DRM_DEBUG("%d %d %d %d %d\n",
					  i,
					  dma->bufs[i].buf_count,
					  dma->bufs[i].buf_size,
					  dma->bufs[i].low_mark,
					  dma->bufs[i].high_mark);
				++count;
			}
		}
	}
	request->count = count;

	return 0;
}

/**
 * Specifies a low and high water mark for buffer allocation.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg a pointer to a drm_buf_desc structure.
 * \return zero on success or a negative number on failure.
 *
 * Verifies that the size order is bounded between the admissible orders and
 * updates the low and high water marks of the respective drm_device_dma::bufs
 * entry.
 *
 * \note This ioctl is deprecated and mostly never used.
 */
int drm_markbufs(struct drm_device *dev, void *data,
		 struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_desc *request = data;
	int order;
	struct drm_buf_entry *entry;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	DRM_DEBUG("%d, %d, %d\n",
		  request->size, request->low_mark, request->high_mark);
	order = order_base_2(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	entry = &dma->bufs[order];

	if (request->low_mark < 0 || request->low_mark > entry->buf_count)
		return -EINVAL;
	if (request->high_mark < 0 || request->high_mark > entry->buf_count)
		return -EINVAL;

	entry->low_mark = request->low_mark;
	entry->high_mark = request->high_mark;

	return 0;
}

/**
 * Unreserve the buffers in list, previously reserved using drmDMA.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_free structure.
 * \return zero on success or a negative number on failure.
 *
 * Calls free_buffer() for each used buffer.
 * This function is primarily used for debugging.
 */
int drm_freebufs(struct drm_device *dev, void *data,
		 struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_free *request = data;
	int i;
	int idx;
	struct drm_buf *buf;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	DRM_DEBUG("%d\n", request->count);
	for (i = 0; i < request->count; i++) {
		if (copy_from_user(&idx, &request->list[i], sizeof(idx)))
			return -EFAULT;
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n",
				  idx, dma->buf_count - 1);
			return -EINVAL;
		}
		buf = dma->buflist[idx];
		if (buf->file_priv != file_priv) {
			DRM_ERROR("Process %d freeing buffer not owned\n",
				  task_pid_nr(current));
			return -EINVAL;
		}
		drm_free_buffer(dev, buf);
	}

	return 0;
}

/**
 * Maps all of the DMA buffers into client-virtual space (ioctl).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_map structure.
 * \return zero on success or a negative number on failure.
 *
 * Maps the AGP, SG or PCI buffer region with vm_mmap(), and copies information
 * about each buffer into user space.  For PCI buffers, it calls vm_mmap() with
 * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
 * drm_mmap_dma().
 */
int drm_mapbufs(struct drm_device *dev, void *data,
		struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	int retcode = 0;
	const int zero = 0;
	unsigned long virtual;
	unsigned long address;
	struct drm_buf_map *request = data;
	int i;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->buf_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	dev->buf_use++;		/* Can't allocate more after this call */
	spin_unlock(&dev->buf_lock);

	if (request->count >= dma->buf_count) {
		if ((dev->agp && (dma->flags & _DRM_DMA_USE_AGP))
		    || (drm_core_check_feature(dev, DRIVER_SG)
			&& (dma->flags & _DRM_DMA_USE_SG))) {
			struct drm_local_map *map = dev->agp_buffer_map;
			unsigned long token = dev->agp_buffer_token;

			if (!map) {
				retcode = -EINVAL;
				goto done;
			}
			virtual = vm_mmap(file_priv->filp, 0, map->size,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED,
					  token);
		} else {
			virtual = vm_mmap(file_priv->filp, 0, dma->byte_count,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED, 0);
		}
		if (virtual > -1024UL) {
			/* Real error */
			retcode = (signed long)virtual;
			goto done;
		}
		request->virtual = (void __user *)virtual;

		for (i = 0; i < dma->buf_count; i++) {
			if (copy_to_user(&request->list[i].idx,
					 &dma->buflist[i]->idx,
					 sizeof(request->list[0].idx))) {
				retcode = -EFAULT;
				goto done;
			}
			if (copy_to_user(&request->list[i].total,
					 &dma->buflist[i]->total,
					 sizeof(request->list[0].total))) {
				retcode = -EFAULT;
				goto done;
			}
			if (copy_to_user(&request->list[i].used,
					 &zero, sizeof(zero))) {
				retcode = -EFAULT;
				goto done;
			}
			address = virtual + dma->buflist[i]->offset;	/* *** */
			if (copy_to_user(&request->list[i].address,
					 &address, sizeof(address))) {
				retcode = -EFAULT;
				goto done;
			}
		}
	}
      done:
	request->count = dma->buf_count;
	DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);

	return retcode;
}

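/**
 * DMA ioctl.
 *
 * Dispatches to the driver's optional dma_ioctl() handler; fails with
 * -EINVAL on modesetting drivers or when no handler is provided.
 *
 * \param dev DRM device.
 * \param data pointer to the ioctl argument.
 * \param file_priv DRM file private.
 * \return zero on success or a negative number on failure.
 */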
int drm_dma_ioctl(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	if (dev->driver->dma_ioctl)
		return dev->driver->dma_ioctl(dev, data, file_priv);
	else
		return -EINVAL;
}

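/**
 * Look up the SAREA, i.e. the first _DRM_SHM map that carries the
 * _DRM_CONTAINS_LOCK flag.
 *
 * \param dev DRM device.
 * \return the SAREA map, or NULL if none has been created yet.
 */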
struct drm_local_map *drm_getsarea(struct drm_device *dev)
{
	struct drm_map_list *entry;

	list_for_each_entry(entry, &dev->maplist, head) {
		if (entry->map && entry->map->type == _DRM_SHM &&
		    (entry->map->flags & _DRM_CONTAINS_LOCK)) {
			return entry->map;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(drm_getsarea);