/**
 * \file drm_bufs.c
 * Generic buffer template
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/vmalloc.h>
#include "drmP.h"
unsigned long drm_get_resource_start(drm_device_t *dev, unsigned int resource)
{
	return pci_resource_start(dev->pdev, resource);
}
EXPORT_SYMBOL(drm_get_resource_start);

unsigned long drm_get_resource_len(drm_device_t *dev, unsigned int resource)
{
	return pci_resource_len(dev->pdev, resource);
}
EXPORT_SYMBOL(drm_get_resource_len);
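/*
 * Usage sketch for the helpers above (illustrative only; the resource index
 * depends on the device's PCI BAR layout):
 *
 *	unsigned long fb_base = drm_get_resource_start(dev, 0);
 *	unsigned long fb_len  = drm_get_resource_len(dev, 0);
 */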
static drm_map_list_t *drm_find_matching_map(drm_device_t *dev,
					     drm_local_map_t *map)
{
	struct list_head *list;

	list_for_each(list, &dev->maplist->head) {
		drm_map_list_t *entry = list_entry(list, drm_map_list_t, head);
		if (entry->map && map->type == entry->map->type &&
		    entry->map->offset == map->offset) {
			return entry;
		}
	}

	return NULL;
}
/*
 * Used to allocate 32-bit handles for mappings.
 */
#define START_RANGE 0x10000000
#define END_RANGE 0x40000000

#ifdef _LP64
static __inline__ unsigned int HandleID(unsigned long lhandle,
					drm_device_t *dev)
{
	static unsigned int map32_handle = START_RANGE;
	unsigned int hash;

	if (lhandle & 0xffffffff00000000) {
		hash = map32_handle;
		map32_handle += PAGE_SIZE;
		if (map32_handle > END_RANGE)
			map32_handle = START_RANGE;
	} else
		hash = lhandle;

	while (1) {
		drm_map_list_t *_entry;
		list_for_each_entry(_entry, &dev->maplist->head, head) {
			if (_entry->user_token == hash)
				break;
		}
		if (&_entry->head == &dev->maplist->head)
			return hash;

		hash += PAGE_SIZE;
		map32_handle += PAGE_SIZE;
	}
}
#else
# define HandleID(x,dev) (unsigned int)(x)
#endif
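/*
 * Worked example of the handle scheme above: on a 64-bit kernel, a map whose
 * kernel-virtual handle does not fit in 32 bits is given the next free
 * page-sized token from [START_RANGE, END_RANGE), e.g. 0x10000000, then
 * 0x10001000, wrapping back to START_RANGE once END_RANGE is passed.
 * Handles that already fit in 32 bits are used as-is, and the search loop
 * guarantees the chosen token is not already in use on dev->maplist.
 */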
/**
 * Ioctl to specify a range of memory that is available for mapping by a
 * non-root process.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_map structure.
 * \return zero on success or a negative value on error.
 *
 * Adjusts the memory offset to its absolute value according to the mapping
 * type.  Adds the map to the map list drm_device::maplist.  Adds MTRR's where
 * applicable and if supported by the kernel.
 */
static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
			   unsigned int size, drm_map_type_t type,
			   drm_map_flags_t flags, drm_map_list_t ** maplist)
{
	drm_map_t *map;
	drm_map_list_t *list;
	drm_dma_handle_t *dmah;

	map = drm_alloc(sizeof(*map), DRM_MEM_MAPS);
	if (!map)
		return -ENOMEM;

	map->offset = offset;
	map->size = size;
	map->flags = flags;
	map->type = type;

	/* Only allow shared memory to be removable since we only keep enough
	 * book keeping information about shared memory to allow for removal
	 * when processes fork.
	 */
	if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		return -EINVAL;
	}
	DRM_DEBUG("offset = 0x%08lx, size = 0x%08lx, type = %d\n",
		  map->offset, map->size, map->type);
	if ((map->offset & (~PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		return -EINVAL;
	}
	map->mtrr = -1;
	map->handle = NULL;

	switch (map->type) {
	case _DRM_REGISTERS:
	case _DRM_FRAME_BUFFER:
#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__)
		if (map->offset + (map->size - 1) < map->offset ||
		    map->offset < virt_to_phys(high_memory)) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -EINVAL;
		}
#endif
#ifdef __alpha__
		map->offset += dev->hose->mem_space->start;
#endif
		/* Some drivers preinitialize some maps, without the X Server
		 * needing to be aware of it.  Therefore, we just return success
		 * when the server tries to create a duplicate map.
		 */
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size,
					  list->map->size);
				list->map->size = map->size;
			}

			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			*maplist = list;
			return 0;
		}

		if (drm_core_has_MTRR(dev)) {
			if (map->type == _DRM_FRAME_BUFFER ||
			    (map->flags & _DRM_WRITE_COMBINING)) {
				map->mtrr = mtrr_add(map->offset, map->size,
						     MTRR_TYPE_WRCOMB, 1);
			}
		}
		if (map->type == _DRM_REGISTERS)
			map->handle = drm_ioremap(map->offset, map->size, dev);
		break;

	case _DRM_SHM:
		map->handle = vmalloc_32(map->size);
		DRM_DEBUG("%lu %d %p\n",
			  map->size, drm_order(map->size), map->handle);
		if (!map->handle) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -ENOMEM;
		}
		map->offset = (unsigned long)map->handle;
		if (map->flags & _DRM_CONTAINS_LOCK) {
			/* Prevent a 2nd X Server from creating a 2nd lock */
			if (dev->lock.hw_lock != NULL) {
				vfree(map->handle);
				drm_free(map, sizeof(*map), DRM_MEM_MAPS);
				return -EBUSY;
			}
			dev->sigdata.lock = dev->lock.hw_lock = map->handle;	/* Pointer to lock */
		}
		break;
	case _DRM_AGP:
		if (drm_core_has_AGP(dev)) {
#ifdef __alpha__
			map->offset += dev->hose->mem_space->start;
#endif
			map->offset += dev->agp->base;
			map->mtrr = dev->agp->agp_mtrr;	/* for getmap */
		}
		break;
	case _DRM_SCATTER_GATHER:
		if (!dev->sg) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -EINVAL;
		}
		map->offset += (unsigned long)dev->sg->virtual;
		break;
	case _DRM_CONSISTENT:
		/* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G.
		 * As we're limiting the address to 2^32-1 (or less),
		 * casting it down to 32 bits is no problem, but we
		 * need to point to a 64bit variable first. */
		dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL);
		if (!dmah) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -ENOMEM;
		}
		map->handle = dmah->vaddr;
		map->offset = (unsigned long)dmah->busaddr;
		break;
	default:
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		return -EINVAL;
	}

	list = drm_alloc(sizeof(*list), DRM_MEM_MAPS);
	if (!list) {
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		return -EINVAL;
	}
	memset(list, 0, sizeof(*list));
	list->map = map;

	mutex_lock(&dev->struct_mutex);
	list_add(&list->head, &dev->maplist->head);
	/* Assign a 32-bit handle */
	/* We do it here so that dev->struct_mutex protects the increment */
	list->user_token = HandleID(map->type == _DRM_SHM
				    ? (unsigned long)map->handle
				    : map->offset, dev);
	mutex_unlock(&dev->struct_mutex);

	*maplist = list;
	return 0;
}
int drm_addmap(drm_device_t * dev, unsigned int offset,
	       unsigned int size, drm_map_type_t type,
	       drm_map_flags_t flags, drm_local_map_t ** map_ptr)
{
	drm_map_list_t *list;
	int rc;

	rc = drm_addmap_core(dev, offset, size, type, flags, &list);
	if (!rc)
		*map_ptr = list->map;
	return rc;
}

EXPORT_SYMBOL(drm_addmap);
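/*
 * Usage sketch for drm_addmap() (illustrative only; BAR numbers and flags
 * vary per driver):
 *
 *	drm_local_map_t *regs, *fb;
 *
 *	drm_addmap(dev, drm_get_resource_start(dev, 1),
 *		   drm_get_resource_len(dev, 1),
 *		   _DRM_REGISTERS, _DRM_READ_ONLY, &regs);
 *	drm_addmap(dev, drm_get_resource_start(dev, 0),
 *		   drm_get_resource_len(dev, 0),
 *		   _DRM_FRAME_BUFFER, _DRM_WRITE_COMBINING, &fb);
 */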
int drm_addmap_ioctl(struct inode *inode, struct file *filp,
		     unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_map_t map;
	drm_map_list_t *maplist;
	drm_map_t __user *argp = (void __user *)arg;
	int err;

	if (!(filp->f_mode & 3))
		return -EACCES;	/* Require read/write */

	if (copy_from_user(&map, argp, sizeof(map))) {
		return -EFAULT;
	}

	if (!(capable(CAP_SYS_ADMIN) || map.type == _DRM_AGP))
		return -EPERM;

	err = drm_addmap_core(dev, map.offset, map.size, map.type, map.flags,
			      &maplist);

	if (err)
		return err;

	if (copy_to_user(argp, maplist->map, sizeof(drm_map_t)))
		return -EFAULT;

	/* avoid a warning on 64-bit: the cast isn't pretty, but the API is already set in stone */
	if (put_user((void *)(unsigned long)maplist->user_token, &argp->handle))
		return -EFAULT;
	return 0;
}
/**
 * Remove a map private from list and deallocate resources if the mapping
 * isn't in use.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_map_t structure.
 * \return zero on success or a negative value on error.
 *
 * Searches for the map on drm_device::maplist, removes it from the list,
 * and frees any associated resources (such as MTRR's) if it is not still
 * in use.
 */
int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map)
{
	struct list_head *list;
	drm_map_list_t *r_list = NULL;
	drm_dma_handle_t dmah;

	/* Find the list entry for the map and remove it */
	list_for_each(list, &dev->maplist->head) {
		r_list = list_entry(list, drm_map_list_t, head);

		if (r_list->map == map) {
			list_del(list);
			drm_free(list, sizeof(*list), DRM_MEM_MAPS);
			break;
		}
	}

	/* List has wrapped around to the head pointer, or it's empty and we
	 * didn't find anything.
	 */
	if (list == (&dev->maplist->head)) {
		return -EINVAL;
	}

	switch (map->type) {
	case _DRM_REGISTERS:
		drm_ioremapfree(map->handle, map->size, dev);
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
			int retcode;
			retcode = mtrr_del(map->mtrr, map->offset, map->size);
			DRM_DEBUG("mtrr_del=%d\n", retcode);
		}
		break;
	case _DRM_SHM:
		vfree(map->handle);
		break;
	case _DRM_AGP:
	case _DRM_SCATTER_GATHER:
		break;
	case _DRM_CONSISTENT:
		dmah.vaddr = map->handle;
		dmah.busaddr = map->offset;
		dmah.size = map->size;
		__drm_pci_free(dev, &dmah);
		break;
	}
	drm_free(map, sizeof(*map), DRM_MEM_MAPS);

	return 0;
}

EXPORT_SYMBOL(drm_rmmap_locked);
int drm_rmmap(drm_device_t *dev, drm_local_map_t *map)
{
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = drm_rmmap_locked(dev, map);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

EXPORT_SYMBOL(drm_rmmap);
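/*
 * Note that drm_rmmap() takes struct_mutex itself; a caller that already
 * holds dev->struct_mutex must use drm_rmmap_locked() instead, e.g.:
 *
 *	mutex_lock(&dev->struct_mutex);
 *	drm_rmmap_locked(dev, map);
 *	mutex_unlock(&dev->struct_mutex);
 */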
/* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
 * the last close of the device, and this is necessary for cleanup when things
 * exit uncleanly.  Therefore, having userland manually remove mappings seems
 * like a pointless exercise since they're going away anyway.
 *
 * One use case might be after addmap is allowed for normal users for SHM and
 * gets used by drivers that the server doesn't need to care about.  This seems
 * unlikely.
 */
int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
		    unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_map_t request;
	drm_local_map_t *map = NULL;
	struct list_head *list;
	int ret;

	if (copy_from_user(&request, (drm_map_t __user *) arg, sizeof(request))) {
		return -EFAULT;
	}

	mutex_lock(&dev->struct_mutex);
	list_for_each(list, &dev->maplist->head) {
		drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head);

		if (r_list->map &&
		    r_list->user_token == (unsigned long)request.handle &&
		    r_list->map->flags & _DRM_REMOVABLE) {
			map = r_list->map;
			break;
		}
	}

	/* List has wrapped around to the head pointer, or it's empty and we
	 * didn't find anything.
	 */
	if (list == (&dev->maplist->head)) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	if (!map) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	/* Register and framebuffer maps are permanent */
	if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
		mutex_unlock(&dev->struct_mutex);
		return 0;
	}

	ret = drm_rmmap_locked(dev, map);

	mutex_unlock(&dev->struct_mutex);

	return ret;
}
/**
 * Cleanup after an error on one of the addbufs() functions.
 *
 * \param dev DRM device.
 * \param entry buffer entry where the error occurred.
 *
 * Frees any pages and buffers associated with the given entry.
 */
static void drm_cleanup_buf_error(drm_device_t * dev, drm_buf_entry_t * entry)
{
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			if (entry->seglist[i]) {
				drm_pci_free(dev, entry->seglist[i]);
			}
		}
		drm_free(entry->seglist,
			 entry->seg_count *
			 sizeof(*entry->seglist), DRM_MEM_SEGS);

		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			if (entry->buflist[i].dev_private) {
				drm_free(entry->buflist[i].dev_private,
					 entry->buflist[i].dev_priv_size,
					 DRM_MEM_BUFS);
			}
		}
		drm_free(entry->buflist,
			 entry->buf_count *
			 sizeof(*entry->buflist), DRM_MEM_BUFS);

		entry->buf_count = 0;
	}
}
#if __OS_HAS_AGP
/**
 * Add AGP buffers for DMA transfers.
 *
 * \param dev drm_device_t to which the buffers are to be added.
 * \param request pointer to a drm_buf_desc_t describing the request.
 * \return zero on success or a negative number on failure.
 *
 * After some sanity checks creates a drm_buf structure for each buffer and
 * reallocates the buffer list of the same size order to accommodate the new
 * buffers.
 */
int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	if (!dma)
		return -EINVAL;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = dev->agp->base + request->agp_start;

	DRM_DEBUG("count:      %d\n", count);
	DRM_DEBUG("order:      %d\n", order);
	DRM_DEBUG("size:       %d\n", size);
	DRM_DEBUG("agp_offset: %lx\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total:      %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->filp = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		memset(buf->dev_private, 0, buf->dev_priv_size);

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_AGP;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
EXPORT_SYMBOL(drm_addbufs_agp);
#endif				/* __OS_HAS_AGP */
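/*
 * Usage sketch (illustrative values): a driver or the X server asks for AGP
 * buffers by filling in a drm_buf_desc_t; on success, count and size are
 * written back with what was actually allocated:
 *
 *	drm_buf_desc_t req;
 *
 *	memset(&req, 0, sizeof(req));
 *	req.count = 32;					(buffers wanted)
 *	req.size = 65536;				(bytes per buffer)
 *	req.flags = _DRM_AGP_BUFFER | _DRM_PAGE_ALIGN;
 *	req.agp_start = 0;				(offset into the aperture)
 *	drm_addbufs_agp(dev, &req);
 */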
int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
{
	drm_device_dma_t *dma = dev->dma;
	int count;
	int order;
	int size;
	int total;
	int page_order;
	drm_buf_entry_t *entry;
	drm_dma_handle_t *dmah;
	drm_buf_t *buf;
	int alignment;
	unsigned long offset;
	int i;
	int byte_count;
	int page_count;
	unsigned long *temp_pagelist;
	drm_buf_t **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	DRM_DEBUG("count=%d, size=%d (%d), order=%d, queue_count=%d\n",
		  request->count, request->size, size, order, dev->queue_count);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->seglist = drm_alloc(count * sizeof(*entry->seglist),
				   DRM_MEM_SEGS);
	if (!entry->seglist) {
		drm_free(entry->buflist,
			 count * sizeof(*entry->buflist), DRM_MEM_BUFS);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->seglist, 0, count * sizeof(*entry->seglist));

	/* Keep the original pagelist until we know all the allocations
	 * have succeeded
	 */
	temp_pagelist = drm_alloc((dma->page_count + (count << page_order))
				  * sizeof(*dma->pagelist), DRM_MEM_PAGES);
	if (!temp_pagelist) {
		drm_free(entry->buflist,
			 count * sizeof(*entry->buflist), DRM_MEM_BUFS);
		drm_free(entry->seglist,
			 count * sizeof(*entry->seglist), DRM_MEM_SEGS);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memcpy(temp_pagelist,
	       dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
	DRM_DEBUG("pagelist: %d entries\n",
		  dma->page_count + (count << page_order));

	entry->buf_size = size;
	entry->page_order = page_order;
	byte_count = 0;
	page_count = 0;

	while (entry->buf_count < count) {

		dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000, 0xfffffffful);

		if (!dmah) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			entry->seg_count = count;
			drm_cleanup_buf_error(dev, entry);
			drm_free(temp_pagelist,
				 (dma->page_count + (count << page_order))
				 * sizeof(*dma->pagelist), DRM_MEM_PAGES);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		entry->seglist[entry->seg_count++] = dmah;
		for (i = 0; i < (1 << page_order); i++) {
			DRM_DEBUG("page %d @ 0x%08lx\n",
				  dma->page_count + page_count,
				  (unsigned long)dmah->vaddr + PAGE_SIZE * i);
			temp_pagelist[dma->page_count + page_count++]
			    = (unsigned long)dmah->vaddr + PAGE_SIZE * i;
		}
		for (offset = 0;
		     offset + size <= total && entry->buf_count < count;
		     offset += alignment, ++entry->buf_count) {
			buf = &entry->buflist[entry->buf_count];
			buf->idx = dma->buf_count + entry->buf_count;
			buf->total = alignment;
			buf->order = order;
			buf->used = 0;

			buf->offset = (dma->byte_count + byte_count + offset);
			buf->address = (void *)(dmah->vaddr + offset);
			buf->bus_address = dmah->busaddr + offset;
			buf->next = NULL;
			buf->waiting = 0;
			buf->pending = 0;
			init_waitqueue_head(&buf->dma_wait);
			buf->filp = NULL;

			buf->dev_priv_size = dev->driver->dev_priv_size;
			buf->dev_private = drm_alloc(buf->dev_priv_size,
						     DRM_MEM_BUFS);
			if (!buf->dev_private) {
				/* Set count correctly so we free the proper amount. */
				entry->buf_count = count;
				entry->seg_count = count;
				drm_cleanup_buf_error(dev, entry);
				drm_free(temp_pagelist,
					 (dma->page_count +
					  (count << page_order))
					 * sizeof(*dma->pagelist),
					 DRM_MEM_PAGES);
				mutex_unlock(&dev->struct_mutex);
				atomic_dec(&dev->buf_alloc);
				return -ENOMEM;
			}
			memset(buf->dev_private, 0, buf->dev_priv_size);

			DRM_DEBUG("buffer %d @ %p\n",
				  entry->buf_count, buf->address);
		}
		byte_count += PAGE_SIZE << page_order;
	}

	temp_buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		drm_free(temp_pagelist,
			 (dma->page_count + (count << page_order))
			 * sizeof(*dma->pagelist), DRM_MEM_PAGES);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	/* No allocations failed, so now we can replace the original pagelist
	 * with the new one.
	 */
	if (dma->page_count) {
		drm_free(dma->pagelist,
			 dma->page_count * sizeof(*dma->pagelist),
			 DRM_MEM_PAGES);
	}
	dma->pagelist = temp_pagelist;

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
EXPORT_SYMBOL(drm_addbufs_pci);
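/*
 * A worked example of the sizing math used above: request->size = 65536
 * gives order = drm_order(65536) = 16 and size = 1 << 16; with 4 KiB pages
 * (PAGE_SHIFT = 12), page_order = 16 - 12 = 4, so each drm_pci_alloc()
 * segment is PAGE_SIZE << 4 = 64 KiB, 0x1000-aligned, and restricted to the
 * low 4 GiB (0xfffffffful) so busaddr fits the 32-bit API.
 */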
static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_SG))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count:      %d\n", count);
	DRM_DEBUG("order:      %d\n", order);
	DRM_DEBUG("size:       %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total:      %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset
					+ (unsigned long)dev->sg->virtual);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->filp = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}

		memset(buf->dev_private, 0, buf->dev_priv_size);

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_SG;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
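/*
 * Note the address split above for scatter-gather buffers: buf->bus_address
 * stays relative to agp_offset (the value userspace passed in agp_start),
 * while buf->address additionally adds dev->sg->virtual and is therefore a
 * kernel-virtual pointer into the SG area.
 */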
int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count:      %d\n", count);
	DRM_DEBUG("order:      %d\n", order);
	DRM_DEBUG("size:       %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total:      %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->filp = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		memset(buf->dev_private, 0, buf->dev_priv_size);

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_FB;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
EXPORT_SYMBOL(drm_addbufs_fb);
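/*
 * drm_addbufs_fb() mirrors the AGP variant above step for step; the only
 * differences are the DRIVER_FB_DMA feature check and that dma->flags ends
 * up as _DRM_DMA_USE_FB, so the buffers are reached through the framebuffer
 * aperture rather than the AGP aperture.
 */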
/**
 * Add buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_desc_t request.
 * \return zero on success or a negative number on failure.
 *
 * According to the memory type specified in drm_buf_desc::flags and the
 * build options, it dispatches the call either to addbufs_agp(),
 * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
 * PCI memory respectively.
 */
int drm_addbufs(struct inode *inode, struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	drm_buf_desc_t request;
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (copy_from_user(&request, (drm_buf_desc_t __user *) arg,
			   sizeof(request)))
		return -EFAULT;

#if __OS_HAS_AGP
	if (request.flags & _DRM_AGP_BUFFER)
		ret = drm_addbufs_agp(dev, &request);
	else
#endif
	if (request.flags & _DRM_SG_BUFFER)
		ret = drm_addbufs_sg(dev, &request);
	else if (request.flags & _DRM_FB_BUFFER)
		ret = drm_addbufs_fb(dev, &request);
	else
		ret = drm_addbufs_pci(dev, &request);

	if (ret == 0) {
		if (copy_to_user((void __user *)arg, &request, sizeof(request))) {
			ret = -EFAULT;
		}
	}
	return ret;
}
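/*
 * Userspace sketch of this ioctl (assumes an open DRM fd; error handling
 * omitted; DRM_IOCTL_ADD_BUFS is the request number from drm.h):
 *
 *	drm_buf_desc_t req;
 *
 *	memset(&req, 0, sizeof(req));
 *	req.count = 16;
 *	req.size = 4096;
 *	req.flags = 0;			(plain consistent PCI memory)
 *	ioctl(fd, DRM_IOCTL_ADD_BUFS, &req);
 */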
/**
 * Get information about the buffer mappings.
 *
 * This was originally meant for debugging purposes, or for use by a
 * sophisticated client library to determine how best to use the available
 * buffers (e.g., large buffers can be used for image transfer).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_info structure.
 * \return zero on success or a negative number on failure.
 *
 * Increments drm_device::buf_use while holding the drm_device::count_lock
 * lock, preventing allocation of more buffers after this call.  Information
 * about each requested buffer is then copied into user space.
 */
int drm_infobufs(struct inode *inode, struct file *filp,
		 unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_info_t request;
	drm_buf_info_t __user *argp = (void __user *)arg;
	int i;
	int count;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->count_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	++dev->buf_use;		/* Can't allocate more after this call */
	spin_unlock(&dev->count_lock);

	if (copy_from_user(&request, argp, sizeof(request)))
		return -EFAULT;

	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
		if (dma->bufs[i].buf_count)
			++count;
	}

	DRM_DEBUG("count = %d\n", count);

	if (request.count >= count) {
		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
			if (dma->bufs[i].buf_count) {
				drm_buf_desc_t __user *to =
				    &request.list[count];
				drm_buf_entry_t *from = &dma->bufs[i];
				drm_freelist_t *list = &dma->bufs[i].freelist;
				if (copy_to_user(&to->count,
						 &from->buf_count,
						 sizeof(from->buf_count)) ||
				    copy_to_user(&to->size,
						 &from->buf_size,
						 sizeof(from->buf_size)) ||
				    copy_to_user(&to->low_mark,
						 &list->low_mark,
						 sizeof(list->low_mark)) ||
				    copy_to_user(&to->high_mark,
						 &list->high_mark,
						 sizeof(list->high_mark)))
					return -EFAULT;

				DRM_DEBUG("%d %d %d %d %d\n",
					  i,
					  dma->bufs[i].buf_count,
					  dma->bufs[i].buf_size,
					  dma->bufs[i].freelist.low_mark,
					  dma->bufs[i].freelist.high_mark);
				++count;
			}
		}
	}
	request.count = count;

	if (copy_to_user(argp, &request, sizeof(request)))
		return -EFAULT;

	return 0;
}
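/*
 * The usual userspace pattern for this ioctl is two calls: one with
 * request.count = 0 just to learn how many entries are needed (the kernel
 * writes the real count back), then a second call with request.list
 * pointing at an array of at least that many drm_buf_desc_t slots.
 */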
/**
 * Specifies a low and high water mark for buffer allocation.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg a pointer to a drm_buf_desc structure.
 * \return zero on success or a negative number on failure.
 *
 * Verifies that the size order is bounded between the admissible orders and
 * updates the respective drm_device_dma::bufs entry low and high water mark.
 *
 * \note This ioctl is deprecated and rarely used.
 */
int drm_markbufs(struct inode *inode, struct file *filp,
		 unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_desc_t request;
	int order;
	drm_buf_entry_t *entry;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (copy_from_user(&request,
			   (drm_buf_desc_t __user *) arg, sizeof(request)))
		return -EFAULT;

	DRM_DEBUG("%d, %d, %d\n",
		  request.size, request.low_mark, request.high_mark);
	order = drm_order(request.size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	entry = &dma->bufs[order];

	if (request.low_mark < 0 || request.low_mark > entry->buf_count)
		return -EINVAL;
	if (request.high_mark < 0 || request.high_mark > entry->buf_count)
		return -EINVAL;

	entry->freelist.low_mark = request.low_mark;
	entry->freelist.high_mark = request.high_mark;

	return 0;
}
/**
 * Unreserve the buffers in list, previously reserved using drmDMA.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_free structure.
 * \return zero on success or a negative number on failure.
 *
 * Calls free_buffer() for each used buffer.
 * This function is primarily used for debugging.
 */
int drm_freebufs(struct inode *inode, struct file *filp,
		 unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_free_t request;
	int i;
	int idx;
	drm_buf_t *buf;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (copy_from_user(&request,
			   (drm_buf_free_t __user *) arg, sizeof(request)))
		return -EFAULT;

	DRM_DEBUG("%d\n", request.count);
	for (i = 0; i < request.count; i++) {
		if (copy_from_user(&idx, &request.list[i], sizeof(idx)))
			return -EFAULT;
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n",
				  idx, dma->buf_count - 1);
			return -EINVAL;
		}
		buf = dma->buflist[idx];
		if (buf->filp != filp) {
			DRM_ERROR("Process %d freeing buffer not owned\n",
				  current->pid);
			return -EINVAL;
		}
		drm_free_buffer(dev, buf);
	}

	return 0;
}
/**
 * Maps all of the DMA buffers into client-virtual space (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_map structure.
 * \return zero on success or a negative number on failure.
 *
 * Maps the AGP or SG buffer region with do_mmap(), and copies information
 * about each buffer into user space.  The PCI buffers are already mapped on
 * the addbufs_pci() call.
 */
int drm_mapbufs(struct inode *inode, struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_map_t __user *argp = (void __user *)arg;
	int retcode = 0;
	const int zero = 0;
	unsigned long virtual;
	unsigned long address;
	drm_buf_map_t request;
	int i;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->count_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	dev->buf_use++;		/* Can't allocate more after this call */
	spin_unlock(&dev->count_lock);

	if (copy_from_user(&request, argp, sizeof(request)))
		return -EFAULT;

	if (request.count >= dma->buf_count) {
		if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
		    || (drm_core_check_feature(dev, DRIVER_SG)
			&& (dma->flags & _DRM_DMA_USE_SG))
		    || (drm_core_check_feature(dev, DRIVER_FB_DMA)
			&& (dma->flags & _DRM_DMA_USE_FB))) {
			drm_map_t *map = dev->agp_buffer_map;
			unsigned long token = dev->agp_buffer_token;

			if (!map) {
				retcode = -EINVAL;
				goto done;
			}

			down_write(&current->mm->mmap_sem);
			virtual = do_mmap(filp, 0, map->size,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED, token);
			up_write(&current->mm->mmap_sem);
		} else {
			down_write(&current->mm->mmap_sem);
			virtual = do_mmap(filp, 0, dma->byte_count,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED, 0);
			up_write(&current->mm->mmap_sem);
		}
		if (virtual > -1024UL) {
			/* Real error */
			retcode = (signed long)virtual;
			goto done;
		}
		request.virtual = (void __user *)virtual;

		for (i = 0; i < dma->buf_count; i++) {
			if (copy_to_user(&request.list[i].idx,
					 &dma->buflist[i]->idx,
					 sizeof(request.list[0].idx))) {
				retcode = -EFAULT;
				goto done;
			}
			if (copy_to_user(&request.list[i].total,
					 &dma->buflist[i]->total,
					 sizeof(request.list[0].total))) {
				retcode = -EFAULT;
				goto done;
			}
			if (copy_to_user(&request.list[i].used,
					 &zero, sizeof(zero))) {
				retcode = -EFAULT;
				goto done;
			}
			address = virtual + dma->buflist[i]->offset;	/* *** */
			if (copy_to_user(&request.list[i].address,
					 &address, sizeof(address))) {
				retcode = -EFAULT;
				goto done;
			}
		}
	}
      done:
	request.count = dma->buf_count;
	DRM_DEBUG("%d buffers, retcode = %d\n", request.count, retcode);

	if (copy_to_user(argp, &request, sizeof(request)))
		return -EFAULT;

	return retcode;
}
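/*
 * Userspace typically reaches this through a wrapper such as libdrm's
 * drmMapBufs(): it passes a list large enough for dma->buf_count entries,
 * receives one mmap()ed region back in request.virtual, and each buffer's
 * pointer comes back as virtual + buflist[i]->offset (the line marked
 * with *** above).
 */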
/**
 * Compute size order.  Returns the exponent of the smallest power of two
 * which is greater than or equal to the given number.
 *
 * \param size size.
 * \return order.
 *
 * \todo Can be made faster.
 */
int drm_order(unsigned long size)
{
	int order;
	unsigned long tmp;

	for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;

	if (size & (size - 1))
		++order;

	return order;
}
EXPORT_SYMBOL(drm_order);
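/*
 * Examples: drm_order(4096) = 12, drm_order(4097) = 13 (sizes round up to
 * the next power of two), and drm_order(1) = 0.  The addbufs variants above
 * rely on this rounding when turning a requested buffer size into an index
 * into dma->bufs[].
 */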