/**
 * \file drm_bufs.c
 * Generic buffer template
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/vmalloc.h>
#include "drmP.h"
unsigned long drm_get_resource_start(drm_device_t *dev, unsigned int resource)
{
	return pci_resource_start(dev->pdev, resource);
}
EXPORT_SYMBOL(drm_get_resource_start);
unsigned long drm_get_resource_len(drm_device_t *dev, unsigned int resource)
{
	return pci_resource_len(dev->pdev, resource);
}
EXPORT_SYMBOL(drm_get_resource_len);
static drm_map_list_t *drm_find_matching_map(drm_device_t *dev,
					     drm_local_map_t *map)
{
	drm_map_list_t *entry;
	list_for_each_entry(entry, &dev->maplist, head) {
		if (entry->map && map->type == entry->map->type &&
		    ((entry->map->offset == map->offset) ||
		     (map->type == _DRM_SHM && map->flags == _DRM_CONTAINS_LOCK))) {
			return entry;
		}
	}

	return NULL;
}
static int drm_map_handle(drm_device_t *dev, drm_hash_item_t *hash,
			  unsigned long user_token, int hashed_handle)
{
	int use_hashed_handle;
#if (BITS_PER_LONG == 64)
	use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
#elif (BITS_PER_LONG == 32)
	use_hashed_handle = hashed_handle;
#else
#error Unsupported long size. Neither 64 nor 32 bits.
#endif

	if (!use_hashed_handle) {
		int ret;
		hash->key = user_token >> PAGE_SHIFT;
		ret = drm_ht_insert_item(&dev->map_hash, hash);
		if (ret != -EINVAL)
			return ret;
	}
	return drm_ht_just_insert_please(&dev->map_hash, hash,
					 user_token, 32 - PAGE_SHIFT - 3,
					 0, DRM_MAP_HASH_OFFSET >> PAGE_SHIFT);
}
/**
 * Ioctl to specify a range of memory that is available for mapping by a
 * non-root process.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_map structure.
 * \return zero on success or a negative value on error.
 *
 * Adjusts the memory offset to its absolute value according to the mapping
 * type.  Adds the map to the map list drm_device::maplist.  Adds MTRR's where
 * applicable and if supported by the kernel.
 */
static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
			   unsigned int size, enum drm_map_type type,
			   enum drm_map_flags flags, drm_map_list_t ** maplist)
{
	drm_local_map_t *map;
	drm_map_list_t *list;
	drm_dma_handle_t *dmah;
	unsigned long user_token;
	int ret;

	map = drm_alloc(sizeof(*map), DRM_MEM_MAPS);
	if (!map)
		return -ENOMEM;

	map->offset = offset;
	map->size = size;
	map->flags = flags;
	map->type = type;

	/* Only allow shared memory to be removable since we only keep enough
	 * book keeping information about shared memory to allow for removal
	 * when processes fork.
	 */
	if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		return -EINVAL;
	}
	DRM_DEBUG("offset = 0x%08lx, size = 0x%08lx, type = %d\n",
		  map->offset, map->size, map->type);
	if ((map->offset & (~PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		return -EINVAL;
	}
	map->mtrr = -1;
	map->handle = NULL;

	switch (map->type) {
	case _DRM_REGISTERS:
	case _DRM_FRAME_BUFFER:
#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__)
		if (map->offset + (map->size - 1) < map->offset ||
		    map->offset < virt_to_phys(high_memory)) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -EINVAL;
		}
#endif
#ifdef __alpha__
		map->offset += dev->hose->mem_space->start;
#endif
		/* Some drivers preinitialize some maps, without the X Server
		 * needing to be aware of it.  Therefore, we just return success
		 * when the server tries to create a duplicate map.
		 */
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size,
					  list->map->size);
				list->map->size = map->size;
			}

			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			*maplist = list;
			return 0;
		}

		if (drm_core_has_MTRR(dev)) {
			if (map->type == _DRM_FRAME_BUFFER ||
			    (map->flags & _DRM_WRITE_COMBINING)) {
				map->mtrr = mtrr_add(map->offset, map->size,
						     MTRR_TYPE_WRCOMB, 1);
			}
		}
		if (map->type == _DRM_REGISTERS)
			map->handle = ioremap(map->offset, map->size);
		break;
	case _DRM_SHM:
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size, list->map->size);
				list->map->size = map->size;
			}

			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			*maplist = list;
			return 0;
		}
		map->handle = vmalloc_user(map->size);
		DRM_DEBUG("%lu %d %p\n",
			  map->size, drm_order(map->size), map->handle);
		if (!map->handle) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -ENOMEM;
		}
		map->offset = (unsigned long)map->handle;
		if (map->flags & _DRM_CONTAINS_LOCK) {
			/* Prevent a 2nd X Server from creating a 2nd lock */
			if (dev->lock.hw_lock != NULL) {
				vfree(map->handle);
				drm_free(map, sizeof(*map), DRM_MEM_MAPS);
				return -EBUSY;
			}
			dev->sigdata.lock = dev->lock.hw_lock = map->handle;	/* Pointer to lock */
		}
		break;
	case _DRM_AGP: {
		drm_agp_mem_t *entry;
		int valid = 0;

		if (!drm_core_has_AGP(dev)) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -EINVAL;
		}
#ifdef __alpha__
		map->offset += dev->hose->mem_space->start;
#endif
		/* Note: dev->agp->base may actually be 0 when the DRM
		 * is not in control of AGP space. But if user space is
		 * it should already have added the AGP base itself.
		 */
		map->offset += dev->agp->base;
		map->mtrr = dev->agp->agp_mtrr;	/* for getmap */

		/* This assumes the DRM is in total control of AGP space.
		 * It's not always the case as AGP can be in the control
		 * of user space (i.e. i810 driver). So this loop will get
		 * skipped and we double check that dev->agp->memory is
		 * actually set as well as being invalid before EPERM'ing
		 */
		list_for_each_entry(entry, &dev->agp->memory, head) {
			if ((map->offset >= entry->bound) &&
			    (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
				valid = 1;
				break;
			}
		}
		if (!list_empty(&dev->agp->memory) && !valid) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -EPERM;
		}
		DRM_DEBUG("AGP offset = 0x%08lx, size = 0x%08lx\n", map->offset, map->size);

		break;
	}
	case _DRM_SCATTER_GATHER:
		if (!dev->sg) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -EINVAL;
		}
		map->offset += (unsigned long)dev->sg->virtual;
		break;
	case _DRM_CONSISTENT:
		/* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G,
		 * As we're limiting the address to 2^32-1 (or less),
		 * casting it down to 32 bits is no problem, but we
		 * need to point to a 64bit variable first. */
		dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL);
		if (!dmah) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -ENOMEM;
		}
		map->handle = dmah->vaddr;
		map->offset = (unsigned long)dmah->busaddr;
		kfree(dmah);
		break;
	default:
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		return -EINVAL;
	}

	list = drm_alloc(sizeof(*list), DRM_MEM_MAPS);
	if (!list) {
		if (map->type == _DRM_REGISTERS)
			iounmap(map->handle);
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		return -EINVAL;
	}
	memset(list, 0, sizeof(*list));
	list->map = map;

	mutex_lock(&dev->struct_mutex);
	list_add(&list->head, &dev->maplist);

	/* Assign a 32-bit handle */
	/* We do it here so that dev->struct_mutex protects the increment */
	user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
		map->offset;
	ret = drm_map_handle(dev, &list->hash, user_token, 0);
	if (ret) {
		if (map->type == _DRM_REGISTERS)
			iounmap(map->handle);
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		drm_free(list, sizeof(*list), DRM_MEM_MAPS);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	list->user_token = list->hash.key << PAGE_SHIFT;
	mutex_unlock(&dev->struct_mutex);

	*maplist = list;
	return 0;
}
int drm_addmap(drm_device_t * dev, unsigned int offset,
	       unsigned int size, enum drm_map_type type,
	       enum drm_map_flags flags, drm_local_map_t ** map_ptr)
{
	drm_map_list_t *list;
	int rc;

	rc = drm_addmap_core(dev, offset, size, type, flags, &list);
	if (!rc)
		*map_ptr = list->map;
	return rc;
}

EXPORT_SYMBOL(drm_addmap);
int drm_addmap_ioctl(struct inode *inode, struct file *filp,
		     unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	struct drm_map map;
	drm_map_list_t *maplist;
	struct drm_map __user *argp = (void __user *)arg;
	int err;

	if (!(filp->f_mode & 3))
		return -EACCES;	/* Require read/write */

	if (copy_from_user(&map, argp, sizeof(map))) {
		return -EFAULT;
	}

	if (!(capable(CAP_SYS_ADMIN) || map.type == _DRM_AGP))
		return -EPERM;

	err = drm_addmap_core(dev, map.offset, map.size, map.type, map.flags,
			      &maplist);

	if (err)
		return err;

	if (copy_to_user(argp, maplist->map, sizeof(struct drm_map)))
		return -EFAULT;

	/* avoid a warning on 64-bit, this casting isn't very nice, but the API is set so too late */
	if (put_user((void *)(unsigned long)maplist->user_token, &argp->handle))
		return -EFAULT;
	return 0;
}
/**
 * Remove a private map from the list and deallocate resources if the
 * mapping isn't in use.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a struct drm_map structure.
 * \return zero on success or a negative value on error.
 *
 * Searches for the map on drm_device::maplist, removes it from the list,
 * sees if it's being used, and frees any associated resources (such as
 * MTRRs) if it's not.
 *
 * \sa drm_addmap
 */
int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map)
{
	drm_map_list_t *r_list = NULL, *list_t;
	drm_dma_handle_t dmah;
	int found = 0;

	/* Find the list entry for the map and remove it */
	list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
		if (r_list->map == map) {
			list_del(&r_list->head);
			drm_ht_remove_key(&dev->map_hash,
					  r_list->user_token >> PAGE_SHIFT);
			drm_free(r_list, sizeof(*r_list), DRM_MEM_MAPS);
			found = 1;
			break;
		}
	}

	if (!found)
		return -EINVAL;

	switch (map->type) {
	case _DRM_REGISTERS:
		iounmap(map->handle);
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
			int retcode;
			retcode = mtrr_del(map->mtrr, map->offset, map->size);
			DRM_DEBUG("mtrr_del=%d\n", retcode);
		}
		break;
	case _DRM_SHM:
		vfree(map->handle);
		break;
	case _DRM_AGP:
	case _DRM_SCATTER_GATHER:
		break;
	case _DRM_CONSISTENT:
		dmah.vaddr = map->handle;
		dmah.busaddr = map->offset;
		dmah.size = map->size;
		__drm_pci_free(dev, &dmah);
		break;
	}
	drm_free(map, sizeof(*map), DRM_MEM_MAPS);

	return 0;
}
EXPORT_SYMBOL(drm_rmmap_locked);
int drm_rmmap(drm_device_t *dev, drm_local_map_t *map)
{
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = drm_rmmap_locked(dev, map);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_rmmap);
/* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
 * the last close of the device, and this is necessary for cleanup when things
 * exit uncleanly.  Therefore, having userland manually remove mappings seems
 * like a pointless exercise since they're going away anyway.
 *
 * One use case might be after addmap is allowed for normal users for SHM and
 * gets used by drivers that the server doesn't need to care about.  This seems
 * unlikely.
 */
int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
		    unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	struct drm_map request;
	drm_local_map_t *map = NULL;
	drm_map_list_t *r_list;
	int ret;

	if (copy_from_user(&request, (struct drm_map __user *) arg, sizeof(request))) {
		return -EFAULT;
	}

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(r_list, &dev->maplist, head) {
		if (r_list->map &&
		    r_list->user_token == (unsigned long)request.handle &&
		    r_list->map->flags & _DRM_REMOVABLE) {
			map = r_list->map;
			break;
		}
	}

	/* List has wrapped around to the head pointer, or its empty we didn't
	 * find anything.
	 */
	if (list_empty(&dev->maplist) || !map) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	if (!map) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	/* Register and framebuffer maps are permanent */
	if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
		mutex_unlock(&dev->struct_mutex);
		return 0;
	}

	ret = drm_rmmap_locked(dev, map);

	mutex_unlock(&dev->struct_mutex);

	return ret;
}
/**
 * Cleanup after an error on one of the addbufs() functions.
 *
 * \param dev DRM device.
 * \param entry buffer entry where the error occurred.
 *
 * Frees any pages and buffers associated with the given entry.
 */
static void drm_cleanup_buf_error(drm_device_t * dev, drm_buf_entry_t * entry)
{
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			if (entry->seglist[i]) {
				drm_pci_free(dev, entry->seglist[i]);
			}
		}
		drm_free(entry->seglist,
			 entry->seg_count *
			 sizeof(*entry->seglist), DRM_MEM_SEGS);

		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			if (entry->buflist[i].dev_private) {
				drm_free(entry->buflist[i].dev_private,
					 entry->buflist[i].dev_priv_size,
					 DRM_MEM_BUFS);
			}
		}
		drm_free(entry->buflist,
			 entry->buf_count *
			 sizeof(*entry->buflist), DRM_MEM_BUFS);

		entry->buf_count = 0;
	}
}
#if __OS_HAS_AGP
/**
 * Add AGP buffers for DMA transfers.
 *
 * \param dev drm_device_t to which the buffers are to be added.
 * \param request pointer to a struct drm_buf_desc describing the request.
 * \return zero on success or a negative number on failure.
 *
 * After some sanity checks creates a drm_buf structure for each buffer and
 * reallocates the buffer list of the same size order to accommodate the new
 * buffers.
 */
int drm_addbufs_agp(drm_device_t * dev, struct drm_buf_desc * request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_agp_mem_t *agp_entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i, valid;
	drm_buf_t **temp_buflist;

	if (!dma)
		return -EINVAL;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = dev->agp->base + request->agp_start;

	DRM_DEBUG("count:      %d\n", count);
	DRM_DEBUG("order:      %d\n", order);
	DRM_DEBUG("size:       %d\n", size);
	DRM_DEBUG("agp_offset: %lx\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total:      %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	/* Make sure buffers are located in AGP memory that we own */
	valid = 0;
	list_for_each_entry(agp_entry, &dev->agp->memory, head) {
		if ((agp_offset >= agp_entry->bound) &&
		    (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
			valid = 1;
			break;
		}
	}
	if (!list_empty(&dev->agp->memory) && !valid) {
		DRM_DEBUG("zone invalid\n");
		return -EINVAL;
	}
	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->filp = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		memset(buf->dev_private, 0, buf->dev_priv_size);

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_AGP;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
EXPORT_SYMBOL(drm_addbufs_agp);
#endif				/* __OS_HAS_AGP */
int drm_addbufs_pci(drm_device_t * dev, struct drm_buf_desc * request)
{
	drm_device_dma_t *dma = dev->dma;
	int count;
	int order;
	int size;
	int total;
	int page_order;
	drm_buf_entry_t *entry;
	drm_dma_handle_t *dmah;
	drm_buf_t *buf;
	int alignment;
	unsigned long offset;
	int i;
	int byte_count;
	int page_count;
	unsigned long *temp_pagelist;
	drm_buf_t **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	DRM_DEBUG("count=%d, size=%d (%d), order=%d, queue_count=%d\n",
		  request->count, request->size, size, order, dev->queue_count);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->seglist = drm_alloc(count * sizeof(*entry->seglist),
				   DRM_MEM_SEGS);
	if (!entry->seglist) {
		drm_free(entry->buflist,
			 count * sizeof(*entry->buflist), DRM_MEM_BUFS);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->seglist, 0, count * sizeof(*entry->seglist));

	/* Keep the original pagelist until we know all the allocations
	 * have succeeded
	 */
	temp_pagelist = drm_alloc((dma->page_count + (count << page_order))
				  * sizeof(*dma->pagelist), DRM_MEM_PAGES);
	if (!temp_pagelist) {
		drm_free(entry->buflist,
			 count * sizeof(*entry->buflist), DRM_MEM_BUFS);
		drm_free(entry->seglist,
			 count * sizeof(*entry->seglist), DRM_MEM_SEGS);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memcpy(temp_pagelist,
	       dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
	DRM_DEBUG("pagelist: %d entries\n",
		  dma->page_count + (count << page_order));

	entry->buf_size = size;
	entry->page_order = page_order;
	byte_count = 0;
	page_count = 0;

	while (entry->buf_count < count) {

		dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000, 0xfffffffful);

		if (!dmah) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			entry->seg_count = count;
			drm_cleanup_buf_error(dev, entry);
			drm_free(temp_pagelist,
				 (dma->page_count + (count << page_order))
				 * sizeof(*dma->pagelist), DRM_MEM_PAGES);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		entry->seglist[entry->seg_count++] = dmah;
		for (i = 0; i < (1 << page_order); i++) {
			DRM_DEBUG("page %d @ 0x%08lx\n",
				  dma->page_count + page_count,
				  (unsigned long)dmah->vaddr + PAGE_SIZE * i);
			temp_pagelist[dma->page_count + page_count++]
				= (unsigned long)dmah->vaddr + PAGE_SIZE * i;
		}
		for (offset = 0;
		     offset + size <= total && entry->buf_count < count;
		     offset += alignment, ++entry->buf_count) {
			buf = &entry->buflist[entry->buf_count];
			buf->idx = dma->buf_count + entry->buf_count;
			buf->total = alignment;
			buf->order = order;
			buf->used = 0;
			buf->offset = (dma->byte_count + byte_count + offset);
			buf->address = (void *)(dmah->vaddr + offset);
			buf->bus_address = dmah->busaddr + offset;
			buf->next = NULL;
			buf->waiting = 0;
			buf->pending = 0;
			init_waitqueue_head(&buf->dma_wait);
			buf->filp = NULL;

			buf->dev_priv_size = dev->driver->dev_priv_size;
			buf->dev_private = drm_alloc(buf->dev_priv_size,
						     DRM_MEM_BUFS);
			if (!buf->dev_private) {
				/* Set count correctly so we free the proper amount. */
				entry->buf_count = count;
				entry->seg_count = count;
				drm_cleanup_buf_error(dev, entry);
				drm_free(temp_pagelist,
					 (dma->page_count +
					  (count << page_order))
					 * sizeof(*dma->pagelist),
					 DRM_MEM_PAGES);
				mutex_unlock(&dev->struct_mutex);
				atomic_dec(&dev->buf_alloc);
				return -ENOMEM;
			}
			memset(buf->dev_private, 0, buf->dev_priv_size);

			DRM_DEBUG("buffer %d @ %p\n",
				  entry->buf_count, buf->address);
		}
		byte_count += PAGE_SIZE << page_order;
	}

	temp_buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		drm_free(temp_pagelist,
			 (dma->page_count + (count << page_order))
			 * sizeof(*dma->pagelist), DRM_MEM_PAGES);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	/* No allocations failed, so now we can replace the original pagelist
	 * with the new one.
	 */
	if (dma->page_count) {
		drm_free(dma->pagelist,
			 dma->page_count * sizeof(*dma->pagelist),
			 DRM_MEM_PAGES);
	}
	dma->pagelist = temp_pagelist;

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	if (request->flags & _DRM_PCI_BUFFER_RO)
		dma->flags = _DRM_DMA_USE_PCI_RO;

	atomic_dec(&dev->buf_alloc);
	return 0;

}
EXPORT_SYMBOL(drm_addbufs_pci);
static int drm_addbufs_sg(drm_device_t * dev, struct drm_buf_desc * request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_SG))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count:      %d\n", count);
	DRM_DEBUG("order:      %d\n", order);
	DRM_DEBUG("size:       %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total:      %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset
					+ (unsigned long)dev->sg->virtual);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->filp = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}

		memset(buf->dev_private, 0, buf->dev_priv_size);

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_SG;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
static int drm_addbufs_fb(drm_device_t * dev, struct drm_buf_desc * request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count:      %d\n", count);
	DRM_DEBUG("order:      %d\n", order);
	DRM_DEBUG("size:       %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total:      %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->filp = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		memset(buf->dev_private, 0, buf->dev_priv_size);

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_FB;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
/**
 * Add buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a struct drm_buf_desc request.
 * \return zero on success or a negative number on failure.
 *
 * Depending on the memory type specified in drm_buf_desc::flags and the
 * build options, it dispatches the call either to addbufs_agp(),
 * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
 * PCI memory respectively.
 */
int drm_addbufs(struct inode *inode, struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	struct drm_buf_desc request;
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (copy_from_user(&request, (struct drm_buf_desc __user *) arg,
			   sizeof(request)))
		return -EFAULT;

#if __OS_HAS_AGP
	if (request.flags & _DRM_AGP_BUFFER)
		ret = drm_addbufs_agp(dev, &request);
	else
#endif
	if (request.flags & _DRM_SG_BUFFER)
		ret = drm_addbufs_sg(dev, &request);
	else if (request.flags & _DRM_FB_BUFFER)
		ret = drm_addbufs_fb(dev, &request);
	else
		ret = drm_addbufs_pci(dev, &request);

	if (ret == 0) {
		if (copy_to_user((void __user *)arg, &request, sizeof(request))) {
			ret = -EFAULT;
		}
	}
	return ret;
}
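
/*
 * Illustrative sketch (not part of the original file): requesting a pool
 * of DMA buffers from userspace.  The flags select which of the backends
 * above services the request; with none of _DRM_AGP_BUFFER, _DRM_SG_BUFFER
 * or _DRM_FB_BUFFER set, it falls through to drm_addbufs_pci().  Assumes
 * <drm/drm.h> userspace definitions; guarded out of the kernel build.
 */
#if 0
static int example_add_bufs(int fd)
{
	struct drm_buf_desc desc = {
		.count = 32,
		.size  = 0x10000,	/* rounded up to a power of two via drm_order() */
		.flags = _DRM_PAGE_ALIGN,
	};

	if (ioctl(fd, DRM_IOCTL_ADD_BUFS, &desc) < 0)
		return -errno;
	/* On return, desc.count and desc.size report what was actually
	 * allocated; count may be less than requested. */
	return desc.count;
}
#endif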
/**
 * Get information about the buffer mappings.
 *
 * This was originally meant for debugging purposes, or by a sophisticated
 * client library to determine how best to use the available buffers (e.g.,
 * large buffers can be used for image transfer).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_info structure.
 * \return zero on success or a negative number on failure.
 *
 * Increments drm_device::buf_use while holding the drm_device::count_lock
 * lock, preventing allocation of more buffers after this call.  Information
 * about each requested buffer is then copied into user space.
 */
int drm_infobufs(struct inode *inode, struct file *filp,
		 unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
	struct drm_buf_info request;
	struct drm_buf_info __user *argp = (void __user *)arg;
	int i;
	int count;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->count_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	++dev->buf_use;		/* Can't allocate more after this call */
	spin_unlock(&dev->count_lock);

	if (copy_from_user(&request, argp, sizeof(request)))
		return -EFAULT;

	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
		if (dma->bufs[i].buf_count)
			++count;
	}

	DRM_DEBUG("count = %d\n", count);

	if (request.count >= count) {
		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
			if (dma->bufs[i].buf_count) {
				struct drm_buf_desc __user *to =
				    &request.list[count];
				drm_buf_entry_t *from = &dma->bufs[i];
				drm_freelist_t *list = &dma->bufs[i].freelist;
				if (copy_to_user(&to->count,
						 &from->buf_count,
						 sizeof(from->buf_count)) ||
				    copy_to_user(&to->size,
						 &from->buf_size,
						 sizeof(from->buf_size)) ||
				    copy_to_user(&to->low_mark,
						 &list->low_mark,
						 sizeof(list->low_mark)) ||
				    copy_to_user(&to->high_mark,
						 &list->high_mark,
						 sizeof(list->high_mark)))
					return -EFAULT;

				DRM_DEBUG("%d %d %d %d %d\n",
					  i,
					  dma->bufs[i].buf_count,
					  dma->bufs[i].buf_size,
					  dma->bufs[i].freelist.low_mark,
					  dma->bufs[i].freelist.high_mark);
				++count;
			}
		}
	}
	request.count = count;

	if (copy_to_user(argp, &request, sizeof(request)))
		return -EFAULT;

	return 0;
}
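
/*
 * Illustrative sketch (not part of the original file): the usual two-pass
 * pattern for DRM_IOCTL_INFO_BUFS.  A first call with count == 0 only
 * reports how many per-order pools exist; the caller then allocates the
 * list and calls again to have each pool's description copied out.
 * Userspace code, assuming <drm/drm.h> and <stdlib.h>; guarded out.
 */
#if 0
static int example_info_bufs(int fd)
{
	struct drm_buf_info info = { .count = 0, .list = NULL };
	struct drm_buf_desc *list;

	if (ioctl(fd, DRM_IOCTL_INFO_BUFS, &info) < 0)	/* probe pool count */
		return -errno;

	list = calloc(info.count, sizeof(*list));
	if (!list)
		return -ENOMEM;

	info.list = list;
	if (ioctl(fd, DRM_IOCTL_INFO_BUFS, &info) < 0) {	/* fetch descriptions */
		free(list);
		return -errno;
	}
	/* list[0..info.count-1] now describe the per-order buffer pools. */
	free(list);
	return 0;
}
#endif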
/**
 * Specifies a low and high water mark for buffer allocation.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg a pointer to a drm_buf_desc structure.
 * \return zero on success or a negative number on failure.
 *
 * Verifies that the size order is bounded between the admissible orders and
 * updates the respective drm_device_dma::bufs entry low and high water mark.
 *
 * \note This ioctl is deprecated and mostly never used.
 */
int drm_markbufs(struct inode *inode, struct file *filp,
		 unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
	struct drm_buf_desc request;
	int order;
	drm_buf_entry_t *entry;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (copy_from_user(&request,
			   (struct drm_buf_desc __user *) arg, sizeof(request)))
		return -EFAULT;

	DRM_DEBUG("%d, %d, %d\n",
		  request.size, request.low_mark, request.high_mark);
	order = drm_order(request.size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	entry = &dma->bufs[order];

	if (request.low_mark < 0 || request.low_mark > entry->buf_count)
		return -EINVAL;
	if (request.high_mark < 0 || request.high_mark > entry->buf_count)
		return -EINVAL;

	entry->freelist.low_mark = request.low_mark;
	entry->freelist.high_mark = request.high_mark;

	return 0;
}
/**
 * Unreserve the buffers in list, previously reserved using drmDMA.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_free structure.
 * \return zero on success or a negative number on failure.
 *
 * Calls free_buffer() for each used buffer.
 * This function is primarily used for debugging.
 */
int drm_freebufs(struct inode *inode, struct file *filp,
		 unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
	struct drm_buf_free request;
	int i;
	int idx;
	drm_buf_t *buf;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (copy_from_user(&request,
			   (struct drm_buf_free __user *) arg, sizeof(request)))
		return -EFAULT;

	DRM_DEBUG("%d\n", request.count);
	for (i = 0; i < request.count; i++) {
		if (copy_from_user(&idx, &request.list[i], sizeof(idx)))
			return -EFAULT;
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n",
				  idx, dma->buf_count - 1);
			return -EINVAL;
		}
		buf = dma->buflist[idx];
		if (buf->filp != filp) {
			DRM_ERROR("Process %d freeing buffer not owned\n",
				  current->pid);
			return -EINVAL;
		}
		drm_free_buffer(dev, buf);
	}

	return 0;
}
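
/*
 * Illustrative sketch (not part of the original file): returning buffers
 * previously reserved through the DMA interface.  The list holds buffer
 * indices as reported in drm_buf_pub::idx.  Userspace code assuming
 * <drm/drm.h>; guarded out of the kernel build.
 */
#if 0
static int example_free_bufs(int fd, int *indices, int n)
{
	struct drm_buf_free req = { .count = n, .list = indices };

	return ioctl(fd, DRM_IOCTL_FREE_BUFS, &req) ? -errno : 0;
}
#endif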
/**
 * Maps all of the DMA buffers into client-virtual space (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_map structure.
 * \return zero on success or a negative number on failure.
 *
 * Maps the AGP, SG or PCI buffer region with do_mmap(), and copies information
 * about each buffer into user space. For PCI buffers, it calls do_mmap() with
 * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
 * drm_mmap_dma().
 */
int drm_mapbufs(struct inode *inode, struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
	struct drm_buf_map __user *argp = (void __user *)arg;
	int retcode = 0;
	const int zero = 0;
	unsigned long virtual;
	unsigned long address;
	struct drm_buf_map request;
	int i;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->count_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	dev->buf_use++;		/* Can't allocate more after this call */
	spin_unlock(&dev->count_lock);

	if (copy_from_user(&request, argp, sizeof(request)))
		return -EFAULT;

	if (request.count >= dma->buf_count) {
		if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
		    || (drm_core_check_feature(dev, DRIVER_SG)
			&& (dma->flags & _DRM_DMA_USE_SG))
		    || (drm_core_check_feature(dev, DRIVER_FB_DMA)
			&& (dma->flags & _DRM_DMA_USE_FB))) {
			struct drm_map *map = dev->agp_buffer_map;
			unsigned long token = dev->agp_buffer_token;

			if (!map) {
				retcode = -EINVAL;
				goto done;
			}

			down_write(&current->mm->mmap_sem);
			virtual = do_mmap(filp, 0, map->size,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED, token);
			up_write(&current->mm->mmap_sem);
		} else {
			down_write(&current->mm->mmap_sem);
			virtual = do_mmap(filp, 0, dma->byte_count,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED, 0);
			up_write(&current->mm->mmap_sem);
		}
		if (virtual > -1024UL) {
			/* Real error */
			retcode = (signed long)virtual;
			goto done;
		}
		request.virtual = (void __user *)virtual;

		for (i = 0; i < dma->buf_count; i++) {
			if (copy_to_user(&request.list[i].idx,
					 &dma->buflist[i]->idx,
					 sizeof(request.list[0].idx))) {
				retcode = -EFAULT;
				goto done;
			}
			if (copy_to_user(&request.list[i].total,
					 &dma->buflist[i]->total,
					 sizeof(request.list[0].total))) {
				retcode = -EFAULT;
				goto done;
			}
			if (copy_to_user(&request.list[i].used,
					 &zero, sizeof(zero))) {
				retcode = -EFAULT;
				goto done;
			}
			address = virtual + dma->buflist[i]->offset;	/* *** */
			if (copy_to_user(&request.list[i].address,
					 &address, sizeof(address))) {
				retcode = -EFAULT;
				goto done;
			}
		}
	}
      done:
	request.count = dma->buf_count;
	DRM_DEBUG("%d buffers, retcode = %d\n", request.count, retcode);

	if (copy_to_user(argp, &request, sizeof(request)))
		return -EFAULT;

	return retcode;
}
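
/*
 * Illustrative sketch (not part of the original file): mapping the whole
 * buffer pool into the client.  request.count must cover every buffer,
 * and request.virtual receives the base of the mapping; each drm_buf_pub
 * entry then carries the client-virtual address of one buffer.  Userspace
 * code assuming <drm/drm.h> and <stdlib.h>; guarded out.
 */
#if 0
static int example_map_bufs(int fd, int buf_count)
{
	struct drm_buf_pub *list = calloc(buf_count, sizeof(*list));
	struct drm_buf_map request = { .count = buf_count, .list = list };

	if (!list)
		return -ENOMEM;
	if (ioctl(fd, DRM_IOCTL_MAP_BUFS, &request) < 0) {
		free(list);
		return -errno;
	}
	/* list[i].address is now valid in this process; list[i].idx is the
	 * index to hand back to DRM_IOCTL_FREE_BUFS when done. */
	free(list);
	return 0;
}
#endif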
/**
 * Compute size order.  Returns the exponent of the smallest power of two
 * which is greater than or equal to the given number.
 *
 * \param size size.
 * \return order.
 *
 * \todo Can be made faster.
 */
int drm_order(unsigned long size)
{
	int order;
	unsigned long tmp;

	for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;

	if (size & (size - 1))
		++order;

	return order;
}
EXPORT_SYMBOL(drm_order);
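
/*
 * Worked example (not part of the original file): drm_order() returns the
 * exponent of the smallest power of two >= size, so
 *
 *	drm_order(1)    == 0	(2^0  = 1)
 *	drm_order(4096) == 12	(2^12 = 4096, exact power of two)
 *	drm_order(4097) == 13	(rounded up to 2^13 = 8192)
 *
 * which is why the drm_addbufs_*() functions compute the real buffer size
 * as 1 << drm_order(request->size).
 */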