/**
 * \file drm_bufs.c
 * Generic buffer template
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/vmalloc.h>
#include "drmP.h"

/**
 * Compute the size order. Returns the exponent of the smallest power of two
 * which is greater than or equal to the given number.
 *
 * \param size size.
 * \return order.
 *
 * \todo Can be made faster.
 */
int drm_order( unsigned long size )
{
        int order;
        unsigned long tmp;

        for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++)
                ;

        if (size & (size - 1))
                ++order;

        return order;
}
EXPORT_SYMBOL(drm_order);
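
/*
 * Illustrative example (values chosen for illustration only): with the loop
 * above, drm_order(4096) == 12 and drm_order(4097) == 13, i.e. the returned
 * order is the exponent of the next power of two at or above 'size'.
 */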

/**
 * Ioctl to specify a range of memory that is available for mapping by a
 * non-root process.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_map structure.
 * \return zero on success or a negative value on error.
 *
 * Adjusts the memory offset to its absolute value according to the mapping
 * type. Adds the map to the map list drm_device::maplist. Adds MTRRs where
 * applicable and if supported by the kernel.
 */
int drm_addmap( struct inode *inode, struct file *filp,
                unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_map_t *map;
        drm_map_t __user *argp = (void __user *)arg;
        drm_map_list_t *list;

        if ( !(filp->f_mode & 3) ) return -EACCES; /* Require read/write */

        map = drm_alloc( sizeof(*map), DRM_MEM_MAPS );
        if ( !map )
                return -ENOMEM;

        if ( copy_from_user( map, argp, sizeof(*map) ) ) {
                drm_free( map, sizeof(*map), DRM_MEM_MAPS );
                return -EFAULT;
        }

        /* Only allow shared memory to be removable since we only keep enough
         * book keeping information about shared memory to allow for removal
         * when processes fork.
         */
        if ( (map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM ) {
                drm_free( map, sizeof(*map), DRM_MEM_MAPS );
                return -EINVAL;
        }
        DRM_DEBUG( "offset = 0x%08lx, size = 0x%08lx, type = %d\n",
                   map->offset, map->size, map->type );
        if ( (map->offset & (~PAGE_MASK)) || (map->size & (~PAGE_MASK)) ) {
                drm_free( map, sizeof(*map), DRM_MEM_MAPS );
                return -EINVAL;
        }
        map->mtrr = -1;
        map->handle = NULL;

        switch ( map->type ) {
        case _DRM_REGISTERS:
        case _DRM_FRAME_BUFFER:
#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__)
                if ( map->offset + map->size < map->offset ||
                     map->offset < virt_to_phys(high_memory) ) {
                        drm_free( map, sizeof(*map), DRM_MEM_MAPS );
                        return -EINVAL;
                }
#endif
#ifdef __alpha__
                map->offset += dev->hose->mem_space->start;
#endif
                if (drm_core_has_MTRR(dev)) {
                        if ( map->type == _DRM_FRAME_BUFFER ||
                             (map->flags & _DRM_WRITE_COMBINING) ) {
                                map->mtrr = mtrr_add( map->offset, map->size,
                                                      MTRR_TYPE_WRCOMB, 1 );
                        }
                }
                if (map->type == _DRM_REGISTERS)
                        map->handle = drm_ioremap( map->offset, map->size,
                                                   dev );
                break;

        case _DRM_SHM:
                map->handle = vmalloc_32(map->size);
                DRM_DEBUG( "%lu %d %p\n",
                           map->size, drm_order( map->size ), map->handle );
                if ( !map->handle ) {
                        drm_free( map, sizeof(*map), DRM_MEM_MAPS );
                        return -ENOMEM;
                }
                map->offset = (unsigned long)map->handle;
                if ( map->flags & _DRM_CONTAINS_LOCK ) {
                        /* Prevent a 2nd X Server from creating a 2nd lock */
                        if (dev->lock.hw_lock != NULL) {
                                vfree( map->handle );
                                drm_free( map, sizeof(*map), DRM_MEM_MAPS );
                                return -EBUSY;
                        }
                        dev->sigdata.lock =
                        dev->lock.hw_lock = map->handle; /* Pointer to lock */
                }
                break;
        case _DRM_AGP:
                if (drm_core_has_AGP(dev)) {
#ifdef __alpha__
                        map->offset += dev->hose->mem_space->start;
#endif
                        map->offset += dev->agp->base;
                        map->mtrr = dev->agp->agp_mtrr; /* for getmap */
                }
                break;
        case _DRM_SCATTER_GATHER:
                if (!dev->sg) {
                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                        return -EINVAL;
                }
                map->offset += dev->sg->handle;
                break;

        default:
                drm_free( map, sizeof(*map), DRM_MEM_MAPS );
                return -EINVAL;
        }

        list = drm_alloc(sizeof(*list), DRM_MEM_MAPS);
        if(!list) {
                drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                return -EINVAL;
        }
        memset(list, 0, sizeof(*list));
        list->map = map;

        down(&dev->struct_sem);
        list_add(&list->head, &dev->maplist->head);
        up(&dev->struct_sem);

        if ( copy_to_user( argp, map, sizeof(*map) ) )
                return -EFAULT;
        if ( map->type != _DRM_SHM ) {
                if ( copy_to_user( &argp->handle,
                                   &map->offset,
                                   sizeof(map->offset) ) )
                        return -EFAULT;
        }
        return 0;
}
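
/*
 * Illustrative userspace usage (values are hypothetical, shown only as a
 * sketch): a DRI client or X driver typically fills a drm_map_t, issues
 * DRM_IOCTL_ADD_MAP, and then mmap()s the region through the DRM device
 * node using the handle returned by the ioctl as the mmap offset:
 *
 *     drm_map_t map = { .offset = 0xe0000000, .size = 0x4000,
 *                       .type = _DRM_REGISTERS, .flags = 0 };
 *     ioctl(fd, DRM_IOCTL_ADD_MAP, &map);
 *     regs = mmap(NULL, map.size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                 fd, (off_t)(unsigned long)map.handle);
 */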

/**
 * Remove a map and deallocate its resources if the mapping isn't in use.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_map_t structure.
 * \return zero on success or a negative value on error.
 *
 * Searches for the map on drm_device::maplist, removes it from the list,
 * checks whether it is still in use, and frees any associated resources
 * (such as MTRRs) if it is not.
 *
 * \sa addmap().
 */
int drm_rmmap(struct inode *inode, struct file *filp,
              unsigned int cmd, unsigned long arg)
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        struct list_head *list;
        drm_map_list_t *r_list = NULL;
        drm_vma_entry_t *pt, *prev;
        drm_map_t *map;
        drm_map_t request;
        int found_maps = 0;

        if (copy_from_user(&request, (drm_map_t __user *)arg,
                           sizeof(request))) {
                return -EFAULT;
        }

        down(&dev->struct_sem);
        list = &dev->maplist->head;
        list_for_each(list, &dev->maplist->head) {
                r_list = list_entry(list, drm_map_list_t, head);

                if(r_list->map &&
                   r_list->map->handle == request.handle &&
                   r_list->map->flags & _DRM_REMOVABLE) break;
        }

        /* List has wrapped around to the head pointer, or it's empty and we
         * didn't find anything.
         */
        if(list == (&dev->maplist->head)) {
                up(&dev->struct_sem);
                return -EINVAL;
        }
        map = r_list->map;
        list_del(list);
        drm_free(list, sizeof(*list), DRM_MEM_MAPS);

        for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
                if (pt->vma->vm_private_data == map) found_maps++;
        }

        if(!found_maps) {
                switch (map->type) {
                case _DRM_REGISTERS:
                case _DRM_FRAME_BUFFER:
                        if (drm_core_has_MTRR(dev)) {
                                if (map->mtrr >= 0) {
                                        int retcode;
                                        retcode = mtrr_del(map->mtrr,
                                                           map->offset,
                                                           map->size);
                                        DRM_DEBUG("mtrr_del = %d\n", retcode);
                                }
                        }
                        drm_ioremapfree(map->handle, map->size, dev);
                        break;
                case _DRM_SHM:
                        vfree(map->handle);
                        break;
                case _DRM_AGP:
                case _DRM_SCATTER_GATHER:
                        break;
                }
                drm_free(map, sizeof(*map), DRM_MEM_MAPS);
        }
        up(&dev->struct_sem);
        return 0;
}

/**
 * Cleanup after an error on one of the addbufs() functions.
 *
 * \param dev DRM device.
 * \param entry buffer entry where the error occurred.
 *
 * Frees any pages and buffers associated with the given entry.
 */
static void drm_cleanup_buf_error(drm_device_t *dev, drm_buf_entry_t *entry)
{
        int i;

        if (entry->seg_count) {
                for (i = 0; i < entry->seg_count; i++) {
                        if (entry->seglist[i]) {
                                drm_free_pages(entry->seglist[i],
                                               entry->page_order,
                                               DRM_MEM_DMA);
                        }
                }
                drm_free(entry->seglist,
                         entry->seg_count *
                         sizeof(*entry->seglist),
                         DRM_MEM_SEGS);

                entry->seg_count = 0;
        }

        if (entry->buf_count) {
                for (i = 0; i < entry->buf_count; i++) {
                        if (entry->buflist[i].dev_private) {
                                drm_free(entry->buflist[i].dev_private,
                                         entry->buflist[i].dev_priv_size,
                                         DRM_MEM_BUFS);
                        }
                }
                drm_free(entry->buflist,
                         entry->buf_count *
                         sizeof(*entry->buflist),
                         DRM_MEM_BUFS);

                entry->buf_count = 0;
        }
}

#if __OS_HAS_AGP
/**
 * Add AGP buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_desc_t request.
 * \return zero on success or a negative number on failure.
 *
 * After some sanity checks creates a drm_buf structure for each buffer and
 * reallocates the buffer list of the same size order to accommodate the new
 * buffers.
 */
int drm_addbufs_agp( struct inode *inode, struct file *filp,
                     unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_desc_t request;
        drm_buf_entry_t *entry;
        drm_buf_t *buf;
        unsigned long offset;
        unsigned long agp_offset;
        int count;
        int order;
        int size;
        int alignment;
        int page_order;
        int total;
        int byte_count;
        int i;
        drm_buf_t **temp_buflist;
        drm_buf_desc_t __user *argp = (void __user *)arg;

        if ( !dma ) return -EINVAL;

        if ( copy_from_user( &request, argp,
                             sizeof(request) ) )
                return -EFAULT;

        count = request.count;
        order = drm_order( request.size );
        size = 1 << order;

        alignment = (request.flags & _DRM_PAGE_ALIGN)
                ? PAGE_ALIGN(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        byte_count = 0;
        agp_offset = dev->agp->base + request.agp_start;

        DRM_DEBUG( "count: %d\n", count );
        DRM_DEBUG( "order: %d\n", order );
        DRM_DEBUG( "size: %d\n", size );
        DRM_DEBUG( "agp_offset: %lu\n", agp_offset );
        DRM_DEBUG( "alignment: %d\n", alignment );
        DRM_DEBUG( "page_order: %d\n", page_order );
        DRM_DEBUG( "total: %d\n", total );

        if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
        if ( dev->queue_count ) return -EBUSY; /* Not while in use */

        spin_lock( &dev->count_lock );
        if ( dev->buf_use ) {
                spin_unlock( &dev->count_lock );
                return -EBUSY;
        }
        atomic_inc( &dev->buf_alloc );
        spin_unlock( &dev->count_lock );

        down( &dev->struct_sem );
        entry = &dma->bufs[order];
        if ( entry->buf_count ) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM; /* May only call once for each order */
        }

        if (count < 0 || count > 4096) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -EINVAL;
        }

        entry->buflist = drm_alloc( count * sizeof(*entry->buflist),
                                    DRM_MEM_BUFS );
        if ( !entry->buflist ) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

        entry->buf_size = size;
        entry->page_order = page_order;

        offset = 0;

        while ( entry->buf_count < count ) {
                buf = &entry->buflist[entry->buf_count];
                buf->idx = dma->buf_count + entry->buf_count;
                buf->total = alignment;
                buf->order = order;
                buf->used = 0;

                buf->offset = (dma->byte_count + offset);
                buf->bus_address = agp_offset + offset;
                buf->address = (void *)(agp_offset + offset);
                buf->next = NULL;
                buf->waiting = 0;
                buf->pending = 0;
                init_waitqueue_head( &buf->dma_wait );
                buf->filp = NULL;

                buf->dev_priv_size = dev->driver->dev_priv_size;
                buf->dev_private = drm_alloc( buf->dev_priv_size,
                                              DRM_MEM_BUFS );
                if(!buf->dev_private) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        drm_cleanup_buf_error(dev,entry);
                        up( &dev->struct_sem );
                        atomic_dec( &dev->buf_alloc );
                        return -ENOMEM;
                }
                memset( buf->dev_private, 0, buf->dev_priv_size );

                DRM_DEBUG( "buffer %d @ %p\n",
                           entry->buf_count, buf->address );

                offset += alignment;
                entry->buf_count++;
                byte_count += PAGE_SIZE << page_order;
        }

        DRM_DEBUG( "byte_count: %d\n", byte_count );

        temp_buflist = drm_realloc( dma->buflist,
                                    dma->buf_count * sizeof(*dma->buflist),
                                    (dma->buf_count + entry->buf_count)
                                    * sizeof(*dma->buflist),
                                    DRM_MEM_BUFS );
        if(!temp_buflist) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev,entry);
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        dma->buflist = temp_buflist;

        for ( i = 0 ; i < entry->buf_count ; i++ ) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        dma->buf_count += entry->buf_count;
        dma->byte_count += byte_count;

        DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
        DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );

        up( &dev->struct_sem );

        request.count = entry->buf_count;
        request.size = size;

        if ( copy_to_user( argp, &request, sizeof(request) ) )
                return -EFAULT;

        dma->flags = _DRM_DMA_USE_AGP;

        atomic_dec( &dev->buf_alloc );
        return 0;
}
#endif /* __OS_HAS_AGP */

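/**
 * Add PCI (consistent memory) buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_desc_t request.
 * \return zero on success or a negative number on failure.
 *
 * Allocates page blocks with drm_alloc_pages(), carves them into buffers of
 * the requested size order, and extends drm_device_dma::buflist and
 * drm_device_dma::pagelist accordingly.
 */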
int drm_addbufs_pci( struct inode *inode, struct file *filp,
                     unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_desc_t request;
        int count;
        int order;
        int size;
        int total;
        int page_order;
        drm_buf_entry_t *entry;
        unsigned long page;
        drm_buf_t *buf;
        int alignment;
        unsigned long offset;
        int i;
        int byte_count;
        int page_count;
        unsigned long *temp_pagelist;
        drm_buf_t **temp_buflist;
        drm_buf_desc_t __user *argp = (void __user *)arg;

        if (!drm_core_check_feature(dev, DRIVER_PCI_DMA)) return -EINVAL;
        if ( !dma ) return -EINVAL;

        if ( copy_from_user( &request, argp, sizeof(request) ) )
                return -EFAULT;

        count = request.count;
        order = drm_order( request.size );
        size = 1 << order;

        DRM_DEBUG( "count=%d, size=%d (%d), order=%d, queue_count=%d\n",
                   request.count, request.size, size,
                   order, dev->queue_count );

        if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
        if ( dev->queue_count ) return -EBUSY; /* Not while in use */

        alignment = (request.flags & _DRM_PAGE_ALIGN)
                ? PAGE_ALIGN(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        spin_lock( &dev->count_lock );
        if ( dev->buf_use ) {
                spin_unlock( &dev->count_lock );
                return -EBUSY;
        }
        atomic_inc( &dev->buf_alloc );
        spin_unlock( &dev->count_lock );

        down( &dev->struct_sem );
        entry = &dma->bufs[order];
        if ( entry->buf_count ) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM; /* May only call once for each order */
        }

        if (count < 0 || count > 4096) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -EINVAL;
        }

        entry->buflist = drm_alloc( count * sizeof(*entry->buflist),
                                    DRM_MEM_BUFS );
        if ( !entry->buflist ) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

        entry->seglist = drm_alloc( count * sizeof(*entry->seglist),
                                    DRM_MEM_SEGS );
        if ( !entry->seglist ) {
                drm_free( entry->buflist,
                          count * sizeof(*entry->buflist),
                          DRM_MEM_BUFS );
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        memset( entry->seglist, 0, count * sizeof(*entry->seglist) );

        /* Keep the original pagelist until we know all the allocations
         * have succeeded
         */
        temp_pagelist = drm_alloc( (dma->page_count + (count << page_order))
                                   * sizeof(*dma->pagelist),
                                   DRM_MEM_PAGES );
        if (!temp_pagelist) {
                drm_free( entry->buflist,
                          count * sizeof(*entry->buflist),
                          DRM_MEM_BUFS );
                drm_free( entry->seglist,
                          count * sizeof(*entry->seglist),
                          DRM_MEM_SEGS );
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        memcpy(temp_pagelist,
               dma->pagelist,
               dma->page_count * sizeof(*dma->pagelist));
        DRM_DEBUG( "pagelist: %d entries\n",
                   dma->page_count + (count << page_order) );

        entry->buf_size = size;
        entry->page_order = page_order;
        byte_count = 0;
        page_count = 0;

        while ( entry->buf_count < count ) {
                page = drm_alloc_pages( page_order, DRM_MEM_DMA );
                if ( !page ) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        entry->seg_count = count;
                        drm_cleanup_buf_error(dev, entry);
                        drm_free( temp_pagelist,
                                  (dma->page_count + (count << page_order))
                                  * sizeof(*dma->pagelist),
                                  DRM_MEM_PAGES );
                        up( &dev->struct_sem );
                        atomic_dec( &dev->buf_alloc );
                        return -ENOMEM;
                }
                entry->seglist[entry->seg_count++] = page;
                for ( i = 0 ; i < (1 << page_order) ; i++ ) {
                        DRM_DEBUG( "page %d @ 0x%08lx\n",
                                   dma->page_count + page_count,
                                   page + PAGE_SIZE * i );
                        temp_pagelist[dma->page_count + page_count++]
                                = page + PAGE_SIZE * i;
                }
                for ( offset = 0 ;
                      offset + size <= total && entry->buf_count < count ;
                      offset += alignment, ++entry->buf_count ) {
                        buf = &entry->buflist[entry->buf_count];
                        buf->idx = dma->buf_count + entry->buf_count;
                        buf->total = alignment;
                        buf->order = order;
                        buf->used = 0;
                        buf->offset = (dma->byte_count + byte_count + offset);
                        buf->address = (void *)(page + offset);
                        buf->next = NULL;
                        buf->waiting = 0;
                        buf->pending = 0;
                        init_waitqueue_head( &buf->dma_wait );
                        buf->filp = NULL;

                        buf->dev_priv_size = dev->driver->dev_priv_size;
                        buf->dev_private = drm_alloc( buf->dev_priv_size,
                                                      DRM_MEM_BUFS );
                        if(!buf->dev_private) {
                                /* Set count correctly so we free the proper amount. */
                                entry->buf_count = count;
                                entry->seg_count = count;
                                drm_cleanup_buf_error(dev,entry);
                                drm_free( temp_pagelist,
                                          (dma->page_count + (count << page_order))
                                          * sizeof(*dma->pagelist),
                                          DRM_MEM_PAGES );
                                up( &dev->struct_sem );
                                atomic_dec( &dev->buf_alloc );
                                return -ENOMEM;
                        }
                        memset( buf->dev_private, 0, buf->dev_priv_size );

                        DRM_DEBUG( "buffer %d @ %p\n",
                                   entry->buf_count, buf->address );
                }
                byte_count += PAGE_SIZE << page_order;
        }

        temp_buflist = drm_realloc( dma->buflist,
                                    dma->buf_count * sizeof(*dma->buflist),
                                    (dma->buf_count + entry->buf_count)
                                    * sizeof(*dma->buflist),
                                    DRM_MEM_BUFS );
        if (!temp_buflist) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev,entry);
                drm_free( temp_pagelist,
                          (dma->page_count + (count << page_order))
                          * sizeof(*dma->pagelist),
                          DRM_MEM_PAGES );
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        dma->buflist = temp_buflist;

        for ( i = 0 ; i < entry->buf_count ; i++ ) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        /* No allocations failed, so now we can replace the original pagelist
         * with the new one.
         */
        if (dma->page_count) {
                drm_free(dma->pagelist,
                         dma->page_count * sizeof(*dma->pagelist),
                         DRM_MEM_PAGES);
        }
        dma->pagelist = temp_pagelist;

        dma->buf_count += entry->buf_count;
        dma->seg_count += entry->seg_count;
        dma->page_count += entry->seg_count << page_order;
        dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

        up( &dev->struct_sem );

        request.count = entry->buf_count;
        request.size = size;

        if ( copy_to_user( argp, &request, sizeof(request) ) )
                return -EFAULT;

        atomic_dec( &dev->buf_alloc );
        return 0;
}

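/**
 * Add scatter-gather buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_desc_t request.
 * \return zero on success or a negative number on failure.
 *
 * Like drm_addbufs_agp(), but the buffers are laid out in the previously
 * allocated scatter-gather region (drm_device::sg) instead of the AGP
 * aperture.
 */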
int drm_addbufs_sg( struct inode *inode, struct file *filp,
                    unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_desc_t __user *argp = (void __user *)arg;
        drm_buf_desc_t request;
        drm_buf_entry_t *entry;
        drm_buf_t *buf;
        unsigned long offset;
        unsigned long agp_offset;
        int count;
        int order;
        int size;
        int alignment;
        int page_order;
        int total;
        int byte_count;
        int i;
        drm_buf_t **temp_buflist;

        if (!drm_core_check_feature(dev, DRIVER_SG)) return -EINVAL;

        if ( !dma ) return -EINVAL;

        if ( copy_from_user( &request, argp, sizeof(request) ) )
                return -EFAULT;

        count = request.count;
        order = drm_order( request.size );
        size = 1 << order;

        alignment = (request.flags & _DRM_PAGE_ALIGN)
                ? PAGE_ALIGN(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        byte_count = 0;
        agp_offset = request.agp_start;

        DRM_DEBUG( "count: %d\n", count );
        DRM_DEBUG( "order: %d\n", order );
        DRM_DEBUG( "size: %d\n", size );
        DRM_DEBUG( "agp_offset: %lu\n", agp_offset );
        DRM_DEBUG( "alignment: %d\n", alignment );
        DRM_DEBUG( "page_order: %d\n", page_order );
        DRM_DEBUG( "total: %d\n", total );

        if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
        if ( dev->queue_count ) return -EBUSY; /* Not while in use */

        spin_lock( &dev->count_lock );
        if ( dev->buf_use ) {
                spin_unlock( &dev->count_lock );
                return -EBUSY;
        }
        atomic_inc( &dev->buf_alloc );
        spin_unlock( &dev->count_lock );

        down( &dev->struct_sem );
        entry = &dma->bufs[order];
        if ( entry->buf_count ) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM; /* May only call once for each order */
        }

        if (count < 0 || count > 4096) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -EINVAL;
        }

        entry->buflist = drm_alloc( count * sizeof(*entry->buflist),
                                    DRM_MEM_BUFS );
        if ( !entry->buflist ) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

        entry->buf_size = size;
        entry->page_order = page_order;

        offset = 0;

        while ( entry->buf_count < count ) {
                buf = &entry->buflist[entry->buf_count];
                buf->idx = dma->buf_count + entry->buf_count;
                buf->total = alignment;
                buf->order = order;
                buf->used = 0;

                buf->offset = (dma->byte_count + offset);
                buf->bus_address = agp_offset + offset;
                buf->address = (void *)(agp_offset + offset + dev->sg->handle);
                buf->next = NULL;
                buf->waiting = 0;
                buf->pending = 0;
                init_waitqueue_head( &buf->dma_wait );
                buf->filp = NULL;

                buf->dev_priv_size = dev->driver->dev_priv_size;
                buf->dev_private = drm_alloc( buf->dev_priv_size,
                                              DRM_MEM_BUFS );
                if(!buf->dev_private) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        drm_cleanup_buf_error(dev,entry);
                        up( &dev->struct_sem );
                        atomic_dec( &dev->buf_alloc );
                        return -ENOMEM;
                }

                memset( buf->dev_private, 0, buf->dev_priv_size );

                DRM_DEBUG( "buffer %d @ %p\n",
                           entry->buf_count, buf->address );

                offset += alignment;
                entry->buf_count++;
                byte_count += PAGE_SIZE << page_order;
        }

        DRM_DEBUG( "byte_count: %d\n", byte_count );

        temp_buflist = drm_realloc( dma->buflist,
                                    dma->buf_count * sizeof(*dma->buflist),
                                    (dma->buf_count + entry->buf_count)
                                    * sizeof(*dma->buflist),
                                    DRM_MEM_BUFS );
        if(!temp_buflist) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev,entry);
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        dma->buflist = temp_buflist;

        for ( i = 0 ; i < entry->buf_count ; i++ ) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        dma->buf_count += entry->buf_count;
        dma->byte_count += byte_count;

        DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
        DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );

        up( &dev->struct_sem );

        request.count = entry->buf_count;
        request.size = size;

        if ( copy_to_user( argp, &request, sizeof(request) ) )
                return -EFAULT;

        dma->flags = _DRM_DMA_USE_SG;

        atomic_dec( &dev->buf_alloc );
        return 0;
}

/**
 * Add buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_desc_t request.
 * \return zero on success or a negative number on failure.
 *
 * According to the memory type specified in drm_buf_desc::flags and the
 * build options, it dispatches the call either to addbufs_agp(),
 * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
 * PCI memory respectively.
 */
int drm_addbufs( struct inode *inode, struct file *filp,
                 unsigned int cmd, unsigned long arg )
{
        drm_buf_desc_t request;
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EINVAL;

        if ( copy_from_user( &request, (drm_buf_desc_t __user *)arg,
                             sizeof(request) ) )
                return -EFAULT;

#if __OS_HAS_AGP
        if ( request.flags & _DRM_AGP_BUFFER )
                return drm_addbufs_agp( inode, filp, cmd, arg );
        else
#endif
        if ( request.flags & _DRM_SG_BUFFER )
                return drm_addbufs_sg( inode, filp, cmd, arg );
        else
                return drm_addbufs_pci( inode, filp, cmd, arg );
}
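
/*
 * Illustrative request (values are hypothetical, shown only as a sketch):
 * asking for 32 page-aligned AGP buffers of 64 KiB each at the start of the
 * aperture range previously reserved for buffers:
 *
 *     drm_buf_desc_t req = { .count = 32, .size = 65536,
 *                            .flags = _DRM_AGP_BUFFER | _DRM_PAGE_ALIGN,
 *                            .agp_start = 0 };
 *     ioctl(fd, DRM_IOCTL_ADD_BUFS, &req);
 */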

/**
 * Get information about the buffer mappings.
 *
 * This was originally meant for debugging purposes, or for use by a
 * sophisticated client library to determine how best to use the available
 * buffers (e.g., large buffers can be used for image transfer).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_info structure.
 * \return zero on success or a negative number on failure.
 *
 * Increments drm_device::buf_use while holding the drm_device::count_lock
 * lock, preventing allocation of more buffers after this call. Information
 * about each requested buffer is then copied into user space.
 */
int drm_infobufs( struct inode *inode, struct file *filp,
                  unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_info_t request;
        drm_buf_info_t __user *argp = (void __user *)arg;
        int i;
        int count;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EINVAL;

        if ( !dma ) return -EINVAL;

        spin_lock( &dev->count_lock );
        if ( atomic_read( &dev->buf_alloc ) ) {
                spin_unlock( &dev->count_lock );
                return -EBUSY;
        }
        ++dev->buf_use; /* Can't allocate more after this call */
        spin_unlock( &dev->count_lock );

        if ( copy_from_user( &request, argp, sizeof(request) ) )
                return -EFAULT;

        for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
                if ( dma->bufs[i].buf_count ) ++count;
        }

        DRM_DEBUG( "count = %d\n", count );

        if ( request.count >= count ) {
                for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
                        if ( dma->bufs[i].buf_count ) {
                                drm_buf_desc_t __user *to = &request.list[count];
                                drm_buf_entry_t *from = &dma->bufs[i];
                                drm_freelist_t *list = &dma->bufs[i].freelist;
                                if ( copy_to_user( &to->count,
                                                   &from->buf_count,
                                                   sizeof(from->buf_count) ) ||
                                     copy_to_user( &to->size,
                                                   &from->buf_size,
                                                   sizeof(from->buf_size) ) ||
                                     copy_to_user( &to->low_mark,
                                                   &list->low_mark,
                                                   sizeof(list->low_mark) ) ||
                                     copy_to_user( &to->high_mark,
                                                   &list->high_mark,
                                                   sizeof(list->high_mark) ) )
                                        return -EFAULT;

                                DRM_DEBUG( "%d %d %d %d %d\n",
                                           i,
                                           dma->bufs[i].buf_count,
                                           dma->bufs[i].buf_size,
                                           dma->bufs[i].freelist.low_mark,
                                           dma->bufs[i].freelist.high_mark );
                                ++count;
                        }
                }
        }
        request.count = count;

        if ( copy_to_user( argp, &request, sizeof(request) ) )
                return -EFAULT;

        return 0;
}

/**
 * Specifies a low and high water mark for buffer allocation.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg a pointer to a drm_buf_desc structure.
 * \return zero on success or a negative number on failure.
 *
 * Verifies that the size order is bounded between the admissible orders and
 * updates the respective drm_device_dma::bufs entry low and high water mark.
 *
 * \note This ioctl is deprecated and mostly never used.
 */
int drm_markbufs( struct inode *inode, struct file *filp,
                  unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_desc_t request;
        int order;
        drm_buf_entry_t *entry;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EINVAL;

        if ( !dma ) return -EINVAL;

        if ( copy_from_user( &request,
                             (drm_buf_desc_t __user *)arg,
                             sizeof(request) ) )
                return -EFAULT;

        DRM_DEBUG( "%d, %d, %d\n",
                   request.size, request.low_mark, request.high_mark );
        order = drm_order( request.size );
        if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
        entry = &dma->bufs[order];

        if ( request.low_mark < 0 || request.low_mark > entry->buf_count )
                return -EINVAL;
        if ( request.high_mark < 0 || request.high_mark > entry->buf_count )
                return -EINVAL;

        entry->freelist.low_mark = request.low_mark;
        entry->freelist.high_mark = request.high_mark;

        return 0;
}

/**
 * Unreserve the buffers in list, previously reserved using drmDMA.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_free structure.
 * \return zero on success or a negative number on failure.
 *
 * Calls free_buffer() for each used buffer.
 * This function is primarily used for debugging.
 */
int drm_freebufs( struct inode *inode, struct file *filp,
                  unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_free_t request;
        int i;
        int idx;
        drm_buf_t *buf;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EINVAL;

        if ( !dma ) return -EINVAL;

        if ( copy_from_user( &request,
                             (drm_buf_free_t __user *)arg,
                             sizeof(request) ) )
                return -EFAULT;

        DRM_DEBUG( "%d\n", request.count );
        for ( i = 0 ; i < request.count ; i++ ) {
                if ( copy_from_user( &idx,
                                     &request.list[i],
                                     sizeof(idx) ) )
                        return -EFAULT;
                if ( idx < 0 || idx >= dma->buf_count ) {
                        DRM_ERROR( "Index %d (of %d max)\n",
                                   idx, dma->buf_count - 1 );
                        return -EINVAL;
                }
                buf = dma->buflist[idx];
                if ( buf->filp != filp ) {
                        DRM_ERROR( "Process %d freeing buffer not owned\n",
                                   current->pid );
                        return -EINVAL;
                }
                drm_free_buffer( dev, buf );
        }

        return 0;
}

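/*
 * Aside (illustrative, not taken from this file): userspace typically
 * reaches the ioctl below through a wrapper such as libdrm's drmMapBufs(),
 * which records the idx/total/address triples copied out here for later use
 * with drmDMA() and drmFreeBufs().
 */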
/**
 * Maps all of the DMA buffers into client-virtual space (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_map structure.
 * \return zero on success or a negative number on failure.
 *
 * Maps the AGP or SG buffer region with do_mmap(), and copies information
 * about each buffer into user space. The PCI buffers are already mapped on
 * the addbufs_pci() call.
 */
int drm_mapbufs( struct inode *inode, struct file *filp,
                 unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_map_t __user *argp = (void __user *)arg;
        int retcode = 0;
        const int zero = 0;
        unsigned long virtual;
        unsigned long address;
        drm_buf_map_t request;
        int i;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EINVAL;

        if ( !dma ) return -EINVAL;

        spin_lock( &dev->count_lock );
        if ( atomic_read( &dev->buf_alloc ) ) {
                spin_unlock( &dev->count_lock );
                return -EBUSY;
        }
        dev->buf_use++; /* Can't allocate more after this call */
        spin_unlock( &dev->count_lock );

        if ( copy_from_user( &request, argp, sizeof(request) ) )
                return -EFAULT;

        if ( request.count >= dma->buf_count ) {
                if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP)) ||
                    (drm_core_check_feature(dev, DRIVER_SG) && (dma->flags & _DRM_DMA_USE_SG)) ) {
                        drm_map_t *map = dev->agp_buffer_map;

                        if ( !map ) {
                                retcode = -EINVAL;
                                goto done;
                        }

#if LINUX_VERSION_CODE <= 0x020402
                        down( &current->mm->mmap_sem );
#else
                        down_write( &current->mm->mmap_sem );
#endif
                        virtual = do_mmap( filp, 0, map->size,
                                           PROT_READ | PROT_WRITE,
                                           MAP_SHARED,
                                           (unsigned long)map->offset );
#if LINUX_VERSION_CODE <= 0x020402
                        up( &current->mm->mmap_sem );
#else
                        up_write( &current->mm->mmap_sem );
#endif
                } else {
#if LINUX_VERSION_CODE <= 0x020402
                        down( &current->mm->mmap_sem );
#else
                        down_write( &current->mm->mmap_sem );
#endif
                        virtual = do_mmap( filp, 0, dma->byte_count,
                                           PROT_READ | PROT_WRITE,
                                           MAP_SHARED, 0 );
#if LINUX_VERSION_CODE <= 0x020402
                        up( &current->mm->mmap_sem );
#else
                        up_write( &current->mm->mmap_sem );
#endif
                }
                if ( virtual > -1024UL ) {
                        /* Real error */
                        retcode = (signed long)virtual;
                        goto done;
                }
                request.virtual = (void __user *)virtual;

                for ( i = 0 ; i < dma->buf_count ; i++ ) {
                        if ( copy_to_user( &request.list[i].idx,
                                           &dma->buflist[i]->idx,
                                           sizeof(request.list[0].idx) ) ) {
                                retcode = -EFAULT;
                                goto done;
                        }
                        if ( copy_to_user( &request.list[i].total,
                                           &dma->buflist[i]->total,
                                           sizeof(request.list[0].total) ) ) {
                                retcode = -EFAULT;
                                goto done;
                        }
                        if ( copy_to_user( &request.list[i].used,
                                           &zero,
                                           sizeof(zero) ) ) {
                                retcode = -EFAULT;
                                goto done;
                        }
                        address = virtual + dma->buflist[i]->offset; /* *** */
                        if ( copy_to_user( &request.list[i].address,
                                           &address,
                                           sizeof(address) ) ) {
                                retcode = -EFAULT;
                                goto done;
                        }
                }
        }
 done:
        request.count = dma->buf_count;
        DRM_DEBUG( "%d buffers, retcode = %d\n", request.count, retcode );

        if ( copy_to_user( argp, &request, sizeof(request) ) )
                return -EFAULT;

        return retcode;
}