/* gamma_context.h -- IOCTLs for generic contexts -*- linux-c -*-
 * Created: Fri Nov 24 18:31:37 2000 by gareth@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 * ChangeLog:
 *  2001-11-16	Torsten Duwe <duwe@caldera.de>
 *		added context constructor/destructor hooks,
 *		needed by SiS driver's memory management.
 */


/* ================================================================
 * Old-style context support -- only used by gamma.
 */


/* The drm_read and drm_write_string code (especially that which manages
   the circular buffer) is based on Alessandro Rubini's LINUX DEVICE
   DRIVERS (Cambridge: O'Reilly, 1998), pages 111-113. */

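/*
 * Illustrative sketch, not driver code (compiled out): the ring-buffer
 * arithmetic used by gamma_fops_read() and DRM(write_string)() below,
 * restated over plain offsets.  "bsz" stands in for DRM_BSZ; rp and wp
 * are the read and write offsets.  The writer leaves one byte unused so
 * that rp == wp always means "empty", never "full".
 */
#if 0
static int ring_free_space(int rp, int wp, int bsz)
{
	/* Mirrors "left" in the functions below. */
	return (rp + bsz - wp) % bsz;
}

static int ring_bytes_pending(int rp, int wp, int bsz)
{
	/* Mirrors "avail" below; only meaningful when the buffer is
	   non-empty, which gamma_fops_read() guarantees by sleeping
	   until rp != wp. */
	return bsz - ring_free_space(rp, wp, bsz);
}
#endif
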
ssize_t gamma_fops_read(struct file *filp, char __user *buf, size_t count,
			loff_t *off)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	int left;
	int avail;
	int send;
	int cur;

	DRM_DEBUG("%p, %p\n", dev->buf_rp, dev->buf_wp);

	while (dev->buf_rp == dev->buf_wp) {
		DRM_DEBUG("  sleeping\n");
		if (filp->f_flags & O_NONBLOCK) {
			return -EAGAIN;
		}
		/* interruptible_sleep_on() can miss a wakeup that arrives
		   between the emptiness test above and the sleep; the
		   enclosing loop re-checks to compensate. */
		interruptible_sleep_on(&dev->buf_readers);
		if (signal_pending(current)) {
			DRM_DEBUG("  interrupted\n");
			return -ERESTARTSYS;
		}
		DRM_DEBUG("  awake\n");
	}

	left  = (dev->buf_rp + DRM_BSZ - dev->buf_wp) % DRM_BSZ;
	avail = DRM_BSZ - left;
	send  = DRM_MIN(avail, count);

	while (send) {
		if (dev->buf_wp > dev->buf_rp) {
			/* Single chunk: rp can run straight up to wp. */
			cur = DRM_MIN(send, dev->buf_wp - dev->buf_rp);
		} else {
			/* Wrapped: drain up to the end of the buffer first. */
			cur = DRM_MIN(send, dev->buf_end - dev->buf_rp);
		}
		if (copy_to_user(buf, dev->buf_rp, cur))
			return -EFAULT;
		buf         += cur; /* advance the user-space destination */
		dev->buf_rp += cur;
		if (dev->buf_rp == dev->buf_end) dev->buf_rp = dev->buf;
		send -= cur;
	}

	wake_up_interruptible(&dev->buf_writers);
	return DRM_MIN(avail, count);
}


/* In an incredibly convoluted setup, the kernel module actually calls
 * back into the X server to perform context switches on behalf of the
 * 3d clients.
 */
int DRM(write_string)(drm_device_t *dev, const char *s)
{
	int left  = (dev->buf_rp + DRM_BSZ - dev->buf_wp) % DRM_BSZ;
	int send  = strlen(s);
	int count;

	DRM_DEBUG("%d left, %d to send (%p, %p)\n",
		  left, send, dev->buf_rp, dev->buf_wp);

	if (left == 1 || dev->buf_wp != dev->buf_rp) {
		DRM_ERROR("Buffer not empty (%d left, wp = %p, rp = %p)\n",
			  left,
			  dev->buf_wp,
			  dev->buf_rp);
	}

	while (send) {
		if (dev->buf_wp >= dev->buf_rp) {
			count = DRM_MIN(send, dev->buf_end - dev->buf_wp);
			if (count == left) --count; /* Leave a hole */
		} else {
			count = DRM_MIN(send, dev->buf_rp - dev->buf_wp - 1);
		}
		strncpy(dev->buf_wp, s, count);
		s           += count; /* advance the source string as well */
		dev->buf_wp += count;
		if (dev->buf_wp == dev->buf_end) dev->buf_wp = dev->buf;
		send -= count;
	}

	if (dev->buf_async) kill_fasync(&dev->buf_async, SIGIO, POLL_IN);

	DRM_DEBUG("waking\n");
	wake_up_interruptible(&dev->buf_readers);
	return 0;
}
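
/*
 * Hedged sketch of the consumer side of this handshake (illustrative
 * userspace code, compiled out; assumes <unistd.h>, <sys/ioctl.h> and
 * the drm headers).  A server loop might read the "C <old> <new>"
 * command emitted by DRM(context_switch)() below, perform the actual
 * hardware switch, and then report completion through the
 * DRM_IOCTL_NEW_CTX ioctl, which lands in DRM(context_switch_complete)().
 */
#if 0
static void service_context_switch(int fd)
{
	char msg[64];
	drm_ctx_t ctx;
	int old, new;
	ssize_t n = read(fd, msg, sizeof(msg) - 1);

	if (n <= 0)
		return;
	msg[n] = '\0';
	if (sscanf(msg, "C %d %d", &old, &new) == 2) {
		/* ... program the hardware for context "new" here ... */
		ctx.handle = new;
		ioctl(fd, DRM_IOCTL_NEW_CTX, &ctx);
	}
}
#endif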

unsigned int gamma_fops_poll(struct file *filp, struct poll_table_struct *wait)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;

	poll_wait(filp, &dev->buf_readers, wait);
	if (dev->buf_wp != dev->buf_rp) return POLLIN | POLLRDNORM;
	return 0;
}

int DRM(context_switch)(drm_device_t *dev, int old, int new)
{
	char buf[64];
	drm_queue_t *q;

	if (test_and_set_bit(0, &dev->context_flag)) {
		DRM_ERROR("Reentering -- FIXME\n");
		return -EBUSY;
	}

	DRM_DEBUG("Context switch from %d to %d\n", old, new);

	if (new >= dev->queue_count) {
		clear_bit(0, &dev->context_flag);
		return -EINVAL;
	}

	if (new == dev->last_context) {
		clear_bit(0, &dev->context_flag);
		return 0;
	}

	q = dev->queuelist[new];
	atomic_inc(&q->use_count);
	if (atomic_read(&q->use_count) == 1) {
		atomic_dec(&q->use_count);
		clear_bit(0, &dev->context_flag);
		return -EINVAL;
	}

	/* This causes the X server to wake up & do a bunch of hardware
	 * interaction to actually effect the context switch.
	 */
	sprintf(buf, "C %d %d\n", old, new);
	DRM(write_string)(dev, buf);

	atomic_dec(&q->use_count);

	return 0;
}

int DRM(context_switch_complete)(drm_device_t *dev, int new)
{
	drm_device_dma_t *dma = dev->dma;

	dev->last_context = new;  /* PRE/POST: This is the _only_ writer. */
	dev->last_switch  = jiffies;

	if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
		DRM_ERROR("Lock isn't held after context switch\n");
	}

	/* Release the hardware lock unless the next DMA buffer must be
	   dispatched with the lock still held. */
	if (!dma || !(dma->next_buffer && dma->next_buffer->while_locked)) {
		if (DRM(lock_free)(dev, &dev->lock.hw_lock->lock,
				   DRM_KERNEL_CONTEXT)) {
			DRM_ERROR("Cannot free lock\n");
		}
	}

	clear_bit(0, &dev->context_flag);
	wake_up_interruptible(&dev->context_wait);

	return 0;
}

static int DRM(init_queue)(drm_device_t *dev, drm_queue_t *q, drm_ctx_t *ctx)
{
	DRM_DEBUG("\n");

	if (atomic_read(&q->use_count) != 1
	    || atomic_read(&q->finalization)
	    || atomic_read(&q->block_count)) {
		DRM_ERROR("New queue is already in use: u%d f%d b%d\n",
			  atomic_read(&q->use_count),
			  atomic_read(&q->finalization),
			  atomic_read(&q->block_count));
	}

	atomic_set(&q->finalization,  0);
	atomic_set(&q->block_count,   0);
	atomic_set(&q->block_read,    0);
	atomic_set(&q->block_write,   0);
	atomic_set(&q->total_queued,  0);
	atomic_set(&q->total_flushed, 0);
	atomic_set(&q->total_locks,   0);

	init_waitqueue_head(&q->write_queue);
	init_waitqueue_head(&q->read_queue);
	init_waitqueue_head(&q->flush_queue);

	q->flags = ctx->flags;

	DRM(waitlist_create)(&q->waitlist, dev->dma->buf_count);

	return 0;
}


/* drm_alloc_queue:
   PRE:  1) dev->queuelist[0..dev->queue_count] is allocated and will not
	    disappear (so all deallocation must be done after IOCTLs are off)
	 2) dev->queue_count < dev->queue_slots
	 3) dev->queuelist[i].use_count == 0 and
	    dev->queuelist[i].finalization == 0 if i not in use
   POST: 1) dev->queuelist[i].use_count == 1
	 2) dev->queue_count < dev->queue_slots */

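/*
 * The reservation idiom used throughout this file, restated as an
 * illustrative (compiled-out) helper: atomic_inc() followed by
 * atomic_read() == 1 means the count was previously zero, i.e. the
 * queue was unreferenced.  For allocation that makes it claimable; in
 * the lookup paths (modctx, getctx, rmctx) the same observation means
 * "dead queue" and is treated as an error.  Note the inc/read pair is
 * not atomic as a unit, so this is a probe, not a lock.
 */
#if 0
static int queue_try_claim(drm_queue_t *q)
{
	atomic_inc(&q->use_count);
	if (atomic_read(&q->use_count) == 1
	    && !atomic_read(&q->finalization))
		return 1;		/* was free: keep the reference */
	atomic_dec(&q->use_count);	/* in use (or dying): back off */
	return 0;
}
#endif
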
static int DRM(alloc_queue)(drm_device_t *dev)
{
	int	     i;
	drm_queue_t *queue;
	int	     oldslots;
	int	     newslots;

	/* Check for a free queue */
	for (i = 0; i < dev->queue_count; i++) {
		atomic_inc(&dev->queuelist[i]->use_count);
		if (atomic_read(&dev->queuelist[i]->use_count) == 1
		    && !atomic_read(&dev->queuelist[i]->finalization)) {
			DRM_DEBUG("%d (free)\n", i);
			return i;
		}
		atomic_dec(&dev->queuelist[i]->use_count);
	}

	/* Allocate a new queue */
	down(&dev->struct_sem);

	queue = DRM(alloc)(sizeof(*queue), DRM_MEM_QUEUES);
	if (!queue) {
		up(&dev->struct_sem);
		return -ENOMEM;
	}
	memset(queue, 0, sizeof(*queue));
	atomic_set(&queue->use_count, 1);

	++dev->queue_count;
	if (dev->queue_count >= dev->queue_slots) {
		oldslots = dev->queue_slots * sizeof(*dev->queuelist);
		if (!dev->queue_slots) dev->queue_slots = 1;
		dev->queue_slots *= 2;
		newslots = dev->queue_slots * sizeof(*dev->queuelist);

		dev->queuelist = DRM(realloc)(dev->queuelist,
					      oldslots,
					      newslots,
					      DRM_MEM_QUEUES);
		if (!dev->queuelist) {
			up(&dev->struct_sem);
			DRM_DEBUG("out of memory\n");
			return -ENOMEM;
		}
	}
	dev->queuelist[dev->queue_count-1] = queue;

	up(&dev->struct_sem);
	DRM_DEBUG("%d (new)\n", dev->queue_count - 1);
	return dev->queue_count - 1;
}

int DRM(resctx)(struct inode *inode, struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	drm_ctx_res_t __user *argp = (void __user *)arg;
	drm_ctx_res_t res;
	drm_ctx_t ctx;
	int i;

	DRM_DEBUG("%d\n", DRM_RESERVED_CONTEXTS);
	if (copy_from_user(&res, argp, sizeof(res)))
		return -EFAULT;
	if (res.count >= DRM_RESERVED_CONTEXTS) {
		memset(&ctx, 0, sizeof(ctx));
		for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
			ctx.handle = i;
			if (copy_to_user(&res.contexts[i],
					 &ctx,
					 sizeof(ctx)))
				return -EFAULT;
		}
	}
	res.count = DRM_RESERVED_CONTEXTS;
	if (copy_to_user(argp, &res, sizeof(res)))
		return -EFAULT;
	return 0;
}
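
/*
 * Hedged userspace sketch (illustrative, compiled out; assumes
 * <sys/ioctl.h> and the drm headers) of driving the ioctl above:
 * DRM_IOCTL_RES_CTX is the request that reaches DRM(resctx)().  The
 * caller advertises the capacity of its array in res.count; the
 * handles are filled in only when that capacity covers all
 * DRM_RESERVED_CONTEXTS.
 */
#if 0
static int list_reserved_contexts(int fd)
{
	drm_ctx_t ctxs[DRM_RESERVED_CONTEXTS];
	drm_ctx_res_t res;

	res.count    = DRM_RESERVED_CONTEXTS;
	res.contexts = ctxs;
	if (ioctl(fd, DRM_IOCTL_RES_CTX, &res) < 0)
		return -1;
	return res.count;	/* number of reserved contexts */
}
#endif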

int DRM(addctx)(struct inode *inode, struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_ctx_t ctx;
	drm_ctx_t __user *argp = (void __user *)arg;

	if (copy_from_user(&ctx, argp, sizeof(ctx)))
		return -EFAULT;
	if ((ctx.handle = DRM(alloc_queue)(dev)) == DRM_KERNEL_CONTEXT) {
		/* Init kernel's context and get a new one. */
		DRM(init_queue)(dev, dev->queuelist[ctx.handle], &ctx);
		ctx.handle = DRM(alloc_queue)(dev);
	}
	DRM(init_queue)(dev, dev->queuelist[ctx.handle], &ctx);
	DRM_DEBUG("%d\n", ctx.handle);
	if (copy_to_user(argp, &ctx, sizeof(ctx)))
		return -EFAULT;
	return 0;
}

int DRM(modctx)(struct inode *inode, struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_ctx_t ctx;
	drm_queue_t *q;

	if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx)))
		return -EFAULT;

	DRM_DEBUG("%d\n", ctx.handle);

	if (ctx.handle < 0 || ctx.handle >= dev->queue_count) return -EINVAL;
	q = dev->queuelist[ctx.handle];

	atomic_inc(&q->use_count);
	if (atomic_read(&q->use_count) == 1) {
		/* No longer in use */
		atomic_dec(&q->use_count);
		return -EINVAL;
	}

	if (DRM_BUFCOUNT(&q->waitlist)) {
		atomic_dec(&q->use_count);
		return -EBUSY;
	}

	q->flags = ctx.flags;

	atomic_dec(&q->use_count);
	return 0;
}

int DRM(getctx)(struct inode *inode, struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_ctx_t __user *argp = (void __user *)arg;
	drm_ctx_t ctx;
	drm_queue_t *q;

	if (copy_from_user(&ctx, argp, sizeof(ctx)))
		return -EFAULT;

	DRM_DEBUG("%d\n", ctx.handle);

	if (ctx.handle >= dev->queue_count) return -EINVAL;
	q = dev->queuelist[ctx.handle];

	atomic_inc(&q->use_count);
	if (atomic_read(&q->use_count) == 1) {
		/* No longer in use */
		atomic_dec(&q->use_count);
		return -EINVAL;
	}

	ctx.flags = q->flags;
	atomic_dec(&q->use_count);

	if (copy_to_user(argp, &ctx, sizeof(ctx)))
		return -EFAULT;

	return 0;
}

int DRM(switchctx)(struct inode *inode, struct file *filp,
		   unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_ctx_t ctx;

	if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx)))
		return -EFAULT;
	DRM_DEBUG("%d\n", ctx.handle);
	return DRM(context_switch)(dev, dev->last_context, ctx.handle);
}

int DRM(newctx)(struct inode *inode, struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_ctx_t ctx;

	if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx)))
		return -EFAULT;
	DRM_DEBUG("%d\n", ctx.handle);
	DRM(context_switch_complete)(dev, ctx.handle);

	return 0;
}

int DRM(rmctx)(struct inode *inode, struct file *filp,
	       unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_ctx_t ctx;
	drm_queue_t *q;
	drm_buf_t *buf;

	if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx)))
		return -EFAULT;
	DRM_DEBUG("%d\n", ctx.handle);

	if (ctx.handle >= dev->queue_count) return -EINVAL;
	q = dev->queuelist[ctx.handle];

	atomic_inc(&q->use_count);
	if (atomic_read(&q->use_count) == 1) {
		/* No longer in use */
		atomic_dec(&q->use_count);
		return -EINVAL;
	}

	atomic_inc(&q->finalization); /* Mark queue in finalization state */
	atomic_sub(2, &q->use_count); /* Drop both the probe reference taken
					 above and the creation reference, so
					 the queue reads as unused (pending
					 finalization) */

	while (test_and_set_bit(0, &dev->interrupt_flag)) {
		schedule();
		if (signal_pending(current)) {
			clear_bit(0, &dev->interrupt_flag);
			return -EINTR;
		}
	}

	/* Remove queued buffers */
	while ((buf = DRM(waitlist_get)(&q->waitlist))) {
		DRM(free_buffer)(dev, buf);
	}
	clear_bit(0, &dev->interrupt_flag);

	/* Wakeup blocked processes */
	wake_up_interruptible(&q->read_queue);
	wake_up_interruptible(&q->write_queue);
	wake_up_interruptible(&q->flush_queue);

	/* Finalization over.  The queue becomes available again only when
	   both use_count and finalization reach 0, which won't happen until
	   all the waiting processes stop waiting. */
	atomic_dec(&q->finalization);
	return 0;
}