/**
 * \file drm_irq.c
 * IRQ support
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"

#include <linux/interrupt.h>	/* For task queue support */

/**
 * Get interrupt from bus id.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg user argument, pointing to a drm_irq_busid structure.
 * \return zero on success or a negative number on failure.
 *
 * Finds the PCI device with the specified bus id and gets its IRQ number.
 * This IOCTL is deprecated, and will now return EINVAL for any busid not equal
 * to that of the device that this DRM instance is attached to.
 */
int drm_irq_by_busid(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_irq_busid *p = data;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
		return -EINVAL;

	if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
	    (p->busnum & 0xff) != dev->pdev->bus->number ||
	    p->devnum != PCI_SLOT(dev->pdev->devfn) ||
	    p->funcnum != PCI_FUNC(dev->pdev->devfn))
		return -EINVAL;

	p->irq = dev->pdev->irq;

	DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
		  p->irq);

	return 0;
}

/*
 * Timer callback: disable vblank interrupts on any CRTC whose reference
 * count has dropped to zero, saving the hardware counter so the "cooked"
 * count stays consistent across the off period.
 */
static void vblank_disable_fn(unsigned long arg)
{
	struct drm_device *dev = (struct drm_device *)arg;
	unsigned long irqflags;
	int i;

	if (!dev->vblank_disable_allowed)
		return;

	for (i = 0; i < dev->num_crtcs; i++) {
		spin_lock_irqsave(&dev->vbl_lock, irqflags);
		if (atomic_read(&dev->vblank_refcount[i]) == 0 &&
		    dev->vblank_enabled[i]) {
			DRM_DEBUG("disabling vblank on crtc %d\n", i);
			dev->last_vblank[i] =
				dev->driver->get_vblank_counter(dev, i);
			dev->driver->disable_vblank(dev, i);
			dev->vblank_enabled[i] = 0;
		}
		spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
	}
}

/**
 * drm_vblank_cleanup - clean up vblank support
 * @dev: DRM device
 *
 * Stops the disable timer and frees the per-CRTC state allocated by
 * drm_vblank_init().
 */
void drm_vblank_cleanup(struct drm_device *dev)
{
	/* Bail if the driver didn't call drm_vblank_init() */
	if (dev->num_crtcs == 0)
		return;

	del_timer(&dev->vblank_disable_timer);

	vblank_disable_fn((unsigned long)dev);

	drm_free(dev->vbl_queue, sizeof(*dev->vbl_queue) * dev->num_crtcs,
		 DRM_MEM_DRIVER);
	drm_free(dev->vbl_sigs, sizeof(*dev->vbl_sigs) * dev->num_crtcs,
		 DRM_MEM_DRIVER);
	drm_free(dev->_vblank_count, sizeof(*dev->_vblank_count) *
		 dev->num_crtcs, DRM_MEM_DRIVER);
	drm_free(dev->vblank_refcount, sizeof(*dev->vblank_refcount) *
		 dev->num_crtcs, DRM_MEM_DRIVER);
	drm_free(dev->vblank_enabled, sizeof(*dev->vblank_enabled) *
		 dev->num_crtcs, DRM_MEM_DRIVER);
	drm_free(dev->last_vblank, sizeof(*dev->last_vblank) * dev->num_crtcs,
		 DRM_MEM_DRIVER);
	drm_free(dev->vblank_inmodeset, sizeof(*dev->vblank_inmodeset) *
		 dev->num_crtcs, DRM_MEM_DRIVER);

	dev->num_crtcs = 0;
}

/**
 * drm_vblank_init - initialize vblank support
 * @dev: DRM device
 * @num_crtcs: number of CRTCs supported by @dev
 *
 * Sets up the vblank disable timer and allocates the per-CRTC counters,
 * wait queues, signal lists and reference counts used by the functions below.
 */
int drm_vblank_init(struct drm_device *dev, int num_crtcs)
{
	int i, ret = -ENOMEM;

	setup_timer(&dev->vblank_disable_timer, vblank_disable_fn,
		    (unsigned long)dev);
	spin_lock_init(&dev->vbl_lock);
	atomic_set(&dev->vbl_signal_pending, 0);
	dev->num_crtcs = num_crtcs;

	dev->vbl_queue = drm_alloc(sizeof(wait_queue_head_t) * num_crtcs,
				   DRM_MEM_DRIVER);
	if (!dev->vbl_queue)
		goto err;

	dev->vbl_sigs = drm_alloc(sizeof(struct list_head) * num_crtcs,
				  DRM_MEM_DRIVER);
	if (!dev->vbl_sigs)
		goto err;

	dev->_vblank_count = drm_alloc(sizeof(atomic_t) * num_crtcs,
				       DRM_MEM_DRIVER);
	if (!dev->_vblank_count)
		goto err;

	dev->vblank_refcount = drm_alloc(sizeof(atomic_t) * num_crtcs,
					 DRM_MEM_DRIVER);
	if (!dev->vblank_refcount)
		goto err;

	dev->vblank_enabled = drm_calloc(num_crtcs, sizeof(int),
					 DRM_MEM_DRIVER);
	if (!dev->vblank_enabled)
		goto err;

	dev->last_vblank = drm_calloc(num_crtcs, sizeof(u32), DRM_MEM_DRIVER);
	if (!dev->last_vblank)
		goto err;

	dev->vblank_inmodeset = drm_calloc(num_crtcs, sizeof(int),
					   DRM_MEM_DRIVER);
	if (!dev->vblank_inmodeset)
		goto err;

	/* Zero per-crtc vblank stuff */
	for (i = 0; i < num_crtcs; i++) {
		init_waitqueue_head(&dev->vbl_queue[i]);
		INIT_LIST_HEAD(&dev->vbl_sigs[i]);
		atomic_set(&dev->_vblank_count[i], 0);
		atomic_set(&dev->vblank_refcount[i], 0);
	}

	dev->vblank_disable_allowed = 0;

	return 0;

err:
	drm_vblank_cleanup(dev);
	return ret;
}
EXPORT_SYMBOL(drm_vblank_init);
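
/*
 * A minimal usage sketch: a hypothetical "foo" driver's load hook sets up
 * vblank support before enabling interrupts. The name foo_load_example and
 * the CRTC count of 2 are assumptions made purely for illustration.
 */
static int __maybe_unused foo_load_example(struct drm_device *dev)
{
	int ret;

	/* Allocate per-CRTC vblank state (counters, wait queues, refcounts) */
	ret = drm_vblank_init(dev, 2);
	if (ret)
		return ret;

	/* Driver-specific setup would follow; on any later failure,
	 * drm_vblank_cleanup() releases everything allocated above. */
	return 0;
}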

/**
 * Install IRQ handler.
 *
 * \param dev DRM device.
 *
 * Initializes the IRQ-related data. Installs the handler, calling the driver's
 * \c irq_preinstall() and \c irq_postinstall() hooks before and after the
 * actual installation.
 */
int drm_irq_install(struct drm_device *dev)
{
	int ret = 0;
	unsigned long sh_flags = 0;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
		return -EINVAL;

	if (dev->pdev->irq == 0)
		return -EINVAL;

	mutex_lock(&dev->struct_mutex);

	/* Driver must have been initialized */
	if (!dev->dev_private) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	if (dev->irq_enabled) {
		mutex_unlock(&dev->struct_mutex);
		return -EBUSY;
	}
	dev->irq_enabled = 1;
	mutex_unlock(&dev->struct_mutex);

	DRM_DEBUG("irq=%d\n", dev->pdev->irq);

	/* Before installing handler */
	dev->driver->irq_preinstall(dev);

	/* Install handler */
	if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED))
		sh_flags = IRQF_SHARED;

	ret = request_irq(drm_dev_to_irq(dev), dev->driver->irq_handler,
			  sh_flags, dev->devname, dev);

	if (ret < 0) {
		mutex_lock(&dev->struct_mutex);
		dev->irq_enabled = 0;
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	/* After installing handler */
	ret = dev->driver->irq_postinstall(dev);
	if (ret < 0) {
		mutex_lock(&dev->struct_mutex);
		dev->irq_enabled = 0;
		mutex_unlock(&dev->struct_mutex);
	}

	return ret;
}
EXPORT_SYMBOL(drm_irq_install);

/**
 * Uninstall the IRQ handler.
 *
 * \param dev DRM device.
 *
 * Calls the driver's \c irq_uninstall() hook and frees the IRQ.
 */
int drm_irq_uninstall(struct drm_device *dev)
{
	int irq_enabled;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
		return -EINVAL;

	mutex_lock(&dev->struct_mutex);
	irq_enabled = dev->irq_enabled;
	dev->irq_enabled = 0;
	mutex_unlock(&dev->struct_mutex);

	if (!irq_enabled)
		return -EINVAL;

	DRM_DEBUG("irq=%d\n", dev->pdev->irq);

	dev->driver->irq_uninstall(dev);

	free_irq(dev->pdev->irq, dev);

	return 0;
}
EXPORT_SYMBOL(drm_irq_uninstall);

/**
 * IRQ control ioctl.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg user argument, pointing to a drm_control structure.
 * \return zero on success or a negative number on failure.
 *
 * Calls drm_irq_install() or drm_irq_uninstall() according to \p arg.
 */
int drm_control(struct drm_device *dev, void *data,
		struct drm_file *file_priv)
{
	struct drm_control *ctl = data;

	/*
	 * If we don't have an IRQ, fall back for compatibility reasons;
	 * this used to be a separate function in drm_dma.h.
	 */
	switch (ctl->func) {
	case DRM_INST_HANDLER:
		if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
			return 0;
		if (dev->if_version < DRM_IF_VERSION(1, 2) &&
		    ctl->irq != dev->pdev->irq)
			return -EINVAL;
		return drm_irq_install(dev);
	case DRM_UNINST_HANDLER:
		if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
			return 0;
		return drm_irq_uninstall(dev);
	default:
		return -EINVAL;
	}
}

/**
 * drm_vblank_count - retrieve "cooked" vblank counter value
 * @dev: DRM device
 * @crtc: which counter to retrieve
 *
 * Fetches the "cooked" vblank count value that represents the number of
 * vblank events since the system was booted, including lost events due to
 * modesetting activity.
 */
u32 drm_vblank_count(struct drm_device *dev, int crtc)
{
	return atomic_read(&dev->_vblank_count[crtc]);
}
EXPORT_SYMBOL(drm_vblank_count);

/**
 * drm_update_vblank_count - update the master vblank counter
 * @dev: DRM device
 * @crtc: counter to update
 *
 * Call back into the driver to update the appropriate vblank counter
 * (specified by @crtc). Deal with wraparound, if it occurred, and
 * update the last read value so we can deal with wraparound on the next
 * call if necessary.
 *
 * Only necessary when going from off->on, to account for frames we
 * didn't get an interrupt for.
 *
 * Note: caller must hold dev->vbl_lock since this reads & writes
 * device vblank fields.
 */
static void drm_update_vblank_count(struct drm_device *dev, int crtc)
{
	u32 cur_vblank, diff;

	/*
	 * Interrupts were disabled prior to this call, so deal with counter
	 * wrap if needed.
	 * NOTE! It's possible we lost a full dev->max_vblank_count events
	 * here if the register is small or we had vblank interrupts off for
	 * a long time.
	 */
	cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
	diff = cur_vblank - dev->last_vblank[crtc];
	if (cur_vblank < dev->last_vblank[crtc]) {
		diff += dev->max_vblank_count;

		DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n",
			  crtc, dev->last_vblank[crtc], cur_vblank, diff);
	}

	DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n",
		  crtc, diff);

	atomic_add(diff, &dev->_vblank_count[crtc]);
}

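/*
 * Worked example of the wrap handling above (hypothetical numbers, assuming
 * the hardware counter wraps modulo dev->max_vblank_count, as the code
 * assumes): with max_vblank_count = 0x1000000, last_vblank[crtc] = 0xfffffe
 * and a fresh read of cur_vblank = 0x000004, the unsigned subtraction gives
 * 0xff000006; since cur_vblank < last_vblank we add 0x1000000, so diff = 6,
 * the number of frames that actually elapsed while interrupts were off.
 */
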
/**
 * drm_vblank_get - get a reference count on vblank events
 * @dev: DRM device
 * @crtc: which CRTC to own
 *
 * Acquire a reference count on vblank events to avoid having them disabled
 * while in use.
 *
 * RETURNS
 * Zero on success, nonzero on failure.
 */
int drm_vblank_get(struct drm_device *dev, int crtc)
{
	unsigned long irqflags;
	int ret = 0;

	spin_lock_irqsave(&dev->vbl_lock, irqflags);
	/* Going from 0->1 means we have to enable interrupts again */
	if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1 &&
	    !dev->vblank_enabled[crtc]) {
		ret = dev->driver->enable_vblank(dev, crtc);
		DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret);
		if (ret)
			atomic_dec(&dev->vblank_refcount[crtc]);
		else {
			dev->vblank_enabled[crtc] = 1;
			drm_update_vblank_count(dev, crtc);
		}
	}
	spin_unlock_irqrestore(&dev->vbl_lock, irqflags);

	return ret;
}
EXPORT_SYMBOL(drm_vblank_get);

/**
 * drm_vblank_put - give up ownership of vblank events
 * @dev: DRM device
 * @crtc: which counter to give up
 *
 * Release ownership of a given vblank counter, turning off interrupts
 * if possible.
 */
void drm_vblank_put(struct drm_device *dev, int crtc)
{
	/* Last user schedules interrupt disable */
	if (atomic_dec_and_test(&dev->vblank_refcount[crtc]))
		mod_timer(&dev->vblank_disable_timer, jiffies + 5 * DRM_HZ);
}
EXPORT_SYMBOL(drm_vblank_put);
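
/*
 * A minimal sketch of the get/wait/put pattern (hypothetical helper; the name
 * foo_wait_next_vblank_example is an assumption for the example): hold a
 * reference so the interrupt stays enabled while waiting, then drop it so the
 * disable timer can eventually turn the interrupt back off. This condenses
 * what drm_wait_vblank() below does for the non-signal case.
 */
static int __maybe_unused foo_wait_next_vblank_example(struct drm_device *dev,
							int crtc)
{
	u32 target;
	int ret;

	ret = drm_vblank_get(dev, crtc);
	if (ret)
		return ret;

	/* Wait for the cooked counter to pass the next vblank, using the same
	 * wrap-safe comparison as drm_wait_vblank() */
	target = drm_vblank_count(dev, crtc) + 1;
	DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ,
		    ((drm_vblank_count(dev, crtc) - target) <= (1 << 23)));

	drm_vblank_put(dev, crtc);
	return ret;
}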

/**
 * drm_modeset_ctl - handle vblank event counter changes across mode switch
 * @DRM_IOCTL_ARGS: standard ioctl arguments
 *
 * Applications should call the %_DRM_PRE_MODESET and %_DRM_POST_MODESET
 * ioctls around modesetting so that any lost vblank events are accounted for.
 *
 * Generally the counter will reset across mode sets. If interrupts are
 * enabled around this call, we don't have to do anything since the counter
 * will have already been incremented.
 */
int drm_modeset_ctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_modeset_ctl *modeset = data;
	unsigned long irqflags;
	int crtc, ret = 0;

	/* If drm_vblank_init() hasn't been called yet, just no-op */
	if (!dev->num_crtcs)
		goto out;

	crtc = modeset->crtc;
	if (crtc >= dev->num_crtcs) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * To avoid all the problems that might happen if interrupts
	 * were enabled/disabled around or between these calls, we just
	 * have the kernel take a reference on the CRTC (just once though
	 * to avoid corrupting the count if multiple, mismatched calls occur),
	 * so that interrupts remain enabled in the interim.
	 */
	switch (modeset->cmd) {
	case _DRM_PRE_MODESET:
		if (!dev->vblank_inmodeset[crtc]) {
			dev->vblank_inmodeset[crtc] = 1;
			drm_vblank_get(dev, crtc);
		}
		break;
	case _DRM_POST_MODESET:
		if (dev->vblank_inmodeset[crtc]) {
			spin_lock_irqsave(&dev->vbl_lock, irqflags);
			dev->vblank_disable_allowed = 1;
			dev->vblank_inmodeset[crtc] = 0;
			spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
			drm_vblank_put(dev, crtc);
		}
		break;
	default:
		ret = -EINVAL;
		break;
	}

out:
	return ret;
}
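
/*
 * From userspace, the pre/post hooks above are reached through
 * DRM_IOCTL_MODESET_CTL. A minimal sketch (error handling omitted; "fd" and
 * the CRTC index are assumptions for the example):
 *
 *	struct drm_modeset_ctl ctl = { .crtc = 0, .cmd = _DRM_PRE_MODESET };
 *
 *	ioctl(fd, DRM_IOCTL_MODESET_CTL, &ctl);
 *	... program the new mode ...
 *	ctl.cmd = _DRM_POST_MODESET;
 *	ioctl(fd, DRM_IOCTL_MODESET_CTL, &ctl);
 */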

/**
 * Wait for VBLANK.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param data user argument, pointing to a drm_wait_vblank structure.
 * \return zero on success or a negative number on failure.
 *
 * Verifies the IRQ is installed.
 *
 * If a signal is requested, checks whether this task has already scheduled
 * the same signal for the same vblank sequence number; in that case there is
 * nothing to be done. If the number of tasks waiting for the interrupt exceeds
 * 100, the function fails. Otherwise adds a new entry to drm_device::vbl_sigs
 * for this task.
 *
 * If a signal is not requested, blocks until the requested vblank sequence
 * has passed or the wait times out.
 */
int drm_wait_vblank(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	union drm_wait_vblank *vblwait = data;
	int ret = 0;
	unsigned int flags, seq, crtc;

	if ((!dev->pdev->irq) || (!dev->irq_enabled))
		return -EINVAL;

	if (vblwait->request.type &
	    ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)) {
		DRM_ERROR("Unsupported type value 0x%x, supported mask 0x%x\n",
			  vblwait->request.type,
			  (_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK));
		return -EINVAL;
	}

	flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;
	crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;

	if (crtc >= dev->num_crtcs)
		return -EINVAL;

	ret = drm_vblank_get(dev, crtc);
	if (ret) {
		DRM_ERROR("failed to acquire vblank counter, %d\n", ret);
		return ret;
	}
	seq = drm_vblank_count(dev, crtc);

	switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) {
	case _DRM_VBLANK_RELATIVE:
		vblwait->request.sequence += seq;
		vblwait->request.type &= ~_DRM_VBLANK_RELATIVE;
		/* fall through to the absolute case once converted */
	case _DRM_VBLANK_ABSOLUTE:
		break;
	default:
		ret = -EINVAL;
		goto done;
	}

	if ((flags & _DRM_VBLANK_NEXTONMISS) &&
	    (seq - vblwait->request.sequence) <= (1 << 23)) {
		vblwait->request.sequence = seq + 1;
	}

	if (flags & _DRM_VBLANK_SIGNAL) {
		unsigned long irqflags;
		struct list_head *vbl_sigs = &dev->vbl_sigs[crtc];
		struct drm_vbl_sig *vbl_sig;

		spin_lock_irqsave(&dev->vbl_lock, irqflags);

		/* Check if this task has already scheduled the same signal
		 * for the same vblank sequence number; nothing to be done in
		 * that case
		 */
		list_for_each_entry(vbl_sig, vbl_sigs, head) {
			if (vbl_sig->sequence == vblwait->request.sequence
			    && vbl_sig->info.si_signo ==
			    vblwait->request.signal
			    && vbl_sig->task == current) {
				spin_unlock_irqrestore(&dev->vbl_lock,
						       irqflags);
				vblwait->reply.sequence = seq;
				goto done;
			}
		}

		if (atomic_read(&dev->vbl_signal_pending) >= 100) {
			spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
			ret = -EBUSY;
			goto done;
		}

		spin_unlock_irqrestore(&dev->vbl_lock, irqflags);

		vbl_sig = drm_calloc(1, sizeof(struct drm_vbl_sig),
				     DRM_MEM_DRIVER);
		if (!vbl_sig) {
			ret = -ENOMEM;
			goto done;
		}

		/* Get a refcount on the vblank, which will be released by
		 * drm_vbl_send_signals().
		 */
		ret = drm_vblank_get(dev, crtc);
		if (ret) {
			drm_free(vbl_sig, sizeof(struct drm_vbl_sig),
				 DRM_MEM_DRIVER);
			goto done;
		}

		atomic_inc(&dev->vbl_signal_pending);

		vbl_sig->sequence = vblwait->request.sequence;
		vbl_sig->info.si_signo = vblwait->request.signal;
		vbl_sig->task = current;

		spin_lock_irqsave(&dev->vbl_lock, irqflags);

		list_add_tail(&vbl_sig->head, vbl_sigs);

		spin_unlock_irqrestore(&dev->vbl_lock, irqflags);

		vblwait->reply.sequence = seq;
	} else {
		DRM_DEBUG("waiting on vblank count %d, crtc %d\n",
			  vblwait->request.sequence, crtc);
		DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ,
			    ((drm_vblank_count(dev, crtc)
			      - vblwait->request.sequence) <= (1 << 23)));

		if (ret != -EINTR) {
			struct timeval now;

			do_gettimeofday(&now);

			vblwait->reply.tval_sec = now.tv_sec;
			vblwait->reply.tval_usec = now.tv_usec;
			vblwait->reply.sequence = drm_vblank_count(dev, crtc);
			DRM_DEBUG("returning %d to client\n",
				  vblwait->reply.sequence);
		} else {
			DRM_DEBUG("vblank wait interrupted by signal\n");
		}
	}

done:
	drm_vblank_put(dev, crtc);
	return ret;
}
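
/*
 * From userspace, the non-signal path above is what a typical "wait for the
 * next vblank" looks like through DRM_IOCTL_WAIT_VBLANK. A minimal sketch
 * (error handling omitted; "fd" is an assumption for the example):
 *
 *	union drm_wait_vblank vbl;
 *
 *	memset(&vbl, 0, sizeof(vbl));
 *	vbl.request.type = _DRM_VBLANK_RELATIVE;
 *	vbl.request.sequence = 1;		// one frame from now
 *	ioctl(fd, DRM_IOCTL_WAIT_VBLANK, &vbl);
 *	// vbl.reply.sequence now holds the cooked counter value
 */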

/**
 * Send the VBLANK signals.
 *
 * \param dev DRM device.
 * \param crtc CRTC where the vblank event occurred
 *
 * Sends a signal for each task in drm_device::vbl_sigs and empties the list.
 */
static void drm_vbl_send_signals(struct drm_device *dev, int crtc)
{
	struct drm_vbl_sig *vbl_sig, *tmp;
	struct list_head *vbl_sigs;
	unsigned int vbl_seq;
	unsigned long flags;

	spin_lock_irqsave(&dev->vbl_lock, flags);

	vbl_sigs = &dev->vbl_sigs[crtc];
	vbl_seq = drm_vblank_count(dev, crtc);

	list_for_each_entry_safe(vbl_sig, tmp, vbl_sigs, head) {
		if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) {
			vbl_sig->info.si_code = vbl_seq;
			send_sig_info(vbl_sig->info.si_signo,
				      &vbl_sig->info, vbl_sig->task);

			list_del(&vbl_sig->head);

			drm_free(vbl_sig, sizeof(*vbl_sig),
				 DRM_MEM_DRIVER);
			atomic_dec(&dev->vbl_signal_pending);
			drm_vblank_put(dev, crtc);
		}
	}

	spin_unlock_irqrestore(&dev->vbl_lock, flags);
}

/**
 * drm_handle_vblank - handle a vblank event
 * @dev: DRM device
 * @crtc: where this event occurred
 *
 * Drivers should call this routine in their vblank interrupt handlers to
 * update the vblank counter and send any signals that may be pending.
 */
void drm_handle_vblank(struct drm_device *dev, int crtc)
{
	atomic_inc(&dev->_vblank_count[crtc]);
	DRM_WAKEUP(&dev->vbl_queue[crtc]);
	drm_vbl_send_signals(dev, crtc);
}
EXPORT_SYMBOL(drm_handle_vblank);
697 EXPORT_SYMBOL(drm_handle_vblank);