/* via_dmablit.c -- PCI DMA BitBlt support for the VIA Unichrome/Pro
 *
 * Copyright (C) 2005 Thomas Hellstrom, All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Thomas Hellstrom.
 *    Partially based on code obtained from Digeo Inc.
 */


/*
 * Unmaps the DMA mappings.
 * FIXME: Is this a NoOp on x86? Also
 * FIXME: What happens if this one is called and a pending blit has previously done
 * the same DMA mappings?
 */

#include "drmP.h"
#include "via_drm.h"
#include "via_drv.h"
#include "via_dmablit.h"

#include <linux/pagemap.h>

#define VIA_PGDN(x)	(((unsigned long)(x)) & PAGE_MASK)
#define VIA_PGOFF(x)	(((unsigned long)(x)) & ~PAGE_MASK)
#define VIA_PFN(x)	((unsigned long)(x) >> PAGE_SHIFT)

typedef struct _drm_via_descriptor {
	uint32_t mem_addr;
	uint32_t dev_addr;
	uint32_t size;
	uint32_t next;
} drm_via_descriptor_t;
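
/*
 * The hardware follows the descriptor chain through the 'next' field.
 * Because the chain is linked back to front as it is built (see
 * via_map_blit_for_device() below), vsg->chain_start points at the last
 * descriptor written, and the first descriptor written carries the
 * VIA_DMA_DPR_EC end-of-chain marker:
 *
 *	chain_start -> desc[n-1] -> ... -> desc[1] -> desc[0] -> EC
 */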


/*
 * Unmap a DMA mapping.
 */

static void
via_unmap_blit_from_device(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
{
	int num_desc = vsg->num_desc;
	unsigned cur_descriptor_page;
	unsigned descriptor_this_page;
	drm_via_descriptor_t *desc_ptr;
	dma_addr_t next = vsg->chain_start;

	if (num_desc == 0)
		return;

	/* Start at the last descriptor written, i.e. index num_desc - 1. */
	cur_descriptor_page = (num_desc - 1) / vsg->descriptors_per_page;
	descriptor_this_page = (num_desc - 1) % vsg->descriptors_per_page;
	desc_ptr = vsg->desc_pages[cur_descriptor_page] + descriptor_this_page;

	/*
	 * Walk the chain in the same (reverse) order the hardware ran it,
	 * unmapping each descriptor and the page it points to.
	 */
	while (num_desc--) {
		dma_unmap_single(&pdev->dev, next, sizeof(*desc_ptr), DMA_TO_DEVICE);
		dma_unmap_page(&pdev->dev, desc_ptr->mem_addr, desc_ptr->size, vsg->direction);
		next = (dma_addr_t) desc_ptr->next;
		if (descriptor_this_page-- == 0 && num_desc) {
			cur_descriptor_page--;
			descriptor_this_page = vsg->descriptors_per_page - 1;
			desc_ptr = vsg->desc_pages[cur_descriptor_page] +
				descriptor_this_page;
		} else {
			desc_ptr--;
		}
	}
}

/*
 * If mode == 0, count how many descriptors are needed.
 * If mode == 1, map the DMA pages for the device, and build and map the
 * descriptor chain as well.
 * Descriptors are run in reverse order by the hardware because we are not
 * allowed to update the 'next' field without syncing calls when the
 * descriptor is already mapped.
 */
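
/*
 * A minimal sketch of the intended two-pass calling sequence (this is
 * exactly what via_build_sg_info() below does; error handling simplified):
 *
 *	via_map_blit_for_device(pdev, xfer, vsg, 0);	/+ count descriptors +/
 *	if (via_alloc_desc_pages(vsg))			/+ allocate pages for them +/
 *		return DRM_ERR(ENOMEM);
 *	via_map_blit_for_device(pdev, xfer, vsg, 1);	/+ map pages + descriptors +/
 */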
static void
via_map_blit_for_device(struct pci_dev *pdev,
			const drm_via_dmablit_t *xfer,
			drm_via_sg_info_t *vsg,
			int mode)
{
	unsigned cur_descriptor_page = 0;
	unsigned num_descriptors_this_page = 0;
	unsigned char *mem_addr = xfer->mem_addr;
	unsigned char *cur_mem;
	unsigned char *first_addr = (unsigned char *)VIA_PGDN(mem_addr);
	uint32_t fb_addr = xfer->fb_addr;
	uint32_t cur_fb;
	unsigned long line_len;
	unsigned remaining_len;
	int num_desc = 0;
	int cur_line;
	dma_addr_t next = 0 | VIA_DMA_DPR_EC;	/* first descriptor terminates the chain */
	drm_via_descriptor_t *desc_ptr = NULL;

	if (mode == 1)
		desc_ptr = vsg->desc_pages[cur_descriptor_page];

	for (cur_line = 0; cur_line < xfer->num_lines; ++cur_line) {

		line_len = xfer->line_length;
		cur_fb = fb_addr;
		cur_mem = mem_addr;

		while (line_len > 0) {

			/* One descriptor per line segment that fits within a page. */
			remaining_len = min(PAGE_SIZE - VIA_PGOFF(cur_mem), line_len);
			line_len -= remaining_len;

			if (mode == 1) {
				desc_ptr->mem_addr =
					dma_map_page(&pdev->dev,
						     vsg->pages[VIA_PFN(cur_mem) -
								VIA_PFN(first_addr)],
						     VIA_PGOFF(cur_mem), remaining_len,
						     vsg->direction);
				desc_ptr->dev_addr = cur_fb;

				desc_ptr->size = remaining_len;
				desc_ptr->next = (uint32_t) next;
				next = dma_map_single(&pdev->dev, desc_ptr, sizeof(*desc_ptr),
						      DMA_TO_DEVICE);
				desc_ptr++;
				if (++num_descriptors_this_page >= vsg->descriptors_per_page) {
					num_descriptors_this_page = 0;
					desc_ptr = vsg->desc_pages[++cur_descriptor_page];
				}
			}

			num_desc++;
			cur_mem += remaining_len;
			cur_fb += remaining_len;
		}

		mem_addr += xfer->mem_stride;
		fb_addr += xfer->fb_stride;
	}

	if (mode == 1) {
		vsg->chain_start = next;
		vsg->state = dr_via_device_mapped;
	}
	vsg->num_desc = num_desc;
}

/*
 * Free up all resources for a blit. It is usable even if the blit info
 * has only been partially built, as long as the status enum is consistent
 * with the actual status of the used resources.
 */

static void
via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
{
	struct page *page;
	int i;

	switch (vsg->state) {
	case dr_via_device_mapped:
		via_unmap_blit_from_device(pdev, vsg);
		/* fall through */
	case dr_via_desc_pages_alloc:
		for (i = 0; i < vsg->num_desc_pages; ++i) {
			if (vsg->desc_pages[i] != NULL)
				free_page((unsigned long)vsg->desc_pages[i]);
		}
		kfree(vsg->desc_pages);
		/* fall through */
	case dr_via_pages_locked:
		for (i = 0; i < vsg->num_pages; ++i) {
			if (NULL != (page = vsg->pages[i])) {
				if (!PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction))
					SetPageDirty(page);
				page_cache_release(page);
			}
		}
		/* fall through */
	case dr_via_pages_alloc:
		vfree(vsg->pages);
		/* fall through */
	default:
		vsg->state = dr_via_sg_init;
	}
	if (vsg->bounce_buffer) {
		vfree(vsg->bounce_buffer);
		vsg->bounce_buffer = NULL;
	}
	vsg->free_on_sequence = 0;
}

/*
 * Fire a blit engine.
 */

static void
via_fire_dmablit(struct drm_device *dev, drm_via_sg_info_t *vsg, int engine)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;

	VIA_WRITE(VIA_PCI_DMA_MAR0 + engine*0x10, 0);
	VIA_WRITE(VIA_PCI_DMA_DAR0 + engine*0x10, 0);
	VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DD | VIA_DMA_CSR_TD |
		  VIA_DMA_CSR_DE);
	VIA_WRITE(VIA_PCI_DMA_MR0 + engine*0x04, VIA_DMA_MR_CM | VIA_DMA_MR_TDIE);
	VIA_WRITE(VIA_PCI_DMA_BCR0 + engine*0x10, 0);
	VIA_WRITE(VIA_PCI_DMA_DPR0 + engine*0x10, vsg->chain_start);
	DRM_WRITEMEMORYBARRIER();
	VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DE | VIA_DMA_CSR_TS);
	VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04);	/* read back to flush the posted write */
}

/*
 * Obtain a page pointer array and lock all pages into system memory. A
 * segmentation violation will occur here if the calling user does not have
 * access to the submitted address.
 */
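
/*
 * Worked example (illustrative numbers): with 4 KiB pages, mem_addr == 4000,
 * num_lines == 2 and mem_stride == 4096, the last byte touched is at
 * 4000 + 2*4096 - 1 == 12191, which lies in PFN 2, while the first byte
 * lies in PFN 0, so vsg->num_pages == 3.
 */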

static int
via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
{
	int ret;
	unsigned long first_pfn = VIA_PFN(xfer->mem_addr);
	vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride - 1)) -
		first_pfn + 1;

	if (NULL == (vsg->pages = vmalloc(sizeof(struct page *) * vsg->num_pages)))
		return DRM_ERR(ENOMEM);
	memset(vsg->pages, 0, sizeof(struct page *) * vsg->num_pages);
	/* From here on, via_free_sg_info() must free the page array. */
	vsg->state = dr_via_pages_alloc;
	down_read(&current->mm->mmap_sem);
	ret = get_user_pages(current, current->mm,
			     (unsigned long)xfer->mem_addr,
			     vsg->num_pages,
			     (vsg->direction == DMA_FROM_DEVICE),
			     0, vsg->pages, NULL);

	up_read(&current->mm->mmap_sem);
	if (ret != vsg->num_pages) {
		if (ret < 0)
			return ret;
		vsg->state = dr_via_pages_locked;
		return DRM_ERR(EINVAL);
	}
	vsg->state = dr_via_pages_locked;
	DRM_DEBUG("DMA pages locked\n");
	return 0;
}

/*
 * Allocate DMA-capable memory for the blit descriptor chain, and an array
 * that keeps track of the pages we allocate. We don't want to use kmalloc
 * for the descriptor chain because it may be quite large for some blits,
 * and the pages don't need to be contiguous.
 */
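
/*
 * For scale (illustrative, assuming 4 KiB pages): each drm_via_descriptor_t
 * above is 16 bytes, so one page holds 256 descriptors, and a blit needing,
 * say, 1000 descriptors allocates four descriptor pages.
 */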

static int
via_alloc_desc_pages(drm_via_sg_info_t *vsg)
{
	int i;

	vsg->descriptors_per_page = PAGE_SIZE / sizeof(drm_via_descriptor_t);
	vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) /
		vsg->descriptors_per_page;

	if (NULL == (vsg->desc_pages = kmalloc(sizeof(void *) * vsg->num_desc_pages, GFP_KERNEL)))
		return DRM_ERR(ENOMEM);

	memset(vsg->desc_pages, 0, sizeof(void *) * vsg->num_desc_pages);
	vsg->state = dr_via_desc_pages_alloc;
	for (i = 0; i < vsg->num_desc_pages; ++i) {
		if (NULL == (vsg->desc_pages[i] =
			     (drm_via_descriptor_t *) __get_free_page(GFP_KERNEL)))
			return DRM_ERR(ENOMEM);
	}
	DRM_DEBUG("Allocated %d pages for %d descriptors.\n", vsg->num_desc_pages,
		  vsg->num_desc);
	return 0;
}

static void
via_abort_dmablit(struct drm_device *dev, int engine)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;

	VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TA);
}

static void
via_dmablit_engine_off(struct drm_device *dev, int engine)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;

	VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD | VIA_DMA_CSR_DD);
}


/*
 * The dmablit part of the IRQ handler. Trying to do only reasonably fast things here.
 * The rest, like unmapping and freeing memory for done blits, is done in a separate
 * workqueue task. Basically the task of the interrupt handler is to submit a new blit
 * to the engine, while the workqueue task takes care of processing associated with the
 * previous blit.
 */
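
/*
 * Slot life cycle in the blit queue (an illustrative summary of the code
 * that follows):
 *
 *	via_dmablit()           queues at 'head', num_outstanding++
 *	via_dmablit_handler()   fires/completes at 'cur', done_blit_handle++
 *	via_dmablit_workqueue() frees at 'serviced', num_free++
 */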

void
via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
	int cur;
	int done_transfer;
	unsigned long irqsave = 0;
	uint32_t status = 0;

	DRM_DEBUG("DMA blit handler called. engine = %d, from_irq = %d, blitq = 0x%lx\n",
		  engine, from_irq, (unsigned long) blitq);

	if (from_irq) {
		spin_lock(&blitq->blit_lock);
	} else {
		spin_lock_irqsave(&blitq->blit_lock, irqsave);
	}

	done_transfer = blitq->is_active &&
		((status = VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04)) & VIA_DMA_CSR_TD);
	done_transfer = done_transfer || (blitq->aborting && !(status & VIA_DMA_CSR_DE));

	cur = blitq->cur;
	if (done_transfer) {

		blitq->blits[cur]->aborted = blitq->aborting;
		blitq->done_blit_handle++;
		DRM_WAKEUP(blitq->blit_queue + cur);

		cur++;
		if (cur >= VIA_NUM_BLIT_SLOTS)
			cur = 0;
		blitq->cur = cur;

		/*
		 * Clear transfer done flag.
		 */

		VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD);

		blitq->is_active = 0;
		blitq->aborting = 0;
		schedule_work(&blitq->wq);

	} else if (blitq->is_active && time_after_eq(jiffies, blitq->end)) {

		/*
		 * Abort transfer after one second.
		 */

		via_abort_dmablit(dev, engine);
		blitq->aborting = 1;
		blitq->end = jiffies + DRM_HZ;
	}

	if (!blitq->is_active) {
		if (blitq->num_outstanding) {
			via_fire_dmablit(dev, blitq->blits[cur], engine);
			blitq->is_active = 1;
			blitq->cur = cur;
			blitq->num_outstanding--;
			blitq->end = jiffies + DRM_HZ;
			if (!timer_pending(&blitq->poll_timer))
				mod_timer(&blitq->poll_timer, jiffies + 1);
		} else {
			if (timer_pending(&blitq->poll_timer)) {
				del_timer(&blitq->poll_timer);
			}
			via_dmablit_engine_off(dev, engine);
		}
	}

	if (from_irq) {
		spin_unlock(&blitq->blit_lock);
	} else {
		spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
	}
}



/*
 * Check whether this blit is still active, performing necessary locking.
 */

static int
via_dmablit_active(drm_via_blitq_t *blitq, int engine, uint32_t handle, wait_queue_head_t **queue)
{
	unsigned long irqsave;
	uint32_t slot;
	int active;

	spin_lock_irqsave(&blitq->blit_lock, irqsave);

	/*
	 * Allow for handle wraparounds.
	 */
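	/*
	 * Worked example: with 32-bit handles, if done_blit_handle == 5,
	 * handle == 7 and cur_blit_handle == 9, then done - handle wraps to
	 * 0xfffffffe (> 1 << 23, so not yet done) while cur - handle == 2
	 * (<= 1 << 23, so already issued): the blit is still active.
	 */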

	active = ((blitq->done_blit_handle - handle) > (1 << 23)) &&
		((blitq->cur_blit_handle - handle) <= (1 << 23));

	if (queue && active) {
		slot = handle - blitq->done_blit_handle + blitq->cur - 1;
		if (slot >= VIA_NUM_BLIT_SLOTS) {
			slot -= VIA_NUM_BLIT_SLOTS;
		}
		*queue = blitq->blit_queue + slot;
	}

	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);

	return active;
}

/*
 * Sync. Wait for up to three seconds for the blit to be performed.
 */

static int
via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine)
{

	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
	wait_queue_head_t *queue;
	int ret = 0;

	if (via_dmablit_active(blitq, engine, handle, &queue)) {
		DRM_WAIT_ON(ret, *queue, 3 * DRM_HZ,
			    !via_dmablit_active(blitq, engine, handle, NULL));
	}
	DRM_DEBUG("DMA blit sync handle 0x%x engine %d returned %d\n",
		  handle, engine, ret);

	return ret;
}


/*
 * A timer that regularly polls the blit engine in cases where we don't have interrupts:
 * a) Broken hardware (typically those that don't have any video capture facility).
 * b) Blit abort. The hardware doesn't send an interrupt when a blit is aborted.
 * The timer and hardware IRQs can and do work in parallel. If the hardware has
 * IRQs, they will shorten the latency somewhat.
 */

static void
via_dmablit_timer(unsigned long data)
{
	drm_via_blitq_t *blitq = (drm_via_blitq_t *) data;
	struct drm_device *dev = blitq->dev;
	int engine = (int)
		(blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues);

	DRM_DEBUG("Polling timer called for engine %d, jiffies %lu\n", engine,
		  (unsigned long) jiffies);

	via_dmablit_handler(dev, engine, 0);

	if (!timer_pending(&blitq->poll_timer)) {
		mod_timer(&blitq->poll_timer, jiffies + 1);

		/*
		 * Rerun handler to delete timer if engines are off, and
		 * to shorten abort latency. This is a little nasty.
		 */

		via_dmablit_handler(dev, engine, 0);

	}
}


/*
 * Workqueue task that frees data and mappings associated with a blit.
 * Also wakes up waiting processes. Each of these tasks handles one
 * blit engine only and may not be called on each interrupt.
 */

static void
via_dmablit_workqueue(struct work_struct *work)
{
	drm_via_blitq_t *blitq = container_of(work, drm_via_blitq_t, wq);
	struct drm_device *dev = blitq->dev;
	unsigned long irqsave;
	drm_via_sg_info_t *cur_sg;
	int cur_released;


	DRM_DEBUG("Workqueue task called for blit engine %ld\n", (unsigned long)
		  (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues));

	spin_lock_irqsave(&blitq->blit_lock, irqsave);

	while (blitq->serviced != blitq->cur) {

		cur_released = blitq->serviced++;

		DRM_DEBUG("Releasing blit slot %d\n", cur_released);

		if (blitq->serviced >= VIA_NUM_BLIT_SLOTS)
			blitq->serviced = 0;

		cur_sg = blitq->blits[cur_released];
		blitq->num_free++;

		/* Drop the lock while the (sleeping) free operations run. */
		spin_unlock_irqrestore(&blitq->blit_lock, irqsave);

		DRM_WAKEUP(&blitq->busy_queue);

		via_free_sg_info(dev->pdev, cur_sg);
		kfree(cur_sg);

		spin_lock_irqsave(&blitq->blit_lock, irqsave);
	}

	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
}


/*
 * Init all blit engines. Currently we use two, but some hardware has four.
 */

void
via_init_dmablit(struct drm_device *dev)
{
	int i, j;
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_blitq_t *blitq;

	pci_set_master(dev->pdev);

	for (i = 0; i < VIA_NUM_BLIT_ENGINES; ++i) {
		blitq = dev_priv->blit_queues + i;
		blitq->dev = dev;
		blitq->cur_blit_handle = 0;
		blitq->done_blit_handle = 0;
		blitq->head = 0;
		blitq->cur = 0;
		blitq->serviced = 0;
		blitq->num_free = VIA_NUM_BLIT_SLOTS;
		blitq->num_outstanding = 0;
		blitq->is_active = 0;
		blitq->aborting = 0;
		spin_lock_init(&blitq->blit_lock);
		for (j = 0; j < VIA_NUM_BLIT_SLOTS; ++j) {
			DRM_INIT_WAITQUEUE(blitq->blit_queue + j);
		}
		DRM_INIT_WAITQUEUE(&blitq->busy_queue);
		INIT_WORK(&blitq->wq, via_dmablit_workqueue);
		setup_timer(&blitq->poll_timer, via_dmablit_timer,
			    (unsigned long)blitq);
	}
}

/*
 * Build all info and do all mappings required for a blit.
 */

static int
via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
{
	int draw = xfer->to_fb;
	int ret = 0;

	vsg->direction = (draw) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	vsg->bounce_buffer = NULL;

	vsg->state = dr_via_sg_init;

	if (xfer->num_lines <= 0 || xfer->line_length <= 0) {
		DRM_ERROR("Zero size bitblt.\n");
		return DRM_ERR(EINVAL);
	}

	/*
	 * Below check is a driver limitation, not a hardware one. We
	 * don't want to lock unused pages, and don't want to incorporate the
	 * extra logic of avoiding them. Make sure there are none.
	 * (Not a big limitation anyway.)
	 */

	if ((xfer->mem_stride - xfer->line_length) >= PAGE_SIZE) {
		DRM_ERROR("Too large system memory stride. Stride: %d, "
			  "Length: %d\n", xfer->mem_stride, xfer->line_length);
		return DRM_ERR(EINVAL);
	}
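
	/*
	 * If the transfer is fully contiguous in both system memory and the
	 * frame buffer, fold the whole transfer into a single long line so
	 * that fewer descriptors are needed (a driver shortcut, not a
	 * hardware requirement).
	 */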
	if ((xfer->mem_stride == xfer->line_length) &&
	    (xfer->fb_stride == xfer->line_length)) {
		xfer->mem_stride *= xfer->num_lines;
		xfer->line_length = xfer->mem_stride;
		xfer->fb_stride = xfer->mem_stride;
		xfer->num_lines = 1;
	}

	/*
	 * Don't lock an arbitrarily large number of pages, since that causes a
	 * denial-of-service security hole.
	 */

	if (xfer->num_lines > 2048 || (xfer->num_lines*xfer->mem_stride > (2048*2048*4))) {
		DRM_ERROR("Too large PCI DMA bitblt.\n");
		return DRM_ERR(EINVAL);
	}

	/*
	 * We allow a negative fb stride to allow flipping of images in
	 * transfer.
	 */

	if (xfer->mem_stride < xfer->line_length ||
	    abs(xfer->fb_stride) < xfer->line_length) {
		DRM_ERROR("Invalid frame-buffer / memory stride.\n");
		return DRM_ERR(EINVAL);
	}

	/*
	 * A hardware bug seems to be worked around if system memory addresses start on
	 * 16 byte boundaries. This seems a bit restrictive, however. VIA has been
	 * contacted about this. Meanwhile, impose the following restrictions:
	 */

#ifdef VIA_BUGFREE
	if ((((unsigned long)xfer->mem_addr & 3) != ((unsigned long)xfer->fb_addr & 3)) ||
	    ((xfer->num_lines > 1) && ((xfer->mem_stride & 3) != (xfer->fb_stride & 3)))) {
		DRM_ERROR("Invalid DRM bitblt alignment.\n");
		return DRM_ERR(EINVAL);
	}
#else
	if ((((unsigned long)xfer->mem_addr & 15) ||
	     ((unsigned long)xfer->fb_addr & 3)) ||
	    ((xfer->num_lines > 1) &&
	     ((xfer->mem_stride & 15) || (xfer->fb_stride & 3)))) {
		DRM_ERROR("Invalid DRM bitblt alignment.\n");
		return DRM_ERR(EINVAL);
	}
#endif

	if (0 != (ret = via_lock_all_dma_pages(vsg, xfer))) {
		DRM_ERROR("Could not lock DMA pages.\n");
		via_free_sg_info(dev->pdev, vsg);
		return ret;
	}

	/* Pass 0 counts the descriptors; pass 1 fills and maps them. */
	via_map_blit_for_device(dev->pdev, xfer, vsg, 0);
	if (0 != (ret = via_alloc_desc_pages(vsg))) {
		DRM_ERROR("Could not allocate DMA descriptor pages.\n");
		via_free_sg_info(dev->pdev, vsg);
		return ret;
	}
	via_map_blit_for_device(dev->pdev, xfer, vsg, 1);

	return 0;
}

/*
 * Reserve one free slot in the blit queue. Will wait for one second for one
 * to become available. Otherwise -EBUSY is returned.
 */

static int
via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
{
	int ret = 0;
	unsigned long irqsave;

	DRM_DEBUG("Num free is %d\n", blitq->num_free);
	spin_lock_irqsave(&blitq->blit_lock, irqsave);
	while (blitq->num_free == 0) {
		spin_unlock_irqrestore(&blitq->blit_lock, irqsave);

		DRM_WAIT_ON(ret, blitq->busy_queue, DRM_HZ, blitq->num_free > 0);
		if (ret) {
			return (DRM_ERR(EINTR) == ret) ? DRM_ERR(EAGAIN) : ret;
		}

		spin_lock_irqsave(&blitq->blit_lock, irqsave);
	}

	blitq->num_free--;
	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);

	return 0;
}

/*
 * Hand back a free slot if we changed our mind.
 */

static void
via_dmablit_release_slot(drm_via_blitq_t *blitq)
{
	unsigned long irqsave;

	spin_lock_irqsave(&blitq->blit_lock, irqsave);
	blitq->num_free++;
	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
	DRM_WAKEUP(&blitq->busy_queue);
}

/*
 * Grab a free slot. Build blit info and queue a blit.
 */

static int
via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_sg_info_t *vsg;
	drm_via_blitq_t *blitq;
	int ret;
	int engine;
	unsigned long irqsave;

	if (dev_priv == NULL) {
		DRM_ERROR("Called without initialization.\n");
		return DRM_ERR(EINVAL);
	}

	/* Engine 0 serves blits to the frame buffer, engine 1 blits from it. */
	engine = (xfer->to_fb) ? 0 : 1;
	blitq = dev_priv->blit_queues + engine;
	if (0 != (ret = via_dmablit_grab_slot(blitq, engine))) {
		return ret;
	}
	if (NULL == (vsg = kmalloc(sizeof(*vsg), GFP_KERNEL))) {
		via_dmablit_release_slot(blitq);
		return DRM_ERR(ENOMEM);
	}
	if (0 != (ret = via_build_sg_info(dev, vsg, xfer))) {
		via_dmablit_release_slot(blitq);
		kfree(vsg);
		return ret;
	}
	spin_lock_irqsave(&blitq->blit_lock, irqsave);

	blitq->blits[blitq->head++] = vsg;
	if (blitq->head >= VIA_NUM_BLIT_SLOTS)
		blitq->head = 0;
	blitq->num_outstanding++;
	xfer->sync.sync_handle = ++blitq->cur_blit_handle;

	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
	xfer->sync.engine = engine;

	via_dmablit_handler(dev, engine, 0);

	return 0;
}

/*
 * Sync on a previously submitted blit. Note that the X server uses signals
 * extensively, and that there is a very high probability that this IOCTL will
 * be interrupted by a signal. In that case it returns with -EAGAIN for the
 * signal to be delivered. The caller should then reissue the IOCTL. This is
 * similar to what is being done for drmGetLock().
 */
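
/*
 * Illustrative userspace retry loop (a sketch against libdrm; the fd, the
 * DRM_VIA_BLIT_SYNC command index and drmCommandWriteRead() are assumed
 * to come from via_drm.h and libdrm, not from this file):
 *
 *	do {
 *		ret = drmCommandWriteRead(fd, DRM_VIA_BLIT_SYNC,
 *					  &sync, sizeof(sync));
 *	} while (ret == -EAGAIN);
 */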

int
via_dma_blit_sync( DRM_IOCTL_ARGS )
{
	drm_via_blitsync_t sync;
	int err;
	DRM_DEVICE;

	DRM_COPY_FROM_USER_IOCTL(sync, (drm_via_blitsync_t __user *)data, sizeof(sync));

	if (sync.engine >= VIA_NUM_BLIT_ENGINES)
		return DRM_ERR(EINVAL);

	err = via_dmablit_sync(dev, sync.sync_handle, sync.engine);

	if (DRM_ERR(EINTR) == err)
		err = DRM_ERR(EAGAIN);

	return err;
}


/*
 * Queue a blit and hand back a handle to be used for sync. This IOCTL may be
 * interrupted by a signal while waiting for a free slot in the blit queue. In
 * that case it returns with -EAGAIN and should be reissued. See the above
 * IOCTL code.
 */

int
via_dma_blit( DRM_IOCTL_ARGS )
{
	drm_via_dmablit_t xfer;
	int err;
	DRM_DEVICE;

	DRM_COPY_FROM_USER_IOCTL(xfer, (drm_via_dmablit_t __user *)data, sizeof(xfer));

	err = via_dmablit(dev, &xfer);

	DRM_COPY_TO_USER_IOCTL((void __user *)data, xfer, sizeof(xfer));

	return err;
}