]>
Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* mga_dma.c -- DMA support for mga g200/g400 -*- linux-c -*- |
2 | * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com | |
3 | * | |
4 | * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. | |
5 | * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. | |
6 | * All Rights Reserved. | |
7 | * | |
8 | * Permission is hereby granted, free of charge, to any person obtaining a | |
9 | * copy of this software and associated documentation files (the "Software"), | |
10 | * to deal in the Software without restriction, including without limitation | |
11 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
12 | * and/or sell copies of the Software, and to permit persons to whom the | |
13 | * Software is furnished to do so, subject to the following conditions: | |
14 | * | |
15 | * The above copyright notice and this permission notice (including the next | |
16 | * paragraph) shall be included in all copies or substantial portions of the | |
17 | * Software. | |
18 | * | |
19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
20 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
21 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
22 | * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | |
23 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | |
24 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | |
25 | * DEALINGS IN THE SOFTWARE. | |
6795c985 DA |
26 | */ |
27 | ||
28 | /** | |
29 | * \file mga_dma.c | |
30 | * DMA support for MGA G200 / G400. | |
b5e89ed5 | 31 | * |
6795c985 DA |
32 | * \author Rickard E. (Rik) Faith <faith@valinux.com> |
33 | * \author Jeff Hartmann <jhartmann@valinux.com> | |
34 | * \author Keith Whitwell <keith@tungstengraphics.com> | |
35 | * \author Gareth Hughes <gareth@valinux.com> | |
1da177e4 LT |
36 | */ |
37 | ||
38 | #include "drmP.h" | |
39 | #include "drm.h" | |
6795c985 | 40 | #include "drm_sarea.h" |
1da177e4 LT |
41 | #include "mga_drm.h" |
42 | #include "mga_drv.h" | |
43 | ||
44 | #define MGA_DEFAULT_USEC_TIMEOUT 10000 | |
45 | #define MGA_FREELIST_DEBUG 0 | |
46 | ||
7ccf800e DA |
47 | #define MINIMAL_CLEANUP 0 |
48 | #define FULL_CLEANUP 1 | |
eddca551 | 49 | static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup); |
1da177e4 LT |
50 | |
51 | /* ================================================================ | |
52 | * Engine control | |
53 | */ | |
54 | ||
b5e89ed5 | 55 | int mga_do_wait_for_idle(drm_mga_private_t * dev_priv) |
1da177e4 LT |
56 | { |
57 | u32 status = 0; | |
58 | int i; | |
b5e89ed5 | 59 | DRM_DEBUG("\n"); |
1da177e4 | 60 | |
b5e89ed5 DA |
61 | for (i = 0; i < dev_priv->usec_timeout; i++) { |
62 | status = MGA_READ(MGA_STATUS) & MGA_ENGINE_IDLE_MASK; | |
63 | if (status == MGA_ENDPRDMASTS) { | |
64 | MGA_WRITE8(MGA_CRTC_INDEX, 0); | |
1da177e4 LT |
65 | return 0; |
66 | } | |
b5e89ed5 | 67 | DRM_UDELAY(1); |
1da177e4 LT |
68 | } |
69 | ||
70 | #if MGA_DMA_DEBUG | |
b5e89ed5 DA |
71 | DRM_ERROR("failed!\n"); |
72 | DRM_INFO(" status=0x%08x\n", status); | |
1da177e4 | 73 | #endif |
20caafa6 | 74 | return -EBUSY; |
1da177e4 LT |
75 | } |
76 | ||
b5e89ed5 | 77 | static int mga_do_dma_reset(drm_mga_private_t * dev_priv) |
1da177e4 LT |
78 | { |
79 | drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; | |
80 | drm_mga_primary_buffer_t *primary = &dev_priv->prim; | |
81 | ||
b5e89ed5 | 82 | DRM_DEBUG("\n"); |
1da177e4 LT |
83 | |
84 | /* The primary DMA stream should look like new right about now. | |
85 | */ | |
86 | primary->tail = 0; | |
87 | primary->space = primary->size; | |
88 | primary->last_flush = 0; | |
89 | ||
90 | sarea_priv->last_wrap = 0; | |
91 | ||
92 | /* FIXME: Reset counters, buffer ages etc... | |
93 | */ | |
94 | ||
95 | /* FIXME: What else do we need to reinitialize? WARP stuff? | |
96 | */ | |
97 | ||
98 | return 0; | |
99 | } | |
100 | ||
101 | /* ================================================================ | |
102 | * Primary DMA stream | |
103 | */ | |
104 | ||
b5e89ed5 | 105 | void mga_do_dma_flush(drm_mga_private_t * dev_priv) |
1da177e4 LT |
106 | { |
107 | drm_mga_primary_buffer_t *primary = &dev_priv->prim; | |
108 | u32 head, tail; | |
109 | u32 status = 0; | |
110 | int i; | |
b5e89ed5 DA |
111 | DMA_LOCALS; |
112 | DRM_DEBUG("\n"); | |
113 | ||
114 | /* We need to wait so that we can do an safe flush */ | |
115 | for (i = 0; i < dev_priv->usec_timeout; i++) { | |
116 | status = MGA_READ(MGA_STATUS) & MGA_ENGINE_IDLE_MASK; | |
117 | if (status == MGA_ENDPRDMASTS) | |
118 | break; | |
119 | DRM_UDELAY(1); | |
1da177e4 LT |
120 | } |
121 | ||
b5e89ed5 DA |
122 | if (primary->tail == primary->last_flush) { |
123 | DRM_DEBUG(" bailing out...\n"); | |
1da177e4 LT |
124 | return; |
125 | } | |
126 | ||
127 | tail = primary->tail + dev_priv->primary->offset; | |
128 | ||
129 | /* We need to pad the stream between flushes, as the card | |
130 | * actually (partially?) reads the first of these commands. | |
131 | * See page 4-16 in the G400 manual, middle of the page or so. | |
132 | */ | |
b5e89ed5 | 133 | BEGIN_DMA(1); |
1da177e4 | 134 | |
b5e89ed5 DA |
135 | DMA_BLOCK(MGA_DMAPAD, 0x00000000, |
136 | MGA_DMAPAD, 0x00000000, | |
137 | MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000); | |
1da177e4 LT |
138 | |
139 | ADVANCE_DMA(); | |
140 | ||
141 | primary->last_flush = primary->tail; | |
142 | ||
b5e89ed5 | 143 | head = MGA_READ(MGA_PRIMADDRESS); |
1da177e4 | 144 | |
b5e89ed5 | 145 | if (head <= tail) { |
1da177e4 LT |
146 | primary->space = primary->size - primary->tail; |
147 | } else { | |
148 | primary->space = head - tail; | |
149 | } | |
150 | ||
b5e89ed5 DA |
151 | DRM_DEBUG(" head = 0x%06lx\n", head - dev_priv->primary->offset); |
152 | DRM_DEBUG(" tail = 0x%06lx\n", tail - dev_priv->primary->offset); | |
153 | DRM_DEBUG(" space = 0x%06x\n", primary->space); | |
1da177e4 LT |
154 | |
155 | mga_flush_write_combine(); | |
6795c985 | 156 | MGA_WRITE(MGA_PRIMEND, tail | dev_priv->dma_access); |
1da177e4 | 157 | |
b5e89ed5 | 158 | DRM_DEBUG("done.\n"); |
1da177e4 LT |
159 | } |
160 | ||
b5e89ed5 | 161 | void mga_do_dma_wrap_start(drm_mga_private_t * dev_priv) |
1da177e4 LT |
162 | { |
163 | drm_mga_primary_buffer_t *primary = &dev_priv->prim; | |
164 | u32 head, tail; | |
165 | DMA_LOCALS; | |
b5e89ed5 | 166 | DRM_DEBUG("\n"); |
1da177e4 LT |
167 | |
168 | BEGIN_DMA_WRAP(); | |
169 | ||
b5e89ed5 DA |
170 | DMA_BLOCK(MGA_DMAPAD, 0x00000000, |
171 | MGA_DMAPAD, 0x00000000, | |
172 | MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000); | |
1da177e4 LT |
173 | |
174 | ADVANCE_DMA(); | |
175 | ||
176 | tail = primary->tail + dev_priv->primary->offset; | |
177 | ||
178 | primary->tail = 0; | |
179 | primary->last_flush = 0; | |
180 | primary->last_wrap++; | |
181 | ||
b5e89ed5 | 182 | head = MGA_READ(MGA_PRIMADDRESS); |
1da177e4 | 183 | |
b5e89ed5 | 184 | if (head == dev_priv->primary->offset) { |
1da177e4 LT |
185 | primary->space = primary->size; |
186 | } else { | |
187 | primary->space = head - dev_priv->primary->offset; | |
188 | } | |
189 | ||
b5e89ed5 DA |
190 | DRM_DEBUG(" head = 0x%06lx\n", head - dev_priv->primary->offset); |
191 | DRM_DEBUG(" tail = 0x%06x\n", primary->tail); | |
192 | DRM_DEBUG(" wrap = %d\n", primary->last_wrap); | |
193 | DRM_DEBUG(" space = 0x%06x\n", primary->space); | |
1da177e4 LT |
194 | |
195 | mga_flush_write_combine(); | |
6795c985 | 196 | MGA_WRITE(MGA_PRIMEND, tail | dev_priv->dma_access); |
1da177e4 | 197 | |
b5e89ed5 DA |
198 | set_bit(0, &primary->wrapped); |
199 | DRM_DEBUG("done.\n"); | |
1da177e4 LT |
200 | } |
201 | ||
b5e89ed5 | 202 | void mga_do_dma_wrap_end(drm_mga_private_t * dev_priv) |
1da177e4 LT |
203 | { |
204 | drm_mga_primary_buffer_t *primary = &dev_priv->prim; | |
205 | drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; | |
206 | u32 head = dev_priv->primary->offset; | |
b5e89ed5 | 207 | DRM_DEBUG("\n"); |
1da177e4 LT |
208 | |
209 | sarea_priv->last_wrap++; | |
b5e89ed5 | 210 | DRM_DEBUG(" wrap = %d\n", sarea_priv->last_wrap); |
1da177e4 LT |
211 | |
212 | mga_flush_write_combine(); | |
b5e89ed5 | 213 | MGA_WRITE(MGA_PRIMADDRESS, head | MGA_DMA_GENERAL); |
1da177e4 | 214 | |
b5e89ed5 DA |
215 | clear_bit(0, &primary->wrapped); |
216 | DRM_DEBUG("done.\n"); | |
1da177e4 LT |
217 | } |
218 | ||
1da177e4 LT |
219 | /* ================================================================ |
220 | * Freelist management | |
221 | */ | |
222 | ||
223 | #define MGA_BUFFER_USED ~0 | |
224 | #define MGA_BUFFER_FREE 0 | |
225 | ||
#if MGA_FREELIST_DEBUG
/* Debug helper: dump the current dispatch state and the freelist. */
static void mga_freelist_print(struct drm_device * dev)
{
	drm_mga_private_t *dev_priv = dev->dev_private;
	drm_mga_freelist_t *entry;

	DRM_INFO("\n");
	DRM_INFO("current dispatch: last=0x%x done=0x%x\n",
		 dev_priv->sarea_priv->last_dispatch,
		 (unsigned int)(MGA_READ(MGA_PRIMADDRESS) -
				dev_priv->primary->offset));
	DRM_INFO("current freelist:\n");

	for (entry = dev_priv->head->next; entry; entry = entry->next) {
		DRM_INFO(" %p idx=%2d age=0x%x 0x%06lx\n",
			 entry, entry->buf->idx, entry->age.head,
			 entry->age.head - dev_priv->primary->offset);
	}
	DRM_INFO("\n");
}
#endif
247 | ||
eddca551 | 248 | static int mga_freelist_init(struct drm_device * dev, drm_mga_private_t * dev_priv) |
1da177e4 | 249 | { |
cdd55a29 | 250 | struct drm_device_dma *dma = dev->dma; |
056219e2 | 251 | struct drm_buf *buf; |
1da177e4 LT |
252 | drm_mga_buf_priv_t *buf_priv; |
253 | drm_mga_freelist_t *entry; | |
254 | int i; | |
b5e89ed5 | 255 | DRM_DEBUG("count=%d\n", dma->buf_count); |
1da177e4 | 256 | |
b5e89ed5 DA |
257 | dev_priv->head = drm_alloc(sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER); |
258 | if (dev_priv->head == NULL) | |
20caafa6 | 259 | return -ENOMEM; |
1da177e4 | 260 | |
b5e89ed5 DA |
261 | memset(dev_priv->head, 0, sizeof(drm_mga_freelist_t)); |
262 | SET_AGE(&dev_priv->head->age, MGA_BUFFER_USED, 0); | |
1da177e4 | 263 | |
b5e89ed5 | 264 | for (i = 0; i < dma->buf_count; i++) { |
1da177e4 | 265 | buf = dma->buflist[i]; |
b5e89ed5 | 266 | buf_priv = buf->dev_private; |
1da177e4 | 267 | |
b5e89ed5 DA |
268 | entry = drm_alloc(sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER); |
269 | if (entry == NULL) | |
20caafa6 | 270 | return -ENOMEM; |
1da177e4 | 271 | |
b5e89ed5 | 272 | memset(entry, 0, sizeof(drm_mga_freelist_t)); |
1da177e4 LT |
273 | |
274 | entry->next = dev_priv->head->next; | |
275 | entry->prev = dev_priv->head; | |
b5e89ed5 | 276 | SET_AGE(&entry->age, MGA_BUFFER_FREE, 0); |
1da177e4 LT |
277 | entry->buf = buf; |
278 | ||
b5e89ed5 | 279 | if (dev_priv->head->next != NULL) |
1da177e4 | 280 | dev_priv->head->next->prev = entry; |
b5e89ed5 | 281 | if (entry->next == NULL) |
1da177e4 LT |
282 | dev_priv->tail = entry; |
283 | ||
284 | buf_priv->list_entry = entry; | |
285 | buf_priv->discard = 0; | |
286 | buf_priv->dispatched = 0; | |
287 | ||
288 | dev_priv->head->next = entry; | |
289 | } | |
290 | ||
291 | return 0; | |
292 | } | |
293 | ||
eddca551 | 294 | static void mga_freelist_cleanup(struct drm_device * dev) |
1da177e4 LT |
295 | { |
296 | drm_mga_private_t *dev_priv = dev->dev_private; | |
297 | drm_mga_freelist_t *entry; | |
298 | drm_mga_freelist_t *next; | |
b5e89ed5 | 299 | DRM_DEBUG("\n"); |
1da177e4 LT |
300 | |
301 | entry = dev_priv->head; | |
b5e89ed5 | 302 | while (entry) { |
1da177e4 | 303 | next = entry->next; |
b5e89ed5 | 304 | drm_free(entry, sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER); |
1da177e4 LT |
305 | entry = next; |
306 | } | |
307 | ||
308 | dev_priv->head = dev_priv->tail = NULL; | |
309 | } | |
310 | ||
#if 0
/* FIXME: Still needed?
 *
 * Mark every buffer's freelist entry as free again (disabled).
 */
static void mga_freelist_reset(struct drm_device * dev)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf *buf;
	drm_mga_buf_priv_t *buf_priv;
	int i;

	for (i = 0; i < dma->buf_count; i++) {
		buf = dma->buflist[i];
		buf_priv = buf->dev_private;
		SET_AGE(&buf_priv->list_entry->age, MGA_BUFFER_FREE, 0);
	}
}
#endif
328 | ||
056219e2 | 329 | static struct drm_buf *mga_freelist_get(struct drm_device * dev) |
1da177e4 LT |
330 | { |
331 | drm_mga_private_t *dev_priv = dev->dev_private; | |
332 | drm_mga_freelist_t *next; | |
333 | drm_mga_freelist_t *prev; | |
334 | drm_mga_freelist_t *tail = dev_priv->tail; | |
335 | u32 head, wrap; | |
b5e89ed5 | 336 | DRM_DEBUG("\n"); |
1da177e4 | 337 | |
b5e89ed5 | 338 | head = MGA_READ(MGA_PRIMADDRESS); |
1da177e4 LT |
339 | wrap = dev_priv->sarea_priv->last_wrap; |
340 | ||
b5e89ed5 DA |
341 | DRM_DEBUG(" tail=0x%06lx %d\n", |
342 | tail->age.head ? | |
343 | tail->age.head - dev_priv->primary->offset : 0, | |
344 | tail->age.wrap); | |
345 | DRM_DEBUG(" head=0x%06lx %d\n", | |
346 | head - dev_priv->primary->offset, wrap); | |
1da177e4 | 347 | |
b5e89ed5 | 348 | if (TEST_AGE(&tail->age, head, wrap)) { |
1da177e4 LT |
349 | prev = dev_priv->tail->prev; |
350 | next = dev_priv->tail; | |
351 | prev->next = NULL; | |
352 | next->prev = next->next = NULL; | |
353 | dev_priv->tail = prev; | |
b5e89ed5 | 354 | SET_AGE(&next->age, MGA_BUFFER_USED, 0); |
1da177e4 LT |
355 | return next->buf; |
356 | } | |
357 | ||
b5e89ed5 | 358 | DRM_DEBUG("returning NULL!\n"); |
1da177e4 LT |
359 | return NULL; |
360 | } | |
361 | ||
056219e2 | 362 | int mga_freelist_put(struct drm_device * dev, struct drm_buf * buf) |
1da177e4 LT |
363 | { |
364 | drm_mga_private_t *dev_priv = dev->dev_private; | |
365 | drm_mga_buf_priv_t *buf_priv = buf->dev_private; | |
366 | drm_mga_freelist_t *head, *entry, *prev; | |
367 | ||
b5e89ed5 DA |
368 | DRM_DEBUG("age=0x%06lx wrap=%d\n", |
369 | buf_priv->list_entry->age.head - | |
370 | dev_priv->primary->offset, buf_priv->list_entry->age.wrap); | |
1da177e4 LT |
371 | |
372 | entry = buf_priv->list_entry; | |
373 | head = dev_priv->head; | |
374 | ||
b5e89ed5 DA |
375 | if (buf_priv->list_entry->age.head == MGA_BUFFER_USED) { |
376 | SET_AGE(&entry->age, MGA_BUFFER_FREE, 0); | |
1da177e4 LT |
377 | prev = dev_priv->tail; |
378 | prev->next = entry; | |
379 | entry->prev = prev; | |
380 | entry->next = NULL; | |
381 | } else { | |
382 | prev = head->next; | |
383 | head->next = entry; | |
384 | prev->prev = entry; | |
385 | entry->prev = head; | |
386 | entry->next = prev; | |
387 | } | |
388 | ||
389 | return 0; | |
390 | } | |
391 | ||
1da177e4 LT |
392 | /* ================================================================ |
393 | * DMA initialization, cleanup | |
394 | */ | |
395 | ||
eddca551 | 396 | int mga_driver_load(struct drm_device * dev, unsigned long flags) |
6795c985 | 397 | { |
b5e89ed5 | 398 | drm_mga_private_t *dev_priv; |
6795c985 DA |
399 | |
400 | dev_priv = drm_alloc(sizeof(drm_mga_private_t), DRM_MEM_DRIVER); | |
401 | if (!dev_priv) | |
20caafa6 | 402 | return -ENOMEM; |
6795c985 DA |
403 | |
404 | dev->dev_private = (void *)dev_priv; | |
405 | memset(dev_priv, 0, sizeof(drm_mga_private_t)); | |
406 | ||
407 | dev_priv->usec_timeout = MGA_DEFAULT_USEC_TIMEOUT; | |
408 | dev_priv->chipset = flags; | |
409 | ||
22eae947 DA |
410 | dev_priv->mmio_base = drm_get_resource_start(dev, 1); |
411 | dev_priv->mmio_size = drm_get_resource_len(dev, 1); | |
412 | ||
413 | dev->counters += 3; | |
414 | dev->types[6] = _DRM_STAT_IRQ; | |
415 | dev->types[7] = _DRM_STAT_PRIMARY; | |
416 | dev->types[8] = _DRM_STAT_SECONDARY; | |
417 | ||
6795c985 DA |
418 | return 0; |
419 | } | |
420 | ||
908f9c48 | 421 | #if __OS_HAS_AGP |
6795c985 DA |
422 | /** |
423 | * Bootstrap the driver for AGP DMA. | |
b5e89ed5 | 424 | * |
6795c985 DA |
425 | * \todo |
426 | * Investigate whether there is any benifit to storing the WARP microcode in | |
427 | * AGP memory. If not, the microcode may as well always be put in PCI | |
428 | * memory. | |
429 | * | |
430 | * \todo | |
431 | * This routine needs to set dma_bs->agp_mode to the mode actually configured | |
432 | * in the hardware. Looking just at the Linux AGP driver code, I don't see | |
433 | * an easy way to determine this. | |
434 | * | |
435 | * \sa mga_do_dma_bootstrap, mga_do_pci_dma_bootstrap | |
436 | */ | |
eddca551 | 437 | static int mga_do_agp_dma_bootstrap(struct drm_device * dev, |
6795c985 DA |
438 | drm_mga_dma_bootstrap_t * dma_bs) |
439 | { | |
b5e89ed5 DA |
440 | drm_mga_private_t *const dev_priv = |
441 | (drm_mga_private_t *) dev->dev_private; | |
11909d64 | 442 | unsigned int warp_size = mga_warp_microcode_size(dev_priv); |
6795c985 | 443 | int err; |
b5e89ed5 | 444 | unsigned offset; |
6795c985 | 445 | const unsigned secondary_size = dma_bs->secondary_bin_count |
b5e89ed5 | 446 | * dma_bs->secondary_bin_size; |
6795c985 | 447 | const unsigned agp_size = (dma_bs->agp_size << 20); |
eddca551 DA |
448 | struct drm_buf_desc req; |
449 | struct drm_agp_mode mode; | |
450 | struct drm_agp_info info; | |
451 | struct drm_agp_buffer agp_req; | |
452 | struct drm_agp_binding bind_req; | |
6795c985 | 453 | |
6795c985 DA |
454 | /* Acquire AGP. */ |
455 | err = drm_agp_acquire(dev); | |
456 | if (err) { | |
7ccf800e | 457 | DRM_ERROR("Unable to acquire AGP: %d\n", err); |
6795c985 DA |
458 | return err; |
459 | } | |
460 | ||
461 | err = drm_agp_info(dev, &info); | |
462 | if (err) { | |
7ccf800e | 463 | DRM_ERROR("Unable to get AGP info: %d\n", err); |
6795c985 DA |
464 | return err; |
465 | } | |
466 | ||
467 | mode.mode = (info.mode & ~0x07) | dma_bs->agp_mode; | |
468 | err = drm_agp_enable(dev, mode); | |
469 | if (err) { | |
470 | DRM_ERROR("Unable to enable AGP (mode = 0x%lx)\n", mode.mode); | |
471 | return err; | |
472 | } | |
473 | ||
6795c985 DA |
474 | /* In addition to the usual AGP mode configuration, the G200 AGP cards |
475 | * need to have the AGP mode "manually" set. | |
476 | */ | |
477 | ||
478 | if (dev_priv->chipset == MGA_CARD_TYPE_G200) { | |
479 | if (mode.mode & 0x02) { | |
480 | MGA_WRITE(MGA_AGP_PLL, MGA_AGP2XPLL_ENABLE); | |
b5e89ed5 | 481 | } else { |
6795c985 DA |
482 | MGA_WRITE(MGA_AGP_PLL, MGA_AGP2XPLL_DISABLE); |
483 | } | |
484 | } | |
485 | ||
6795c985 | 486 | /* Allocate and bind AGP memory. */ |
7ccf800e DA |
487 | agp_req.size = agp_size; |
488 | agp_req.type = 0; | |
489 | err = drm_agp_alloc(dev, &agp_req); | |
490 | if (err) { | |
491 | dev_priv->agp_size = 0; | |
6795c985 DA |
492 | DRM_ERROR("Unable to allocate %uMB AGP memory\n", |
493 | dma_bs->agp_size); | |
7ccf800e | 494 | return err; |
6795c985 | 495 | } |
bc5f4523 | 496 | |
7ccf800e DA |
497 | dev_priv->agp_size = agp_size; |
498 | dev_priv->agp_handle = agp_req.handle; | |
b5e89ed5 | 499 | |
7ccf800e DA |
500 | bind_req.handle = agp_req.handle; |
501 | bind_req.offset = 0; | |
502 | err = drm_agp_bind(dev, &bind_req); | |
6795c985 | 503 | if (err) { |
7ccf800e | 504 | DRM_ERROR("Unable to bind AGP memory: %d\n", err); |
6795c985 DA |
505 | return err; |
506 | } | |
507 | ||
11909d64 DA |
508 | /* Make drm_addbufs happy by not trying to create a mapping for less |
509 | * than a page. | |
510 | */ | |
511 | if (warp_size < PAGE_SIZE) | |
512 | warp_size = PAGE_SIZE; | |
513 | ||
6795c985 | 514 | offset = 0; |
b5e89ed5 DA |
515 | err = drm_addmap(dev, offset, warp_size, |
516 | _DRM_AGP, _DRM_READ_ONLY, &dev_priv->warp); | |
6795c985 | 517 | if (err) { |
7ccf800e | 518 | DRM_ERROR("Unable to map WARP microcode: %d\n", err); |
6795c985 DA |
519 | return err; |
520 | } | |
521 | ||
522 | offset += warp_size; | |
b5e89ed5 DA |
523 | err = drm_addmap(dev, offset, dma_bs->primary_size, |
524 | _DRM_AGP, _DRM_READ_ONLY, &dev_priv->primary); | |
6795c985 | 525 | if (err) { |
7ccf800e | 526 | DRM_ERROR("Unable to map primary DMA region: %d\n", err); |
6795c985 DA |
527 | return err; |
528 | } | |
529 | ||
530 | offset += dma_bs->primary_size; | |
b5e89ed5 DA |
531 | err = drm_addmap(dev, offset, secondary_size, |
532 | _DRM_AGP, 0, &dev->agp_buffer_map); | |
6795c985 | 533 | if (err) { |
7ccf800e | 534 | DRM_ERROR("Unable to map secondary DMA region: %d\n", err); |
6795c985 DA |
535 | return err; |
536 | } | |
537 | ||
b5e89ed5 | 538 | (void)memset(&req, 0, sizeof(req)); |
6795c985 DA |
539 | req.count = dma_bs->secondary_bin_count; |
540 | req.size = dma_bs->secondary_bin_size; | |
541 | req.flags = _DRM_AGP_BUFFER; | |
542 | req.agp_start = offset; | |
543 | ||
b5e89ed5 | 544 | err = drm_addbufs_agp(dev, &req); |
6795c985 | 545 | if (err) { |
7ccf800e | 546 | DRM_ERROR("Unable to add secondary DMA buffers: %d\n", err); |
6795c985 DA |
547 | return err; |
548 | } | |
549 | ||
7ccf800e | 550 | { |
55910517 | 551 | struct drm_map_list *_entry; |
7ccf800e | 552 | unsigned long agp_token = 0; |
bc5f4523 | 553 | |
bd1b331f | 554 | list_for_each_entry(_entry, &dev->maplist, head) { |
7ccf800e DA |
555 | if (_entry->map == dev->agp_buffer_map) |
556 | agp_token = _entry->user_token; | |
557 | } | |
558 | if (!agp_token) | |
559 | return -EFAULT; | |
560 | ||
561 | dev->agp_buffer_token = agp_token; | |
562 | } | |
563 | ||
6795c985 | 564 | offset += secondary_size; |
b5e89ed5 DA |
565 | err = drm_addmap(dev, offset, agp_size - offset, |
566 | _DRM_AGP, 0, &dev_priv->agp_textures); | |
6795c985 | 567 | if (err) { |
7ccf800e | 568 | DRM_ERROR("Unable to map AGP texture region %d\n", err); |
6795c985 DA |
569 | return err; |
570 | } | |
571 | ||
572 | drm_core_ioremap(dev_priv->warp, dev); | |
573 | drm_core_ioremap(dev_priv->primary, dev); | |
574 | drm_core_ioremap(dev->agp_buffer_map, dev); | |
575 | ||
576 | if (!dev_priv->warp->handle || | |
577 | !dev_priv->primary->handle || !dev->agp_buffer_map->handle) { | |
578 | DRM_ERROR("failed to ioremap agp regions! (%p, %p, %p)\n", | |
579 | dev_priv->warp->handle, dev_priv->primary->handle, | |
580 | dev->agp_buffer_map->handle); | |
20caafa6 | 581 | return -ENOMEM; |
6795c985 DA |
582 | } |
583 | ||
584 | dev_priv->dma_access = MGA_PAGPXFER; | |
585 | dev_priv->wagp_enable = MGA_WAGP_ENABLE; | |
586 | ||
587 | DRM_INFO("Initialized card for AGP DMA.\n"); | |
588 | return 0; | |
589 | } | |
908f9c48 | 590 | #else |
eddca551 | 591 | static int mga_do_agp_dma_bootstrap(struct drm_device * dev, |
908f9c48 DA |
592 | drm_mga_dma_bootstrap_t * dma_bs) |
593 | { | |
594 | return -EINVAL; | |
595 | } | |
596 | #endif | |
6795c985 DA |
597 | |
598 | /** | |
599 | * Bootstrap the driver for PCI DMA. | |
b5e89ed5 | 600 | * |
6795c985 DA |
601 | * \todo |
602 | * The algorithm for decreasing the size of the primary DMA buffer could be | |
603 | * better. The size should be rounded up to the nearest page size, then | |
604 | * decrease the request size by a single page each pass through the loop. | |
605 | * | |
606 | * \todo | |
607 | * Determine whether the maximum address passed to drm_pci_alloc is correct. | |
608 | * The same goes for drm_addbufs_pci. | |
b5e89ed5 | 609 | * |
6795c985 DA |
610 | * \sa mga_do_dma_bootstrap, mga_do_agp_dma_bootstrap |
611 | */ | |
eddca551 | 612 | static int mga_do_pci_dma_bootstrap(struct drm_device * dev, |
6795c985 DA |
613 | drm_mga_dma_bootstrap_t * dma_bs) |
614 | { | |
b5e89ed5 DA |
615 | drm_mga_private_t *const dev_priv = |
616 | (drm_mga_private_t *) dev->dev_private; | |
11909d64 | 617 | unsigned int warp_size = mga_warp_microcode_size(dev_priv); |
6795c985 DA |
618 | unsigned int primary_size; |
619 | unsigned int bin_count; | |
620 | int err; | |
eddca551 | 621 | struct drm_buf_desc req; |
6795c985 | 622 | |
6795c985 DA |
623 | if (dev->dma == NULL) { |
624 | DRM_ERROR("dev->dma is NULL\n"); | |
20caafa6 | 625 | return -EFAULT; |
6795c985 DA |
626 | } |
627 | ||
11909d64 DA |
628 | /* Make drm_addbufs happy by not trying to create a mapping for less |
629 | * than a page. | |
630 | */ | |
631 | if (warp_size < PAGE_SIZE) | |
632 | warp_size = PAGE_SIZE; | |
633 | ||
6795c985 DA |
634 | /* The proper alignment is 0x100 for this mapping */ |
635 | err = drm_addmap(dev, 0, warp_size, _DRM_CONSISTENT, | |
636 | _DRM_READ_ONLY, &dev_priv->warp); | |
637 | if (err != 0) { | |
7ccf800e DA |
638 | DRM_ERROR("Unable to create mapping for WARP microcode: %d\n", |
639 | err); | |
6795c985 DA |
640 | return err; |
641 | } | |
642 | ||
643 | /* Other than the bottom two bits being used to encode other | |
644 | * information, there don't appear to be any restrictions on the | |
645 | * alignment of the primary or secondary DMA buffers. | |
646 | */ | |
647 | ||
b5e89ed5 DA |
648 | for (primary_size = dma_bs->primary_size; primary_size != 0; |
649 | primary_size >>= 1) { | |
6795c985 DA |
650 | /* The proper alignment for this mapping is 0x04 */ |
651 | err = drm_addmap(dev, 0, primary_size, _DRM_CONSISTENT, | |
652 | _DRM_READ_ONLY, &dev_priv->primary); | |
653 | if (!err) | |
654 | break; | |
655 | } | |
656 | ||
657 | if (err != 0) { | |
7ccf800e | 658 | DRM_ERROR("Unable to allocate primary DMA region: %d\n", err); |
20caafa6 | 659 | return -ENOMEM; |
6795c985 DA |
660 | } |
661 | ||
662 | if (dev_priv->primary->size != dma_bs->primary_size) { | |
663 | DRM_INFO("Primary DMA buffer size reduced from %u to %u.\n", | |
b5e89ed5 DA |
664 | dma_bs->primary_size, |
665 | (unsigned)dev_priv->primary->size); | |
6795c985 DA |
666 | dma_bs->primary_size = dev_priv->primary->size; |
667 | } | |
668 | ||
b5e89ed5 DA |
669 | for (bin_count = dma_bs->secondary_bin_count; bin_count > 0; |
670 | bin_count--) { | |
671 | (void)memset(&req, 0, sizeof(req)); | |
6795c985 DA |
672 | req.count = bin_count; |
673 | req.size = dma_bs->secondary_bin_size; | |
674 | ||
b5e89ed5 | 675 | err = drm_addbufs_pci(dev, &req); |
6795c985 DA |
676 | if (!err) { |
677 | break; | |
678 | } | |
679 | } | |
b5e89ed5 | 680 | |
6795c985 | 681 | if (bin_count == 0) { |
7ccf800e | 682 | DRM_ERROR("Unable to add secondary DMA buffers: %d\n", err); |
6795c985 DA |
683 | return err; |
684 | } | |
685 | ||
686 | if (bin_count != dma_bs->secondary_bin_count) { | |
687 | DRM_INFO("Secondary PCI DMA buffer bin count reduced from %u " | |
688 | "to %u.\n", dma_bs->secondary_bin_count, bin_count); | |
689 | ||
690 | dma_bs->secondary_bin_count = bin_count; | |
691 | } | |
692 | ||
693 | dev_priv->dma_access = 0; | |
694 | dev_priv->wagp_enable = 0; | |
695 | ||
696 | dma_bs->agp_mode = 0; | |
697 | ||
698 | DRM_INFO("Initialized card for PCI DMA.\n"); | |
699 | return 0; | |
700 | } | |
701 | ||
eddca551 | 702 | static int mga_do_dma_bootstrap(struct drm_device * dev, |
6795c985 DA |
703 | drm_mga_dma_bootstrap_t * dma_bs) |
704 | { | |
705 | const int is_agp = (dma_bs->agp_mode != 0) && drm_device_is_agp(dev); | |
706 | int err; | |
b5e89ed5 DA |
707 | drm_mga_private_t *const dev_priv = |
708 | (drm_mga_private_t *) dev->dev_private; | |
6795c985 DA |
709 | |
710 | dev_priv->used_new_dma_init = 1; | |
711 | ||
712 | /* The first steps are the same for both PCI and AGP based DMA. Map | |
713 | * the cards MMIO registers and map a status page. | |
714 | */ | |
b5e89ed5 DA |
715 | err = drm_addmap(dev, dev_priv->mmio_base, dev_priv->mmio_size, |
716 | _DRM_REGISTERS, _DRM_READ_ONLY, &dev_priv->mmio); | |
6795c985 | 717 | if (err) { |
7ccf800e | 718 | DRM_ERROR("Unable to map MMIO region: %d\n", err); |
6795c985 DA |
719 | return err; |
720 | } | |
721 | ||
b5e89ed5 DA |
722 | err = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM, |
723 | _DRM_READ_ONLY | _DRM_LOCKED | _DRM_KERNEL, | |
724 | &dev_priv->status); | |
6795c985 | 725 | if (err) { |
7ccf800e | 726 | DRM_ERROR("Unable to map status region: %d\n", err); |
6795c985 DA |
727 | return err; |
728 | } | |
729 | ||
6795c985 DA |
730 | /* The DMA initialization procedure is slightly different for PCI and |
731 | * AGP cards. AGP cards just allocate a large block of AGP memory and | |
732 | * carve off portions of it for internal uses. The remaining memory | |
733 | * is returned to user-mode to be used for AGP textures. | |
734 | */ | |
6795c985 DA |
735 | if (is_agp) { |
736 | err = mga_do_agp_dma_bootstrap(dev, dma_bs); | |
737 | } | |
b5e89ed5 | 738 | |
6795c985 DA |
739 | /* If we attempted to initialize the card for AGP DMA but failed, |
740 | * clean-up any mess that may have been created. | |
741 | */ | |
742 | ||
743 | if (err) { | |
7ccf800e | 744 | mga_do_cleanup_dma(dev, MINIMAL_CLEANUP); |
6795c985 DA |
745 | } |
746 | ||
6795c985 DA |
747 | /* Not only do we want to try and initialized PCI cards for PCI DMA, |
748 | * but we also try to initialized AGP cards that could not be | |
749 | * initialized for AGP DMA. This covers the case where we have an AGP | |
750 | * card in a system with an unsupported AGP chipset. In that case the | |
751 | * card will be detected as AGP, but we won't be able to allocate any | |
752 | * AGP memory, etc. | |
753 | */ | |
754 | ||
755 | if (!is_agp || err) { | |
756 | err = mga_do_pci_dma_bootstrap(dev, dma_bs); | |
757 | } | |
758 | ||
6795c985 DA |
759 | return err; |
760 | } | |
761 | ||
c153f45f EA |
762 | int mga_dma_bootstrap(struct drm_device *dev, void *data, |
763 | struct drm_file *file_priv) | |
6795c985 | 764 | { |
c153f45f | 765 | drm_mga_dma_bootstrap_t *bootstrap = data; |
6795c985 | 766 | int err; |
7ccf800e DA |
767 | static const int modes[] = { 0, 1, 2, 2, 4, 4, 4, 4 }; |
768 | const drm_mga_private_t *const dev_priv = | |
769 | (drm_mga_private_t *) dev->dev_private; | |
6795c985 | 770 | |
c153f45f | 771 | err = mga_do_dma_bootstrap(dev, bootstrap); |
7ccf800e DA |
772 | if (err) { |
773 | mga_do_cleanup_dma(dev, FULL_CLEANUP); | |
774 | return err; | |
775 | } | |
6795c985 | 776 | |
7ccf800e | 777 | if (dev_priv->agp_textures != NULL) { |
c153f45f EA |
778 | bootstrap->texture_handle = dev_priv->agp_textures->offset; |
779 | bootstrap->texture_size = dev_priv->agp_textures->size; | |
b5e89ed5 | 780 | } else { |
c153f45f EA |
781 | bootstrap->texture_handle = 0; |
782 | bootstrap->texture_size = 0; | |
6795c985 DA |
783 | } |
784 | ||
c153f45f | 785 | bootstrap->agp_mode = modes[bootstrap->agp_mode & 0x07]; |
7ccf800e | 786 | |
6795c985 DA |
787 | return err; |
788 | } | |
789 | ||
/**
 * Finish initialization of the primary DMA stream.
 *
 * Caches the frame-buffer layout supplied by userspace in dev_priv, looks up
 * (and, for the old init path, ioremaps) the required DRM maps, installs the
 * WARP microcode and programs the primary DMA registers.
 *
 * \param dev   DRM device being initialized.
 * \param init  layout/offsets supplied by the MGA_INIT_DMA ioctl.
 * \return zero on success, or a negative errno on failure.  On failure the
 *         caller is expected to run mga_do_cleanup_dma().
 */
static int mga_do_init_dma(struct drm_device * dev, drm_mga_init_t * init)
{
	drm_mga_private_t *dev_priv;
	int ret;
	DRM_DEBUG("\n");

	dev_priv = dev->dev_private;

	/* SGRAM supports block clears; SDRAM must use the slower RSTR type. */
	if (init->sgram) {
		dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_BLK;
	} else {
		dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_RSTR;
	}
	dev_priv->maccess = init->maccess;

	/* Cache the userspace-provided framebuffer layout. */
	dev_priv->fb_cpp = init->fb_cpp;
	dev_priv->front_offset = init->front_offset;
	dev_priv->front_pitch = init->front_pitch;
	dev_priv->back_offset = init->back_offset;
	dev_priv->back_pitch = init->back_pitch;

	dev_priv->depth_cpp = init->depth_cpp;
	dev_priv->depth_offset = init->depth_offset;
	dev_priv->depth_pitch = init->depth_pitch;

	/* FIXME: Need to support AGP textures...
	 */
	dev_priv->texture_offset = init->texture_offset[0];
	dev_priv->texture_size = init->texture_size[0];

	dev_priv->sarea = drm_getsarea(dev);
	if (!dev_priv->sarea) {
		DRM_ERROR("failed to find sarea!\n");
		return -EINVAL;
	}

	/* Old-style init: userspace created the maps itself, so look them up
	 * by offset and ioremap them here.  The new bootstrap path already
	 * set these up (and dma_access/wagp_enable) in mga_do_dma_bootstrap.
	 */
	if (!dev_priv->used_new_dma_init) {

		dev_priv->dma_access = MGA_PAGPXFER;
		dev_priv->wagp_enable = MGA_WAGP_ENABLE;

		dev_priv->status = drm_core_findmap(dev, init->status_offset);
		if (!dev_priv->status) {
			DRM_ERROR("failed to find status page!\n");
			return -EINVAL;
		}
		dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
		if (!dev_priv->mmio) {
			DRM_ERROR("failed to find mmio region!\n");
			return -EINVAL;
		}
		dev_priv->warp = drm_core_findmap(dev, init->warp_offset);
		if (!dev_priv->warp) {
			DRM_ERROR("failed to find warp microcode region!\n");
			return -EINVAL;
		}
		dev_priv->primary = drm_core_findmap(dev, init->primary_offset);
		if (!dev_priv->primary) {
			DRM_ERROR("failed to find primary dma region!\n");
			return -EINVAL;
		}
		dev->agp_buffer_token = init->buffers_offset;
		dev->agp_buffer_map =
		    drm_core_findmap(dev, init->buffers_offset);
		if (!dev->agp_buffer_map) {
			DRM_ERROR("failed to find dma buffer region!\n");
			return -EINVAL;
		}

		drm_core_ioremap(dev_priv->warp, dev);
		drm_core_ioremap(dev_priv->primary, dev);
		drm_core_ioremap(dev->agp_buffer_map, dev);
	}

	dev_priv->sarea_priv =
	    (drm_mga_sarea_t *) ((u8 *) dev_priv->sarea->handle +
				 init->sarea_priv_offset);

	/* The DMA buffer map only needs a CPU mapping when DMA transfers are
	 * actually enabled (dma_access != 0).
	 */
	if (!dev_priv->warp->handle ||
	    !dev_priv->primary->handle ||
	    ((dev_priv->dma_access != 0) &&
	     ((dev->agp_buffer_map == NULL) ||
	      (dev->agp_buffer_map->handle == NULL)))) {
		DRM_ERROR("failed to ioremap agp regions!\n");
		return -ENOMEM;
	}

	ret = mga_warp_install_microcode(dev_priv);
	if (ret < 0) {
		DRM_ERROR("failed to install WARP ucode!: %d\n", ret);
		return ret;
	}

	ret = mga_warp_init(dev_priv);
	if (ret < 0) {
		DRM_ERROR("failed to init WARP engine!: %d\n", ret);
		return ret;
	}

	dev_priv->prim.status = (u32 *) dev_priv->status->handle;

	mga_do_wait_for_idle(dev_priv);

	/* Init the primary DMA registers.
	 */
	MGA_WRITE(MGA_PRIMADDRESS, dev_priv->primary->offset | MGA_DMA_GENERAL);
#if 0
	MGA_WRITE(MGA_PRIMPTR, virt_to_bus((void *)dev_priv->prim.status) | MGA_PRIMPTREN0 |	/* Soft trap, SECEND, SETUPEND */
		  MGA_PRIMPTREN1);	/* DWGSYNC */
#endif

	/* Set up the software view of the primary DMA ring buffer. */
	dev_priv->prim.start = (u8 *) dev_priv->primary->handle;
	dev_priv->prim.end = ((u8 *) dev_priv->primary->handle
			      + dev_priv->primary->size);
	dev_priv->prim.size = dev_priv->primary->size;

	dev_priv->prim.tail = 0;
	dev_priv->prim.space = dev_priv->prim.size;
	dev_priv->prim.wrapped = 0;

	dev_priv->prim.last_flush = 0;
	dev_priv->prim.last_wrap = 0;

	/* high_mark: threshold at which the ring is considered nearly full. */
	dev_priv->prim.high_mark = 256 * DMA_BLOCK_SIZE;

	dev_priv->prim.status[0] = dev_priv->primary->offset;
	dev_priv->prim.status[1] = 0;

	dev_priv->sarea_priv->last_wrap = 0;
	dev_priv->sarea_priv->last_frame.head = 0;
	dev_priv->sarea_priv->last_frame.wrap = 0;

	if (mga_freelist_init(dev, dev_priv) < 0) {
		DRM_ERROR("could not initialize freelist\n");
		return -ENOMEM;
	}

	return 0;
}
929 | ||
/**
 * Tear down the DMA state set up by mga_do_init_dma() / the bootstrap path.
 *
 * Safe to call on a partially-initialized device; every teardown step is
 * guarded by a NULL/flag check.
 *
 * \param dev           DRM device to clean up.
 * \param full_cleanup  nonzero to also drop state created by the new-style
 *                      bootstrap (mmio/status maps, used_new_dma_init flag);
 *                      zero for a partial reset that keeps them.
 * \return zero on success, or the error from drm_agp_release().
 */
static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup)
{
	int err = 0;
	DRM_DEBUG("\n");

	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (dev->irq_enabled)
		drm_irq_uninstall(dev);

	if (dev->dev_private) {
		drm_mga_private_t *dev_priv = dev->dev_private;

		/* _DRM_CONSISTENT maps were never ioremapped, so only unmap
		 * the other map types.
		 */
		if ((dev_priv->warp != NULL)
		    && (dev_priv->warp->type != _DRM_CONSISTENT))
			drm_core_ioremapfree(dev_priv->warp, dev);

		if ((dev_priv->primary != NULL)
		    && (dev_priv->primary->type != _DRM_CONSISTENT))
			drm_core_ioremapfree(dev_priv->primary, dev);

		if (dev->agp_buffer_map != NULL)
			drm_core_ioremapfree(dev->agp_buffer_map, dev);

		/* The new-style bootstrap allocated and bound AGP memory in
		 * the kernel; unbind and free it before releasing the AGP
		 * device itself.
		 */
		if (dev_priv->used_new_dma_init) {
#if __OS_HAS_AGP
			if (dev_priv->agp_handle != 0) {
				struct drm_agp_binding unbind_req;
				struct drm_agp_buffer free_req;

				unbind_req.handle = dev_priv->agp_handle;
				drm_agp_unbind(dev, &unbind_req);

				free_req.handle = dev_priv->agp_handle;
				drm_agp_free(dev, &free_req);

				dev_priv->agp_textures = NULL;
				dev_priv->agp_size = 0;
				dev_priv->agp_handle = 0;
			}

			if ((dev->agp != NULL) && dev->agp->acquired) {
				err = drm_agp_release(dev);
			}
#endif
		}

		/* Drop stale map pointers so a later re-init starts clean. */
		dev_priv->warp = NULL;
		dev_priv->primary = NULL;
		dev_priv->sarea = NULL;
		dev_priv->sarea_priv = NULL;
		dev->agp_buffer_map = NULL;

		if (full_cleanup) {
			dev_priv->mmio = NULL;
			dev_priv->status = NULL;
			dev_priv->used_new_dma_init = 0;
		}

		/* Reset the software ring state and cached WARP pipe info. */
		memset(&dev_priv->prim, 0, sizeof(dev_priv->prim));
		dev_priv->warp_pipe = 0;
		memset(dev_priv->warp_pipe_phys, 0,
		       sizeof(dev_priv->warp_pipe_phys));

		if (dev_priv->head != NULL) {
			mga_freelist_cleanup(dev);
		}
	}

	return err;
}
1003 | ||
c153f45f EA |
1004 | int mga_dma_init(struct drm_device *dev, void *data, |
1005 | struct drm_file *file_priv) | |
1da177e4 | 1006 | { |
c153f45f | 1007 | drm_mga_init_t *init = data; |
6795c985 | 1008 | int err; |
1da177e4 | 1009 | |
6c340eac | 1010 | LOCK_TEST_WITH_RETURN(dev, file_priv); |
1da177e4 | 1011 | |
c153f45f | 1012 | switch (init->func) { |
1da177e4 | 1013 | case MGA_INIT_DMA: |
c153f45f | 1014 | err = mga_do_init_dma(dev, init); |
6795c985 | 1015 | if (err) { |
7ccf800e | 1016 | (void)mga_do_cleanup_dma(dev, FULL_CLEANUP); |
6795c985 DA |
1017 | } |
1018 | return err; | |
1da177e4 | 1019 | case MGA_CLEANUP_DMA: |
7ccf800e | 1020 | return mga_do_cleanup_dma(dev, FULL_CLEANUP); |
1da177e4 LT |
1021 | } |
1022 | ||
20caafa6 | 1023 | return -EINVAL; |
1da177e4 LT |
1024 | } |
1025 | ||
1da177e4 LT |
1026 | /* ================================================================ |
1027 | * Primary DMA stream management | |
1028 | */ | |
1029 | ||
c153f45f EA |
1030 | int mga_dma_flush(struct drm_device *dev, void *data, |
1031 | struct drm_file *file_priv) | |
1da177e4 | 1032 | { |
b5e89ed5 | 1033 | drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; |
c153f45f | 1034 | struct drm_lock *lock = data; |
1da177e4 | 1035 | |
6c340eac | 1036 | LOCK_TEST_WITH_RETURN(dev, file_priv); |
1da177e4 | 1037 | |
b5e89ed5 | 1038 | DRM_DEBUG("%s%s%s\n", |
c153f45f EA |
1039 | (lock->flags & _DRM_LOCK_FLUSH) ? "flush, " : "", |
1040 | (lock->flags & _DRM_LOCK_FLUSH_ALL) ? "flush all, " : "", | |
1041 | (lock->flags & _DRM_LOCK_QUIESCENT) ? "idle, " : ""); | |
1da177e4 | 1042 | |
b5e89ed5 | 1043 | WRAP_WAIT_WITH_RETURN(dev_priv); |
1da177e4 | 1044 | |
c153f45f | 1045 | if (lock->flags & (_DRM_LOCK_FLUSH | _DRM_LOCK_FLUSH_ALL)) { |
b5e89ed5 | 1046 | mga_do_dma_flush(dev_priv); |
1da177e4 LT |
1047 | } |
1048 | ||
c153f45f | 1049 | if (lock->flags & _DRM_LOCK_QUIESCENT) { |
1da177e4 | 1050 | #if MGA_DMA_DEBUG |
b5e89ed5 DA |
1051 | int ret = mga_do_wait_for_idle(dev_priv); |
1052 | if (ret < 0) | |
3e684eae | 1053 | DRM_INFO("-EBUSY\n"); |
1da177e4 LT |
1054 | return ret; |
1055 | #else | |
b5e89ed5 | 1056 | return mga_do_wait_for_idle(dev_priv); |
1da177e4 LT |
1057 | #endif |
1058 | } else { | |
1059 | return 0; | |
1060 | } | |
1061 | } | |
1062 | ||
c153f45f EA |
1063 | int mga_dma_reset(struct drm_device *dev, void *data, |
1064 | struct drm_file *file_priv) | |
1da177e4 | 1065 | { |
b5e89ed5 | 1066 | drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; |
1da177e4 | 1067 | |
6c340eac | 1068 | LOCK_TEST_WITH_RETURN(dev, file_priv); |
1da177e4 | 1069 | |
b5e89ed5 | 1070 | return mga_do_dma_reset(dev_priv); |
1da177e4 LT |
1071 | } |
1072 | ||
1da177e4 LT |
1073 | /* ================================================================ |
1074 | * DMA buffer management | |
1075 | */ | |
1076 | ||
6c340eac EA |
1077 | static int mga_dma_get_buffers(struct drm_device * dev, |
1078 | struct drm_file *file_priv, struct drm_dma * d) | |
1da177e4 | 1079 | { |
056219e2 | 1080 | struct drm_buf *buf; |
1da177e4 LT |
1081 | int i; |
1082 | ||
b5e89ed5 DA |
1083 | for (i = d->granted_count; i < d->request_count; i++) { |
1084 | buf = mga_freelist_get(dev); | |
1085 | if (!buf) | |
20caafa6 | 1086 | return -EAGAIN; |
1da177e4 | 1087 | |
6c340eac | 1088 | buf->file_priv = file_priv; |
1da177e4 | 1089 | |
b5e89ed5 DA |
1090 | if (DRM_COPY_TO_USER(&d->request_indices[i], |
1091 | &buf->idx, sizeof(buf->idx))) | |
20caafa6 | 1092 | return -EFAULT; |
b5e89ed5 DA |
1093 | if (DRM_COPY_TO_USER(&d->request_sizes[i], |
1094 | &buf->total, sizeof(buf->total))) | |
20caafa6 | 1095 | return -EFAULT; |
1da177e4 LT |
1096 | |
1097 | d->granted_count++; | |
1098 | } | |
1099 | return 0; | |
1100 | } | |
1101 | ||
c153f45f EA |
1102 | int mga_dma_buffers(struct drm_device *dev, void *data, |
1103 | struct drm_file *file_priv) | |
1da177e4 | 1104 | { |
cdd55a29 | 1105 | struct drm_device_dma *dma = dev->dma; |
b5e89ed5 | 1106 | drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; |
c153f45f | 1107 | struct drm_dma *d = data; |
1da177e4 LT |
1108 | int ret = 0; |
1109 | ||
6c340eac | 1110 | LOCK_TEST_WITH_RETURN(dev, file_priv); |
1da177e4 | 1111 | |
1da177e4 LT |
1112 | /* Please don't send us buffers. |
1113 | */ | |
c153f45f | 1114 | if (d->send_count != 0) { |
b5e89ed5 | 1115 | DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n", |
c153f45f | 1116 | DRM_CURRENTPID, d->send_count); |
20caafa6 | 1117 | return -EINVAL; |
1da177e4 LT |
1118 | } |
1119 | ||
1120 | /* We'll send you buffers. | |
1121 | */ | |
c153f45f | 1122 | if (d->request_count < 0 || d->request_count > dma->buf_count) { |
b5e89ed5 | 1123 | DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n", |
c153f45f | 1124 | DRM_CURRENTPID, d->request_count, dma->buf_count); |
20caafa6 | 1125 | return -EINVAL; |
1da177e4 LT |
1126 | } |
1127 | ||
b5e89ed5 | 1128 | WRAP_TEST_WITH_RETURN(dev_priv); |
1da177e4 | 1129 | |
c153f45f | 1130 | d->granted_count = 0; |
1da177e4 | 1131 | |
c153f45f EA |
1132 | if (d->request_count) { |
1133 | ret = mga_dma_get_buffers(dev, file_priv, d); | |
1da177e4 LT |
1134 | } |
1135 | ||
1da177e4 LT |
1136 | return ret; |
1137 | } | |
1138 | ||
6795c985 DA |
1139 | /** |
1140 | * Called just before the module is unloaded. | |
1141 | */ | |
eddca551 | 1142 | int mga_driver_unload(struct drm_device * dev) |
6795c985 DA |
1143 | { |
1144 | drm_free(dev->dev_private, sizeof(drm_mga_private_t), DRM_MEM_DRIVER); | |
1145 | dev->dev_private = NULL; | |
1146 | ||
1147 | return 0; | |
1148 | } | |
1149 | ||
1150 | /** | |
1151 | * Called when the last opener of the device is closed. | |
1152 | */ | |
eddca551 | 1153 | void mga_driver_lastclose(struct drm_device * dev) |
1da177e4 | 1154 | { |
7ccf800e | 1155 | mga_do_cleanup_dma(dev, FULL_CLEANUP); |
1da177e4 LT |
1156 | } |
1157 | ||
eddca551 | 1158 | int mga_driver_dma_quiescent(struct drm_device * dev) |
1da177e4 LT |
1159 | { |
1160 | drm_mga_private_t *dev_priv = dev->dev_private; | |
b5e89ed5 | 1161 | return mga_do_wait_for_idle(dev_priv); |
1da177e4 | 1162 | } |