/*
 * AGPGART driver.
 * Copyright (C) 2004 Silicon Graphics, Inc.
 * Copyright (C) 2002-2005 Dave Jones.
 * Copyright (C) 1999 Jeff Hartmann.
 * Copyright (C) 1999 Precision Insight, Inc.
 * Copyright (C) 1999 Xi Graphics, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
 * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * TODO:
 * - Allocate more than order 0 pages to avoid too much linear map splitting.
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/miscdevice.h>
#include <linux/pm.h>
#include <linux/agp_backend.h>
#include <linux/vmalloc.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <asm/cacheflush.h>
#include <asm/pgtable.h>
#include "agp.h"

__u32 *agp_gatt_table;
int agp_memory_reserved;

/*
 * Needed by the Nforce GART driver for the time being. Would be
 * nice to do this some other way instead of needing this export.
 */
EXPORT_SYMBOL_GPL(agp_memory_reserved);

/*
 * Generic routines for handling agp_memory structures -
 * They use the basic page allocation routines to do the brunt of the work.
 */

void agp_free_key(int key)
{
	if (key < 0)
		return;

	if (key < MAXKEY)
		clear_bit(key, agp_bridge->key_list);
}
EXPORT_SYMBOL(agp_free_key);


static int agp_get_key(void)
{
	int bit;

	bit = find_first_zero_bit(agp_bridge->key_list, MAXKEY);
	if (bit < MAXKEY) {
		set_bit(bit, agp_bridge->key_list);
		return bit;
	}
	return -1;
}

void agp_flush_chipset(struct agp_bridge_data *bridge)
{
	if (bridge->driver->chipset_flush)
		bridge->driver->chipset_flush(bridge);
}
EXPORT_SYMBOL(agp_flush_chipset);

/*
 * Use kmalloc if possible for the page list. Otherwise fall back to
 * vmalloc. This speeds things up and also saves memory for small AGP
 * regions.
 */

void agp_alloc_page_array(size_t size, struct agp_memory *mem)
{
	mem->pages = NULL;
	mem->vmalloc_flag = false;

	if (size <= 2*PAGE_SIZE)
		mem->pages = kmalloc(size, GFP_KERNEL | __GFP_NORETRY);
	if (mem->pages == NULL) {
		mem->pages = vmalloc(size);
		mem->vmalloc_flag = true;
	}
}
EXPORT_SYMBOL(agp_alloc_page_array);

void agp_free_page_array(struct agp_memory *mem)
{
	if (mem->vmalloc_flag) {
		vfree(mem->pages);
	} else {
		kfree(mem->pages);
	}
}
EXPORT_SYMBOL(agp_free_page_array);

static struct agp_memory *agp_create_user_memory(unsigned long num_agp_pages)
{
	struct agp_memory *new;
	unsigned long alloc_size = num_agp_pages*sizeof(struct page *);

	new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL);
	if (new == NULL)
		return NULL;

	new->key = agp_get_key();

	if (new->key < 0) {
		kfree(new);
		return NULL;
	}

	agp_alloc_page_array(alloc_size, new);

	if (new->pages == NULL) {
		agp_free_key(new->key);
		kfree(new);
		return NULL;
	}
	new->num_scratch_pages = 0;
	return new;
}

struct agp_memory *agp_create_memory(int scratch_pages)
{
	struct agp_memory *new;

	new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL);
	if (new == NULL)
		return NULL;

	new->key = agp_get_key();

	if (new->key < 0) {
		kfree(new);
		return NULL;
	}

	agp_alloc_page_array(PAGE_SIZE * scratch_pages, new);

	if (new->pages == NULL) {
		agp_free_key(new->key);
		kfree(new);
		return NULL;
	}
	new->num_scratch_pages = scratch_pages;
	new->type = AGP_NORMAL_MEMORY;
	return new;
}
EXPORT_SYMBOL(agp_create_memory);

/**
 * agp_free_memory - free memory associated with an agp_memory pointer.
 *
 * @curr: agp_memory pointer to be freed.
 *
 * It is the only function that can be called when the backend is not owned
 * by the caller. (So it can free memory on client death.)
 */
void agp_free_memory(struct agp_memory *curr)
{
	size_t i;

	if (curr == NULL)
		return;

	if (curr->is_bound)
		agp_unbind_memory(curr);

	if (curr->type >= AGP_USER_TYPES) {
		agp_generic_free_by_type(curr);
		return;
	}

	if (curr->type != 0) {
		curr->bridge->driver->free_by_type(curr);
		return;
	}
	if (curr->page_count != 0) {
		if (curr->bridge->driver->agp_destroy_pages) {
			curr->bridge->driver->agp_destroy_pages(curr);
		} else {

			for (i = 0; i < curr->page_count; i++) {
				curr->bridge->driver->agp_destroy_page(
					curr->pages[i],
					AGP_PAGE_DESTROY_UNMAP);
			}
			for (i = 0; i < curr->page_count; i++) {
				curr->bridge->driver->agp_destroy_page(
					curr->pages[i],
					AGP_PAGE_DESTROY_FREE);
			}
		}
	}
	agp_free_key(curr->key);
	agp_free_page_array(curr);
	kfree(curr);
}
EXPORT_SYMBOL(agp_free_memory);

#define ENTRIES_PER_PAGE	(PAGE_SIZE / sizeof(unsigned long))

/**
 * agp_allocate_memory - allocate a group of pages of a certain type.
 *
 * @bridge: an agp_bridge_data struct allocated for the AGP host bridge.
 * @page_count: size_t argument of the number of pages
 * @type: u32 argument of the type of memory to be allocated.
 *
 * Every agp bridge device will allow you to allocate AGP_NORMAL_MEMORY which
 * maps to physical ram.  Any other type is device dependent.
 *
 * It returns NULL whenever memory is unavailable.
 */
struct agp_memory *agp_allocate_memory(struct agp_bridge_data *bridge,
				       size_t page_count, u32 type)
{
	int scratch_pages;
	struct agp_memory *new;
	size_t i;

	if (!bridge)
		return NULL;

	if ((atomic_read(&bridge->current_memory_agp) + page_count) > bridge->max_memory_agp)
		return NULL;

	if (type >= AGP_USER_TYPES) {
		new = agp_generic_alloc_user(page_count, type);
		if (new)
			new->bridge = bridge;
		return new;
	}

	if (type != 0) {
		new = bridge->driver->alloc_by_type(page_count, type);
		if (new)
			new->bridge = bridge;
		return new;
	}

	scratch_pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;

	new = agp_create_memory(scratch_pages);

	if (new == NULL)
		return NULL;

	if (bridge->driver->agp_alloc_pages) {
		if (bridge->driver->agp_alloc_pages(bridge, new, page_count)) {
			agp_free_memory(new);
			return NULL;
		}
		new->bridge = bridge;
		return new;
	}

	for (i = 0; i < page_count; i++) {
		struct page *page = bridge->driver->agp_alloc_page(bridge);

		if (page == NULL) {
			agp_free_memory(new);
			return NULL;
		}
		new->pages[i] = page;
		new->page_count++;
	}
	new->bridge = bridge;

	return new;
}
EXPORT_SYMBOL(agp_allocate_memory);


/* End - Generic routines for handling agp_memory structures */


static int agp_return_size(void)
{
	int current_size;
	void *temp;

	temp = agp_bridge->current_size;

	switch (agp_bridge->driver->size_type) {
	case U8_APER_SIZE:
		current_size = A_SIZE_8(temp)->size;
		break;
	case U16_APER_SIZE:
		current_size = A_SIZE_16(temp)->size;
		break;
	case U32_APER_SIZE:
		current_size = A_SIZE_32(temp)->size;
		break;
	case LVL2_APER_SIZE:
		current_size = A_SIZE_LVL2(temp)->size;
		break;
	case FIXED_APER_SIZE:
		current_size = A_SIZE_FIX(temp)->size;
		break;
	default:
		current_size = 0;
		break;
	}

	current_size -= (agp_memory_reserved / (1024*1024));
	if (current_size < 0)
		current_size = 0;
	return current_size;
}


int agp_num_entries(void)
{
	int num_entries;
	void *temp;

	temp = agp_bridge->current_size;

	switch (agp_bridge->driver->size_type) {
	case U8_APER_SIZE:
		num_entries = A_SIZE_8(temp)->num_entries;
		break;
	case U16_APER_SIZE:
		num_entries = A_SIZE_16(temp)->num_entries;
		break;
	case U32_APER_SIZE:
		num_entries = A_SIZE_32(temp)->num_entries;
		break;
	case LVL2_APER_SIZE:
		num_entries = A_SIZE_LVL2(temp)->num_entries;
		break;
	case FIXED_APER_SIZE:
		num_entries = A_SIZE_FIX(temp)->num_entries;
		break;
	default:
		num_entries = 0;
		break;
	}

	num_entries -= agp_memory_reserved >> PAGE_SHIFT;
	if (num_entries < 0)
		num_entries = 0;
	return num_entries;
}
EXPORT_SYMBOL_GPL(agp_num_entries);

/**
 * agp_copy_info - copy bridge state information
 *
 * @bridge: an agp_bridge_data struct allocated for the AGP host bridge.
 * @info: agp_kern_info pointer.  The caller should ensure that this pointer is valid.
 *
 * This function copies information about the agp bridge device and the state of
 * the agp backend into an agp_kern_info pointer.
 */
int agp_copy_info(struct agp_bridge_data *bridge, struct agp_kern_info *info)
{
	memset(info, 0, sizeof(struct agp_kern_info));
	if (!bridge) {
		info->chipset = NOT_SUPPORTED;
		return -EIO;
	}

	info->version.major = bridge->version->major;
	info->version.minor = bridge->version->minor;
	info->chipset = SUPPORTED;
	info->device = bridge->dev;
	if (bridge->mode & AGPSTAT_MODE_3_0)
		info->mode = bridge->mode & ~AGP3_RESERVED_MASK;
	else
		info->mode = bridge->mode & ~AGP2_RESERVED_MASK;
	info->aper_base = bridge->gart_bus_addr;
	info->aper_size = agp_return_size();
	info->max_memory = bridge->max_memory_agp;
	info->current_memory = atomic_read(&bridge->current_memory_agp);
	info->cant_use_aperture = bridge->driver->cant_use_aperture;
	info->vm_ops = bridge->vm_ops;
	info->page_mask = ~0UL;
	return 0;
}
EXPORT_SYMBOL(agp_copy_info);
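
/*
 * Illustrative sketch (an assumption, not code taken from an in-tree
 * caller): a client that only wants to inspect the backend state might do
 * something like the following, where "bridge" was obtained earlier, for
 * example from agp_backend_acquire():
 *
 *	struct agp_kern_info info;
 *
 *	if (agp_copy_info(bridge, &info) != 0)
 *		return -ENODEV;
 *	printk(KERN_INFO PFX "AGP %d.%d aperture at 0x%lx (%zu MB)\n",
 *		info.version.major, info.version.minor,
 *		info.aper_base, info.aper_size);
 */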

/* End - Routine to copy over information structure */

/*
 * Routines for handling swapping of agp_memory into the GATT -
 * These routines take agp_memory and insert them into the GATT.
 * They call device specific routines to actually write to the GATT.
 */

/**
 * agp_bind_memory - Bind an agp_memory structure into the GATT.
 *
 * @curr: agp_memory pointer
 * @pg_start: an offset into the graphics aperture translation table
 *
 * It returns -EINVAL if the pointer == NULL.
 * It returns -EBUSY if the area of the table requested is already in use.
 */
int agp_bind_memory(struct agp_memory *curr, off_t pg_start)
{
	int ret_val;

	if (curr == NULL)
		return -EINVAL;

	if (curr->is_bound) {
		printk(KERN_INFO PFX "memory %p is already bound!\n", curr);
		return -EINVAL;
	}
	if (!curr->is_flushed) {
		curr->bridge->driver->cache_flush();
		curr->is_flushed = true;
	}

	if (curr->bridge->driver->agp_map_memory) {
		ret_val = curr->bridge->driver->agp_map_memory(curr);
		if (ret_val)
			return ret_val;
	}
	ret_val = curr->bridge->driver->insert_memory(curr, pg_start, curr->type);

	if (ret_val != 0)
		return ret_val;

	curr->is_bound = true;
	curr->pg_start = pg_start;
	spin_lock(&agp_bridge->mapped_lock);
	list_add(&curr->mapped_list, &agp_bridge->mapped_list);
	spin_unlock(&agp_bridge->mapped_lock);

	return 0;
}
EXPORT_SYMBOL(agp_bind_memory);


/**
 * agp_unbind_memory - Removes an agp_memory structure from the GATT
 *
 * @curr: agp_memory pointer to be removed from the GATT.
 *
 * It returns -EINVAL if this piece of agp_memory is not currently bound to
 * the graphics aperture translation table or if the agp_memory pointer == NULL
 */
int agp_unbind_memory(struct agp_memory *curr)
{
	int ret_val;

	if (curr == NULL)
		return -EINVAL;

	if (!curr->is_bound) {
		printk(KERN_INFO PFX "memory %p was not bound!\n", curr);
		return -EINVAL;
	}

	ret_val = curr->bridge->driver->remove_memory(curr, curr->pg_start, curr->type);

	if (ret_val != 0)
		return ret_val;

	if (curr->bridge->driver->agp_unmap_memory)
		curr->bridge->driver->agp_unmap_memory(curr);

	curr->is_bound = false;
	curr->pg_start = 0;
	spin_lock(&curr->bridge->mapped_lock);
	list_del(&curr->mapped_list);
	spin_unlock(&curr->bridge->mapped_lock);
	return 0;
}
EXPORT_SYMBOL(agp_unbind_memory);
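
/*
 * Illustrative sketch (an assumption, not code taken from an in-tree
 * caller): the usual life cycle of an agp_memory object as seen by a
 * client such as the agpgart frontend or a DRM driver.  "bridge" and
 * "pg_start" are assumed to be supplied by that caller:
 *
 *	struct agp_memory *mem;
 *	int ret;
 *
 *	mem = agp_allocate_memory(bridge, 16, AGP_NORMAL_MEMORY);
 *	if (mem == NULL)
 *		return -ENOMEM;
 *
 *	ret = agp_bind_memory(mem, pg_start);
 *	if (ret != 0) {
 *		agp_free_memory(mem);
 *		return ret;
 *	}
 *
 *	(use the pages through the aperture starting at pg_start)
 *
 *	agp_unbind_memory(mem);
 *	agp_free_memory(mem);
 */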

/**
 * agp_rebind_memory - Rewrite the entire GATT, useful on resume
 */
int agp_rebind_memory(void)
{
	struct agp_memory *curr;
	int ret_val = 0;

	spin_lock(&agp_bridge->mapped_lock);
	list_for_each_entry(curr, &agp_bridge->mapped_list, mapped_list) {
		ret_val = curr->bridge->driver->insert_memory(curr,
							      curr->pg_start,
							      curr->type);
		if (ret_val != 0)
			break;
	}
	spin_unlock(&agp_bridge->mapped_lock);
	return ret_val;
}
EXPORT_SYMBOL(agp_rebind_memory);
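
/*
 * Illustrative sketch (an assumption, not a real chipset driver): a bridge
 * driver's PCI resume hook would typically reprogram its aperture
 * registers and then replay the bound translations, roughly:
 *
 *	static int example_agp_resume(struct pci_dev *pdev)
 *	{
 *		example_configure_aperture(pdev);	(hypothetical helper)
 *		return agp_rebind_memory();
 *	}
 */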

/* End - Routines for handling swapping of agp_memory into the GATT */


/* Generic Agp routines - Start */
static void agp_v2_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_agpstat)
{
	u32 tmp;

	if (*requested_mode & AGP2_RESERVED_MASK) {
		printk(KERN_INFO PFX "reserved bits set (%x) in mode 0x%x. Fixed.\n",
			*requested_mode & AGP2_RESERVED_MASK, *requested_mode);
		*requested_mode &= ~AGP2_RESERVED_MASK;
	}

	/*
	 * Some dumb bridges are programmed to disobey the AGP2 spec.
	 * This is likely a BIOS misprogramming rather than poweron default, or
	 * it would be a lot more common.
	 * https://bugs.freedesktop.org/show_bug.cgi?id=8816
	 * AGPv2 spec 6.1.9 states:
	 *   The RATE field indicates the data transfer rates supported by this
	 *   device. A.G.P. devices must report all that apply.
	 * Fix them up as best we can.
	 */
	switch (*bridge_agpstat & 7) {
	case 4:
		*bridge_agpstat |= (AGPSTAT2_2X | AGPSTAT2_1X);
		printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x4 rate. "
			"Fixing up support for x2 & x1\n");
		break;
	case 2:
		*bridge_agpstat |= AGPSTAT2_1X;
		printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x2 rate. "
			"Fixing up support for x1\n");
		break;
	default:
		break;
	}

	/* Check that the speed bits make sense.  Only one should be set. */
	tmp = *requested_mode & 7;
	switch (tmp) {
	case 0:
		printk(KERN_INFO PFX "%s tried to set rate=x0. Setting to x1 mode.\n", current->comm);
		*requested_mode |= AGPSTAT2_1X;
		break;
	case 1:
	case 2:
		break;
	case 3:
		*requested_mode &= ~(AGPSTAT2_1X);	/* rate=2 */
		break;
	case 4:
		break;
	case 5:
	case 6:
	case 7:
		*requested_mode &= ~(AGPSTAT2_1X|AGPSTAT2_2X);	/* rate=4 */
		break;
	}

	/* disable SBA if it's not supported */
	if (!((*bridge_agpstat & AGPSTAT_SBA) && (*vga_agpstat & AGPSTAT_SBA) && (*requested_mode & AGPSTAT_SBA)))
		*bridge_agpstat &= ~AGPSTAT_SBA;

	/* Set rate */
	if (!((*bridge_agpstat & AGPSTAT2_4X) && (*vga_agpstat & AGPSTAT2_4X) && (*requested_mode & AGPSTAT2_4X)))
		*bridge_agpstat &= ~AGPSTAT2_4X;

	if (!((*bridge_agpstat & AGPSTAT2_2X) && (*vga_agpstat & AGPSTAT2_2X) && (*requested_mode & AGPSTAT2_2X)))
		*bridge_agpstat &= ~AGPSTAT2_2X;

	if (!((*bridge_agpstat & AGPSTAT2_1X) && (*vga_agpstat & AGPSTAT2_1X) && (*requested_mode & AGPSTAT2_1X)))
		*bridge_agpstat &= ~AGPSTAT2_1X;

	/* Now we know what mode it should be, clear out the unwanted bits. */
	if (*bridge_agpstat & AGPSTAT2_4X)
		*bridge_agpstat &= ~(AGPSTAT2_1X | AGPSTAT2_2X);	/* 4X */

	if (*bridge_agpstat & AGPSTAT2_2X)
		*bridge_agpstat &= ~(AGPSTAT2_1X | AGPSTAT2_4X);	/* 2X */

	if (*bridge_agpstat & AGPSTAT2_1X)
		*bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);	/* 1X */

	/* Apply any errata. */
	if (agp_bridge->flags & AGP_ERRATA_FASTWRITES)
		*bridge_agpstat &= ~AGPSTAT_FW;

	if (agp_bridge->flags & AGP_ERRATA_SBA)
		*bridge_agpstat &= ~AGPSTAT_SBA;

	if (agp_bridge->flags & AGP_ERRATA_1X) {
		*bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);
		*bridge_agpstat |= AGPSTAT2_1X;
	}

	/* If we've dropped down to 1X, disable fast writes. */
	if (*bridge_agpstat & AGPSTAT2_1X)
		*bridge_agpstat &= ~AGPSTAT_FW;
}

/*
 * requested_mode = Mode requested by (typically) X.
 * bridge_agpstat = PCI_AGP_STATUS from agp bridge.
 * vga_agpstat = PCI_AGP_STATUS from graphic card.
 */
static void agp_v3_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_agpstat)
{
	u32 origbridge = *bridge_agpstat, origvga = *vga_agpstat;
	u32 tmp;

	if (*requested_mode & AGP3_RESERVED_MASK) {
		printk(KERN_INFO PFX "reserved bits set (%x) in mode 0x%x. Fixed.\n",
			*requested_mode & AGP3_RESERVED_MASK, *requested_mode);
		*requested_mode &= ~AGP3_RESERVED_MASK;
	}

	/* Check that the speed bits make sense. */
	tmp = *requested_mode & 7;
	if (tmp == 0) {
		printk(KERN_INFO PFX "%s tried to set rate=x0. Setting to AGP3 x4 mode.\n", current->comm);
		*requested_mode |= AGPSTAT3_4X;
	}
	if (tmp >= 3) {
		printk(KERN_INFO PFX "%s tried to set rate=x%d. Setting to AGP3 x8 mode.\n", current->comm, tmp * 4);
		*requested_mode = (*requested_mode & ~7) | AGPSTAT3_8X;
	}

	/* ARQSZ - Set the value to the maximum one.
	 * Don't allow the mode register to override values. */
	*bridge_agpstat = ((*bridge_agpstat & ~AGPSTAT_ARQSZ) |
		max_t(u32, (*bridge_agpstat & AGPSTAT_ARQSZ), (*vga_agpstat & AGPSTAT_ARQSZ)));

	/* Calibration cycle.
	 * Don't allow the mode register to override values. */
	*bridge_agpstat = ((*bridge_agpstat & ~AGPSTAT_CAL_MASK) |
		min_t(u32, (*bridge_agpstat & AGPSTAT_CAL_MASK), (*vga_agpstat & AGPSTAT_CAL_MASK)));

	/* SBA *must* be supported for AGP v3 */
	*bridge_agpstat |= AGPSTAT_SBA;

	/*
	 * Set speed.
	 * Check for invalid speeds.  This can happen when applications
	 * written before the AGP 3.0 standard pass AGP2.x modes to AGP3 hardware
	 */
	if (*requested_mode & AGPSTAT_MODE_3_0) {
		/*
		 * The caller hasn't a clue what it is doing.  The bridge is in
		 * 3.0 mode and we have been passed a 3.0 mode, but with 2.x
		 * speed bits set.  AGP2.x 4x -> AGP3.0 4x.
		 */
		if (*requested_mode & AGPSTAT2_4X) {
			printk(KERN_INFO PFX "%s passes broken AGP3 flags (%x). Fixed.\n",
				current->comm, *requested_mode);
			*requested_mode &= ~AGPSTAT2_4X;
			*requested_mode |= AGPSTAT3_4X;
		}
	} else {
		/*
		 * The caller doesn't know what they are doing.  We are in 3.0
		 * mode, but have been passed an AGP 2.x mode.
		 * Convert AGP 1x,2x,4x -> AGP 3.0 4x.
		 */
		printk(KERN_INFO PFX "%s passes broken AGP2 flags (%x) in AGP3 mode. Fixed.\n",
			current->comm, *requested_mode);
		*requested_mode &= ~(AGPSTAT2_4X | AGPSTAT2_2X | AGPSTAT2_1X);
		*requested_mode |= AGPSTAT3_4X;
	}

	if (*requested_mode & AGPSTAT3_8X) {
		if (!(*bridge_agpstat & AGPSTAT3_8X)) {
			*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
			*bridge_agpstat |= AGPSTAT3_4X;
			printk(KERN_INFO PFX "%s requested AGPx8 but bridge not capable.\n", current->comm);
			return;
		}
		if (!(*vga_agpstat & AGPSTAT3_8X)) {
			*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
			*bridge_agpstat |= AGPSTAT3_4X;
			printk(KERN_INFO PFX "%s requested AGPx8 but graphics card not capable.\n", current->comm);
			return;
		}
		/* All set, bridge & device can do AGP x8 */
		*bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
		goto done;

	} else if (*requested_mode & AGPSTAT3_4X) {
		*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
		*bridge_agpstat |= AGPSTAT3_4X;
		goto done;

	} else {

		/*
		 * If no AGP mode was specified, see if both the graphics card
		 * and the bridge can do x8, and use that if so.
		 * If not, fall back to x4 mode.
		 */
		if ((*bridge_agpstat & AGPSTAT3_8X) && (*vga_agpstat & AGPSTAT3_8X)) {
			printk(KERN_INFO PFX "No AGP mode specified. Setting to highest mode "
				"supported by bridge & card (x8).\n");
			*bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
			*vga_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
		} else {
			printk(KERN_INFO PFX "Fell back to AGPx4 mode because");
			if (!(*bridge_agpstat & AGPSTAT3_8X)) {
				printk(KERN_INFO PFX "bridge couldn't do x8. bridge_agpstat:%x (orig=%x)\n",
					*bridge_agpstat, origbridge);
				*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
				*bridge_agpstat |= AGPSTAT3_4X;
			}
			if (!(*vga_agpstat & AGPSTAT3_8X)) {
				printk(KERN_INFO PFX "graphics card couldn't do x8. vga_agpstat:%x (orig=%x)\n",
					*vga_agpstat, origvga);
				*vga_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
				*vga_agpstat |= AGPSTAT3_4X;
			}
		}
	}

done:
	/* Apply any errata. */
	if (agp_bridge->flags & AGP_ERRATA_FASTWRITES)
		*bridge_agpstat &= ~AGPSTAT_FW;

	if (agp_bridge->flags & AGP_ERRATA_SBA)
		*bridge_agpstat &= ~AGPSTAT_SBA;

	if (agp_bridge->flags & AGP_ERRATA_1X) {
		*bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);
		*bridge_agpstat |= AGPSTAT2_1X;
	}
}


/**
 * agp_collect_device_status - determine correct agp_cmd from various agp_stat's
 * @bridge: an agp_bridge_data struct allocated for the AGP host bridge.
 * @requested_mode: requested agp_stat from userspace (typically from X)
 * @bridge_agpstat: current agp_stat from AGP bridge.
 *
 * This function will hunt for an AGP graphics card, and try to match
 * the requested mode to the capabilities of both the bridge and the card.
 */
u32 agp_collect_device_status(struct agp_bridge_data *bridge, u32 requested_mode, u32 bridge_agpstat)
{
	struct pci_dev *device = NULL;
	u32 vga_agpstat;
	u8 cap_ptr;

	for (;;) {
		device = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, device);
		if (!device) {
			printk(KERN_INFO PFX "Couldn't find an AGP VGA controller.\n");
			return 0;
		}
		cap_ptr = pci_find_capability(device, PCI_CAP_ID_AGP);
		if (cap_ptr)
			break;
	}

	/*
	 * Ok, here we have an AGP device. Disable impossible
	 * settings, and adjust the readqueue to the minimum.
	 */
	pci_read_config_dword(device, cap_ptr+PCI_AGP_STATUS, &vga_agpstat);

	/* adjust RQ depth */
	bridge_agpstat = ((bridge_agpstat & ~AGPSTAT_RQ_DEPTH) |
		min_t(u32, (requested_mode & AGPSTAT_RQ_DEPTH),
			min_t(u32, (bridge_agpstat & AGPSTAT_RQ_DEPTH), (vga_agpstat & AGPSTAT_RQ_DEPTH))));

	/* disable FW if it's not supported */
	if (!((bridge_agpstat & AGPSTAT_FW) &&
	      (vga_agpstat & AGPSTAT_FW) &&
	      (requested_mode & AGPSTAT_FW)))
		bridge_agpstat &= ~AGPSTAT_FW;

	/* Check to see if we are operating in 3.0 mode */
	if (agp_bridge->mode & AGPSTAT_MODE_3_0)
		agp_v3_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat);
	else
		agp_v2_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat);

	pci_dev_put(device);
	return bridge_agpstat;
}
EXPORT_SYMBOL(agp_collect_device_status);


void agp_device_command(u32 bridge_agpstat, bool agp_v3)
{
	struct pci_dev *device = NULL;
	int mode;

	mode = bridge_agpstat & 0x7;
	if (agp_v3)
		mode *= 4;

	for_each_pci_dev(device) {
		u8 agp = pci_find_capability(device, PCI_CAP_ID_AGP);
		if (!agp)
			continue;

		dev_info(&device->dev, "putting AGP V%d device into %dx mode\n",
			 agp_v3 ? 3 : 2, mode);
		pci_write_config_dword(device, agp + PCI_AGP_COMMAND, bridge_agpstat);
	}
}
EXPORT_SYMBOL(agp_device_command);


void get_agp_version(struct agp_bridge_data *bridge)
{
	u32 ncapid;

	/* Exit early if already set by errata workarounds. */
	if (bridge->major_version != 0)
		return;

	pci_read_config_dword(bridge->dev, bridge->capndx, &ncapid);
	bridge->major_version = (ncapid >> AGP_MAJOR_VERSION_SHIFT) & 0xf;
	bridge->minor_version = (ncapid >> AGP_MINOR_VERSION_SHIFT) & 0xf;
}
EXPORT_SYMBOL(get_agp_version);


void agp_generic_enable(struct agp_bridge_data *bridge, u32 requested_mode)
{
	u32 bridge_agpstat, temp;

	get_agp_version(agp_bridge);

	dev_info(&agp_bridge->dev->dev, "AGP %d.%d bridge\n",
		 agp_bridge->major_version, agp_bridge->minor_version);

	pci_read_config_dword(agp_bridge->dev,
			      agp_bridge->capndx + PCI_AGP_STATUS, &bridge_agpstat);

	bridge_agpstat = agp_collect_device_status(agp_bridge, requested_mode, bridge_agpstat);
	if (bridge_agpstat == 0)
		/* Something bad happened. FIXME: Return error code? */
		return;

	bridge_agpstat |= AGPSTAT_AGP_ENABLE;

	/* Do AGP version specific frobbing. */
	if (bridge->major_version >= 3) {
		if (bridge->mode & AGPSTAT_MODE_3_0) {
			/* If we have 3.5, we can do the isoch stuff. */
			if (bridge->minor_version >= 5)
				agp_3_5_enable(bridge);
			agp_device_command(bridge_agpstat, true);
			return;
		} else {
			/* Disable calibration cycle in RX91<1> when not in AGP3.0 mode of operation. */
			bridge_agpstat &= ~(7<<10);
			pci_read_config_dword(bridge->dev,
					      bridge->capndx+AGPCTRL, &temp);
			temp |= (1<<9);
			pci_write_config_dword(bridge->dev,
					       bridge->capndx+AGPCTRL, temp);

			dev_info(&bridge->dev->dev, "bridge is in legacy mode, falling back to 2.x\n");
		}
	}

	/* AGP v<3 */
	agp_device_command(bridge_agpstat, false);
}
EXPORT_SYMBOL(agp_generic_enable);


int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
{
	char *table;
	char *table_end;
	int size;
	int page_order;
	int num_entries;
	int i;
	void *temp;
	struct page *page;

	/* The generic routines can't handle 2 level gatt's */
	if (bridge->driver->size_type == LVL2_APER_SIZE)
		return -EINVAL;

	table = NULL;
	i = bridge->aperture_size_idx;
	temp = bridge->current_size;
	size = page_order = num_entries = 0;

	if (bridge->driver->size_type != FIXED_APER_SIZE) {
		do {
			switch (bridge->driver->size_type) {
			case U8_APER_SIZE:
				size = A_SIZE_8(temp)->size;
				page_order =
				    A_SIZE_8(temp)->page_order;
				num_entries =
				    A_SIZE_8(temp)->num_entries;
				break;
			case U16_APER_SIZE:
				size = A_SIZE_16(temp)->size;
				page_order = A_SIZE_16(temp)->page_order;
				num_entries = A_SIZE_16(temp)->num_entries;
				break;
			case U32_APER_SIZE:
				size = A_SIZE_32(temp)->size;
				page_order = A_SIZE_32(temp)->page_order;
				num_entries = A_SIZE_32(temp)->num_entries;
				break;
			/* This case will never really happen. */
			case FIXED_APER_SIZE:
			case LVL2_APER_SIZE:
			default:
				size = page_order = num_entries = 0;
				break;
			}

			table = alloc_gatt_pages(page_order);

			if (table == NULL) {
				i++;
				switch (bridge->driver->size_type) {
				case U8_APER_SIZE:
					bridge->current_size = A_IDX8(bridge);
					break;
				case U16_APER_SIZE:
					bridge->current_size = A_IDX16(bridge);
					break;
				case U32_APER_SIZE:
					bridge->current_size = A_IDX32(bridge);
					break;
				/* These cases will never really happen. */
				case FIXED_APER_SIZE:
				case LVL2_APER_SIZE:
				default:
					break;
				}
				temp = bridge->current_size;
			} else {
				bridge->aperture_size_idx = i;
			}
		} while (!table && (i < bridge->driver->num_aperture_sizes));
	} else {
		size = ((struct aper_size_info_fixed *) temp)->size;
		page_order = ((struct aper_size_info_fixed *) temp)->page_order;
		num_entries = ((struct aper_size_info_fixed *) temp)->num_entries;
		table = alloc_gatt_pages(page_order);
	}

	if (table == NULL)
		return -ENOMEM;

	table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);

	for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
		SetPageReserved(page);

	bridge->gatt_table_real = (u32 *) table;
	agp_gatt_table = (void *)table;

	bridge->driver->cache_flush();
#ifdef CONFIG_X86
	set_memory_uc((unsigned long)table, 1 << page_order);
	bridge->gatt_table = (void *)table;
#else
	bridge->gatt_table = ioremap_nocache(virt_to_phys(table),
					(PAGE_SIZE * (1 << page_order)));
	bridge->driver->cache_flush();
#endif

	if (bridge->gatt_table == NULL) {
		for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
			ClearPageReserved(page);

		free_gatt_pages(table, page_order);

		return -ENOMEM;
	}
	bridge->gatt_bus_addr = virt_to_phys(bridge->gatt_table_real);

	/* AK: bogus, should encode addresses > 4GB */
	for (i = 0; i < num_entries; i++) {
		writel(bridge->scratch_page, bridge->gatt_table+i);
		readl(bridge->gatt_table+i);	/* PCI Posting. */
	}

	return 0;
}
EXPORT_SYMBOL(agp_generic_create_gatt_table);

int agp_generic_free_gatt_table(struct agp_bridge_data *bridge)
{
	int page_order;
	char *table, *table_end;
	void *temp;
	struct page *page;

	temp = bridge->current_size;

	switch (bridge->driver->size_type) {
	case U8_APER_SIZE:
		page_order = A_SIZE_8(temp)->page_order;
		break;
	case U16_APER_SIZE:
		page_order = A_SIZE_16(temp)->page_order;
		break;
	case U32_APER_SIZE:
		page_order = A_SIZE_32(temp)->page_order;
		break;
	case FIXED_APER_SIZE:
		page_order = A_SIZE_FIX(temp)->page_order;
		break;
	case LVL2_APER_SIZE:
		/* The generic routines can't deal with 2 level gatt's */
		return -EINVAL;
	default:
		page_order = 0;
		break;
	}

	/* Do not worry about freeing memory, because if this is
	 * called, then all agp memory is deallocated and removed
	 * from the table. */

#ifdef CONFIG_X86
	set_memory_wb((unsigned long)bridge->gatt_table, 1 << page_order);
#else
	iounmap(bridge->gatt_table);
#endif
	table = (char *) bridge->gatt_table_real;
	table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);

	for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
		ClearPageReserved(page);

	free_gatt_pages(bridge->gatt_table_real, page_order);

	agp_gatt_table = NULL;
	bridge->gatt_table = NULL;
	bridge->gatt_table_real = NULL;
	bridge->gatt_bus_addr = 0;

	return 0;
}
EXPORT_SYMBOL(agp_generic_free_gatt_table);

int agp_generic_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
{
	int num_entries;
	size_t i;
	off_t j;
	void *temp;
	struct agp_bridge_data *bridge;
	int mask_type;

	bridge = mem->bridge;
	if (!bridge)
		return -EINVAL;

	if (mem->page_count == 0)
		return 0;

	temp = bridge->current_size;

	switch (bridge->driver->size_type) {
	case U8_APER_SIZE:
		num_entries = A_SIZE_8(temp)->num_entries;
		break;
	case U16_APER_SIZE:
		num_entries = A_SIZE_16(temp)->num_entries;
		break;
	case U32_APER_SIZE:
		num_entries = A_SIZE_32(temp)->num_entries;
		break;
	case FIXED_APER_SIZE:
		num_entries = A_SIZE_FIX(temp)->num_entries;
		break;
	case LVL2_APER_SIZE:
		/* The generic routines can't deal with 2 level gatt's */
		return -EINVAL;
	default:
		num_entries = 0;
		break;
	}

	num_entries -= agp_memory_reserved/PAGE_SIZE;
	if (num_entries < 0)
		num_entries = 0;

	if (type != mem->type)
		return -EINVAL;

	mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
	if (mask_type != 0) {
		/* The generic routines know nothing of memory types */
		return -EINVAL;
	}

	/* AK: could wrap */
	if ((pg_start + mem->page_count) > num_entries)
		return -EINVAL;

	j = pg_start;

	while (j < (pg_start + mem->page_count)) {
		if (!PGE_EMPTY(bridge, readl(bridge->gatt_table+j)))
			return -EBUSY;
		j++;
	}

	if (!mem->is_flushed) {
		bridge->driver->cache_flush();
		mem->is_flushed = true;
	}

	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		writel(bridge->driver->mask_memory(bridge,
						   page_to_phys(mem->pages[i]),
						   mask_type),
		       bridge->gatt_table+j);
	}
	readl(bridge->gatt_table+j-1);	/* PCI Posting. */

	bridge->driver->tlb_flush(mem);
	return 0;
}
EXPORT_SYMBOL(agp_generic_insert_memory);


int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
{
	size_t i;
	struct agp_bridge_data *bridge;
	int mask_type;

	bridge = mem->bridge;
	if (!bridge)
		return -EINVAL;

	if (mem->page_count == 0)
		return 0;

	if (type != mem->type)
		return -EINVAL;

	mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
	if (mask_type != 0) {
		/* The generic routines know nothing of memory types */
		return -EINVAL;
	}

	/* AK: bogus, should encode addresses > 4GB */
	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
		writel(bridge->scratch_page, bridge->gatt_table+i);
	}
	readl(bridge->gatt_table+i-1);	/* PCI Posting. */

	bridge->driver->tlb_flush(mem);
	return 0;
}
EXPORT_SYMBOL(agp_generic_remove_memory);

struct agp_memory *agp_generic_alloc_by_type(size_t page_count, int type)
{
	return NULL;
}
EXPORT_SYMBOL(agp_generic_alloc_by_type);

void agp_generic_free_by_type(struct agp_memory *curr)
{
	agp_free_page_array(curr);
	agp_free_key(curr->key);
	kfree(curr);
}
EXPORT_SYMBOL(agp_generic_free_by_type);

struct agp_memory *agp_generic_alloc_user(size_t page_count, int type)
{
	struct agp_memory *new;
	int i;
	int pages;

	pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;
	new = agp_create_user_memory(page_count);
	if (new == NULL)
		return NULL;

	for (i = 0; i < page_count; i++)
		new->pages[i] = NULL;
	new->page_count = 0;
	new->type = type;
	new->num_scratch_pages = pages;

	return new;
}
EXPORT_SYMBOL(agp_generic_alloc_user);

/*
 * Basic Page Allocation Routines -
 * These routines handle page allocation and by default they reserve the allocated
 * memory.  They also handle incrementing the current_memory_agp value, which is checked
 * against a maximum value.
 */

int agp_generic_alloc_pages(struct agp_bridge_data *bridge, struct agp_memory *mem, size_t num_pages)
{
	struct page *page;
	int i, ret = -ENOMEM;

	for (i = 0; i < num_pages; i++) {
		page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
		/* agp_free_memory() needs gart address */
		if (page == NULL)
			goto out;

#ifndef CONFIG_X86
		map_page_into_agp(page);
#endif
		get_page(page);
		atomic_inc(&agp_bridge->current_memory_agp);

		mem->pages[i] = page;
		mem->page_count++;
	}

#ifdef CONFIG_X86
	set_pages_array_uc(mem->pages, num_pages);
#endif
	ret = 0;
out:
	return ret;
}
EXPORT_SYMBOL(agp_generic_alloc_pages);

struct page *agp_generic_alloc_page(struct agp_bridge_data *bridge)
{
	struct page *page;

	page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
	if (page == NULL)
		return NULL;

	map_page_into_agp(page);

	get_page(page);
	atomic_inc(&agp_bridge->current_memory_agp);
	return page;
}
EXPORT_SYMBOL(agp_generic_alloc_page);

void agp_generic_destroy_pages(struct agp_memory *mem)
{
	int i;
	struct page *page;

	if (!mem)
		return;

#ifdef CONFIG_X86
	set_pages_array_wb(mem->pages, mem->page_count);
#endif

	for (i = 0; i < mem->page_count; i++) {
		page = mem->pages[i];

#ifndef CONFIG_X86
		unmap_page_from_agp(page);
#endif
		put_page(page);
		__free_page(page);
		atomic_dec(&agp_bridge->current_memory_agp);
		mem->pages[i] = NULL;
	}
}
EXPORT_SYMBOL(agp_generic_destroy_pages);

void agp_generic_destroy_page(struct page *page, int flags)
{
	if (page == NULL)
		return;

	if (flags & AGP_PAGE_DESTROY_UNMAP)
		unmap_page_from_agp(page);

	if (flags & AGP_PAGE_DESTROY_FREE) {
		put_page(page);
		__free_page(page);
		atomic_dec(&agp_bridge->current_memory_agp);
	}
}
EXPORT_SYMBOL(agp_generic_destroy_page);

/* End Basic Page Allocation Routines */


/**
 * agp_enable - initialise the agp point-to-point connection.
 *
 * @bridge: an agp_bridge_data struct allocated for the AGP host bridge.
 * @mode: agp mode register value to configure with.
 */
void agp_enable(struct agp_bridge_data *bridge, u32 mode)
{
	if (!bridge)
		return;
	bridge->driver->agp_enable(bridge, mode);
}
EXPORT_SYMBOL(agp_enable);
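
/*
 * Illustrative sketch (an assumption, not code taken from an in-tree
 * caller): a client typically acquires the backend, queries it, and then
 * enables the link, roughly:
 *
 *	struct agp_bridge_data *bridge;
 *	struct agp_kern_info info;
 *
 *	bridge = agp_backend_acquire(pdev);
 *	if (bridge == NULL)
 *		return -ENODEV;
 *	agp_copy_info(bridge, &info);
 *	agp_enable(bridge, info.mode);
 *	...
 *	agp_backend_release(bridge);
 *
 * Here "pdev" is whatever PCI device the caller already holds, and passing
 * info.mode back simply requests the modes the bridge itself reports.
 */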

/* When we remove the global variable agp_bridge from all drivers
 * then agp_alloc_bridge and agp_generic_find_bridge need to be updated
 */

struct agp_bridge_data *agp_generic_find_bridge(struct pci_dev *pdev)
{
	if (list_empty(&agp_bridges))
		return NULL;

	return agp_bridge;
}

static void ipi_handler(void *null)
{
	flush_agp_cache();
}

void global_cache_flush(void)
{
	if (on_each_cpu(ipi_handler, NULL, 1) != 0)
		panic(PFX "timed out waiting for the other CPUs!\n");
}
EXPORT_SYMBOL(global_cache_flush);

unsigned long agp_generic_mask_memory(struct agp_bridge_data *bridge,
				      dma_addr_t addr, int type)
{
	/* memory type is ignored in the generic routine */
	if (bridge->driver->masks)
		return addr | bridge->driver->masks[0].mask;
	else
		return addr;
}
EXPORT_SYMBOL(agp_generic_mask_memory);

int agp_generic_type_to_mask_type(struct agp_bridge_data *bridge,
				  int type)
{
	if (type >= AGP_USER_TYPES)
		return 0;
	return type;
}
EXPORT_SYMBOL(agp_generic_type_to_mask_type);

/*
 * These functions are implemented according to the AGPv3 spec,
 * which covers implementation details that had previously been
 * left open.
 */

int agp3_generic_fetch_size(void)
{
	u16 temp_size;
	int i;
	struct aper_size_info_16 *values;

	pci_read_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, &temp_size);
	values = A_SIZE_16(agp_bridge->driver->aperture_sizes);

	for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
		if (temp_size == values[i].size_value) {
			agp_bridge->previous_size =
				agp_bridge->current_size = (void *) (values + i);

			agp_bridge->aperture_size_idx = i;
			return values[i].size;
		}
	}
	return 0;
}
EXPORT_SYMBOL(agp3_generic_fetch_size);

void agp3_generic_tlbflush(struct agp_memory *mem)
{
	u32 ctrl;
	pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl);
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_GTLBEN);
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl);
}
EXPORT_SYMBOL(agp3_generic_tlbflush);

int agp3_generic_configure(void)
{
	u32 temp;
	struct aper_size_info_16 *current_size;

	current_size = A_SIZE_16(agp_bridge->current_size);

	pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

	/* set aperture size */
	pci_write_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, current_size->size_value);
	/* set gart pointer */
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPGARTLO, agp_bridge->gatt_bus_addr);
	/* enable aperture and GTLB */
	pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &temp);
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, temp | AGPCTRL_APERENB | AGPCTRL_GTLBEN);
	return 0;
}
EXPORT_SYMBOL(agp3_generic_configure);

void agp3_generic_cleanup(void)
{
	u32 ctrl;
	pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl);
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_APERENB);
}
EXPORT_SYMBOL(agp3_generic_cleanup);

const struct aper_size_info_16 agp3_generic_sizes[AGP_GENERIC_SIZES_ENTRIES] =
{
	{4096, 1048576, 10, 0x000},
	{2048,  524288,  9, 0x800},
	{1024,  262144,  8, 0xc00},
	{ 512,  131072,  7, 0xe00},
	{ 256,   65536,  6, 0xf00},
	{ 128,   32768,  5, 0xf20},
	{  64,   16384,  4, 0xf30},
	{  32,    8192,  3, 0xf38},
	{  16,    4096,  2, 0xf3c},
	{   8,    2048,  1, 0xf3e},
	{   4,    1024,  0, 0xf3f}
};
EXPORT_SYMBOL(agp3_generic_sizes);