/*
 * Intel GTT (Graphics Translation Table) routines
 *
 * Caveat: This driver implements the linux agp interface, but this is far from
 * an agp driver! GTT support ended up here for purely historical reasons: The
 * old userspace intel graphics drivers needed an interface to map memory into
 * the GTT. And the drm provides a default interface for graphic devices sitting
 * on an agp port. So it made sense to fake the GTT support as an agp port to
 * avoid having to create a new api.
 *
 * With gem this does not make much sense anymore, just needlessly complicates
 * the code. But as long as the old graphics stack is still supported, it's
 * stuck with it.
 *
 * /fairy-tale-mode off
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/agp_backend.h>
#include "intel-agp.h"
#include <linux/intel-gtt.h>
#include <drm/intel-gtt.h>
/*
 * If we have Intel graphics, we're not going to have anything other than
 * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
 * on the Intel IOMMU support (CONFIG_DMAR).
 * Only newer chipsets need to bother with this, of course.
 */
#ifdef CONFIG_DMAR
#define USE_PCI_DMA_API 1
#else
#define USE_PCI_DMA_API 0
#endif
/* Max amount of stolen space, anything above will be returned to Linux */
int intel_max_stolen = 32 * 1024 * 1024;
static const struct aper_size_info_fixed intel_i810_sizes[] =
{
	{64, 16384, 4},
	/* The 32M mode still requires a 64k gatt */
	{32, 8192, 4}
};
#define AGP_DCACHE_MEMORY	1
#define AGP_PHYS_MEMORY		2
#define INTEL_AGP_CACHED_MEMORY	3
static struct gatt_mask intel_i810_masks[] =
{
	{.mask = I810_PTE_VALID, .type = 0},
	{.mask = (I810_PTE_VALID | I810_PTE_LOCAL), .type = AGP_DCACHE_MEMORY},
	{.mask = I810_PTE_VALID, .type = 0},
	{.mask = I810_PTE_VALID | I830_PTE_SYSTEM_CACHED,
	 .type = INTEL_AGP_CACHED_MEMORY}
};
#define INTEL_AGP_UNCACHED_MEMORY		0
#define INTEL_AGP_CACHED_MEMORY_LLC		1
#define INTEL_AGP_CACHED_MEMORY_LLC_GFDT	2
#define INTEL_AGP_CACHED_MEMORY_LLC_MLC		3
#define INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT	4
struct intel_gtt_driver {
	unsigned int gen : 8;
	unsigned int is_g33 : 1;
	unsigned int is_pineview : 1;
	unsigned int is_ironlake : 1;
	unsigned int has_pgtbl_enable : 1;
	unsigned int dma_mask_size : 8;
	/* Chipset specific GTT setup */
	int (*setup)(void);
	/* This should undo anything done in ->setup() save the unmapping
	 * of the mmio register file, that's done in the generic code. */
	void (*cleanup)(void);
	void (*write_entry)(dma_addr_t addr, unsigned int entry, unsigned int flags);
	/* Flags is a more or less chipset specific opaque value.
	 * For chipsets that need to support old ums (non-gem) code, this
	 * needs to be identical to the various supported agp memory types! */
	bool (*check_flags)(unsigned int flags);
	void (*chipset_flush)(void);
};
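/*
 * Illustrative sketch (not part of the original file): the generic GTT code
 * only reaches the chipset through this vtable, so a single PTE update for
 * page i of an agp_memory object boils down to roughly:
 *
 *	dma_addr_t addr = page_to_phys(mem->pages[i]);
 *	intel_private.driver->write_entry(addr, pg_start + i, mem->type);
 *	readl(intel_private.gtt + pg_start + i);	(PCI posting read)
 *
 * which is the pattern intel_fake_agp_insert_entries() below follows.
 */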
static struct _intel_private {
	struct intel_gtt base;
	const struct intel_gtt_driver *driver;
	struct pci_dev *pcidev;	/* device one */
	struct pci_dev *bridge_dev;
	u8 __iomem *registers;
	phys_addr_t gtt_bus_addr;
	phys_addr_t gma_bus_addr;
	u32 PGETBL_save;
	u32 __iomem *gtt;	/* I915G */
	int num_dcache_entries;
	union {
		void __iomem *i9xx_flush_page;
		void *i8xx_flush_page;
	};
	struct page *i8xx_page;
	struct resource ifp_resource;
	int resource_valid;
	struct page *scratch_page;
	dma_addr_t scratch_page_dma;
} intel_private;
#define INTEL_GTT_GEN	intel_private.driver->gen
#define IS_G33		intel_private.driver->is_g33
#define IS_PINEVIEW	intel_private.driver->is_pineview
#define IS_IRONLAKE	intel_private.driver->is_ironlake
#define HAS_PGTBL_EN	intel_private.driver->has_pgtbl_enable
static void intel_agp_free_sglist(struct agp_memory *mem)
{
	struct sg_table st;

	st.sgl = mem->sg_list;
	st.orig_nents = st.nents = mem->page_count;

	sg_free_table(&st);

	mem->sg_list = NULL;
	mem->num_sg = 0;
}
static int intel_agp_map_memory(struct agp_memory *mem)
{
	struct sg_table st;
	struct scatterlist *sg;
	int i;

	if (mem->sg_list)
		return 0; /* already mapped (e.g. for resume) */

	DBG("try mapping %lu pages\n", (unsigned long)mem->page_count);

	if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL))
		goto err;

	mem->sg_list = sg = st.sgl;

	for (i = 0 ; i < mem->page_count; i++, sg = sg_next(sg))
		sg_set_page(sg, mem->pages[i], PAGE_SIZE, 0);

	mem->num_sg = pci_map_sg(intel_private.pcidev, mem->sg_list,
				 mem->page_count, PCI_DMA_BIDIRECTIONAL);
	if (unlikely(!mem->num_sg))
		goto err;

	return 0;

err:
	sg_free_table(&st);
	return -ENOMEM;
}

static void intel_agp_unmap_memory(struct agp_memory *mem)
{
	DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count);

	pci_unmap_sg(intel_private.pcidev, mem->sg_list,
		     mem->page_count, PCI_DMA_BIDIRECTIONAL);
	intel_agp_free_sglist(mem);
}
static int intel_i810_fetch_size(void)
{
	u32 smram_miscc;
	struct aper_size_info_fixed *values;

	pci_read_config_dword(intel_private.bridge_dev,
			      I810_SMRAM_MISCC, &smram_miscc);
	values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);

	if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) {
		dev_warn(&intel_private.bridge_dev->dev, "i810 is disabled\n");
		return 0;
	}
	if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) {
		agp_bridge->current_size = (void *) (values + 1);
		agp_bridge->aperture_size_idx = 1;
		return values[1].size;
	} else {
		agp_bridge->current_size = (void *) (values);
		agp_bridge->aperture_size_idx = 0;
		return values[0].size;
	}
}
static int intel_i810_configure(void)
{
	struct aper_size_info_fixed *current_size;
	u32 temp;
	int i;

	current_size = A_SIZE_FIX(agp_bridge->current_size);

	if (!intel_private.registers) {
		pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
		temp &= 0xfff80000;

		intel_private.registers = ioremap(temp, 128 * 4096);
		if (!intel_private.registers) {
			dev_err(&intel_private.pcidev->dev,
				"can't remap memory\n");
			return -ENOMEM;
		}
	}

	if ((readl(intel_private.registers+I810_DRAM_CTL)
	    & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
		/* This will need to be dynamically assigned */
		dev_info(&intel_private.pcidev->dev,
			 "detected 4MB dedicated video ram\n");
		intel_private.num_dcache_entries = 1024;
	}
	pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
	writel(agp_bridge->gatt_bus_addr | I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
	readl(intel_private.registers+I810_PGETBL_CTL);	/* PCI Posting. */

	if (agp_bridge->driver->needs_scratch_page) {
		for (i = 0; i < current_size->num_entries; i++) {
			writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
		}
		readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));	/* PCI posting. */
	}
	global_cache_flush();
	return 0;
}
static void intel_i810_cleanup(void)
{
	writel(0, intel_private.registers+I810_PGETBL_CTL);
	readl(intel_private.registers);	/* PCI Posting. */
	iounmap(intel_private.registers);
}
static void intel_fake_agp_enable(struct agp_bridge_data *bridge, u32 mode)
{
	return;
}
/* Exists to support ARGB cursors */
static struct page *i8xx_alloc_pages(void)
{
	struct page *page;

	page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2);
	if (page == NULL)
		return NULL;

	if (set_pages_uc(page, 4) < 0) {
		set_pages_wb(page, 4);
		__free_pages(page, 2);
		return NULL;
	}
	get_page(page);
	atomic_inc(&agp_bridge->current_memory_agp);
	return page;
}

static void i8xx_destroy_pages(struct page *page)
{
	if (page == NULL)
		return;

	set_pages_wb(page, 4);
	put_page(page);
	__free_pages(page, 2);
	atomic_dec(&agp_bridge->current_memory_agp);
}
static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
				     int type)
{
	int i, j, num_entries;
	int ret = -EINVAL;
	void *temp;
	int mask_type;

	if (mem->page_count == 0)
		goto out;

	temp = agp_bridge->current_size;
	num_entries = A_SIZE_FIX(temp)->num_entries;

	if ((pg_start + mem->page_count) > num_entries)
		goto out_err;

	for (j = pg_start; j < (pg_start + mem->page_count); j++) {
		if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j))) {
			ret = -EBUSY;
			goto out_err;
		}
	}

	if (type != mem->type)
		goto out_err;

	mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);

	switch (mask_type) {
	case AGP_DCACHE_MEMORY:
		if (!mem->is_flushed)
			global_cache_flush();
		for (i = pg_start; i < (pg_start + mem->page_count); i++) {
			writel((i*4096)|I810_PTE_LOCAL|I810_PTE_VALID,
			       intel_private.registers+I810_PTE_BASE+(i*4));
		}
		readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
		break;
	case AGP_PHYS_MEMORY:
	case AGP_NORMAL_MEMORY:
		if (!mem->is_flushed)
			global_cache_flush();
		for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
			writel(agp_bridge->driver->mask_memory(agp_bridge,
					page_to_phys(mem->pages[i]), mask_type),
			       intel_private.registers+I810_PTE_BASE+(j*4));
		}
		readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
		break;
	default:
		goto out_err;
	}

out:
	ret = 0;
out_err:
	mem->is_flushed = true;
	return ret;
}
static int intel_i810_remove_entries(struct agp_memory *mem, off_t pg_start,
				     int type)
{
	int i;

	if (mem->page_count == 0)
		return 0;

	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
		writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
	}
	readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));

	return 0;
}
/*
 * The i810/i830 requires a physical address to program its mouse
 * pointer into hardware.
 * However the Xserver still writes to it through the agp aperture.
 */
static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
{
	struct agp_memory *new;
	struct page *page;

	switch (pg_count) {
	case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge);
		break;
	case 4:
		/* kludge to get 4 physical pages for ARGB cursor */
		page = i8xx_alloc_pages();
		break;
	default:
		return NULL;
	}

	if (page == NULL)
		return NULL;

	new = agp_create_memory(pg_count);
	if (new == NULL)
		return NULL;

	new->pages[0] = page;
	if (pg_count == 4) {
		/* kludge to get 4 physical pages for ARGB cursor */
		new->pages[1] = new->pages[0] + 1;
		new->pages[2] = new->pages[1] + 1;
		new->pages[3] = new->pages[2] + 1;
	}
	new->page_count = pg_count;
	new->num_scratch_pages = pg_count;
	new->type = AGP_PHYS_MEMORY;
	new->physical = page_to_phys(new->pages[0]);
	return new;
}
static struct agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type)
{
	struct agp_memory *new;

	if (type == AGP_DCACHE_MEMORY) {
		if (pg_count != intel_private.num_dcache_entries)
			return NULL;

		new = agp_create_memory(1);
		if (new == NULL)
			return NULL;

		new->type = AGP_DCACHE_MEMORY;
		new->page_count = pg_count;
		new->num_scratch_pages = 0;
		agp_free_page_array(new);
		return new;
	}
	if (type == AGP_PHYS_MEMORY)
		return alloc_agpphysmem_i8xx(pg_count, type);
	return NULL;
}
static void intel_i810_free_by_type(struct agp_memory *curr)
{
	agp_free_key(curr->key);
	if (curr->type == AGP_PHYS_MEMORY) {
		if (curr->page_count == 4)
			i8xx_destroy_pages(curr->pages[0]);
		else {
			agp_bridge->driver->agp_destroy_page(curr->pages[0],
							     AGP_PAGE_DESTROY_UNMAP);
			agp_bridge->driver->agp_destroy_page(curr->pages[0],
							     AGP_PAGE_DESTROY_FREE);
		}
		agp_free_page_array(curr);
	}
	kfree(curr);
}
static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge,
					    dma_addr_t addr, int type)
{
	/* Type checking must be done elsewhere */
	return addr | bridge->driver->masks[type].mask;
}
static int intel_gtt_setup_scratch_page(void)
{
	struct page *page;
	dma_addr_t dma_addr;

	page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
	if (page == NULL)
		return -ENOMEM;
	get_page(page);
	set_pages_uc(page, 1);

	if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2) {
		dma_addr = pci_map_page(intel_private.pcidev, page, 0,
					PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(intel_private.pcidev, dma_addr))
			return -EINVAL;

		intel_private.scratch_page_dma = dma_addr;
	} else
		intel_private.scratch_page_dma = page_to_phys(page);

	intel_private.scratch_page = page;

	return 0;
}
static const struct aper_size_info_fixed intel_fake_agp_sizes[] = {
	{128, 32768, 5},
	/* The 64M mode still requires a 128k gatt */
	{64, 16384, 5},
	{256, 65536, 6},
	{512, 131072, 7},
};
static unsigned int intel_gtt_stolen_entries(void)
{
	u16 gmch_ctrl;
	u8 rdct;
	int local = 0;
	static const int ddt[4] = { 0, 16, 32, 64 };
	unsigned int overhead_entries, stolen_entries;
	unsigned int stolen_size = 0;

	pci_read_config_word(intel_private.bridge_dev,
			     I830_GMCH_CTRL, &gmch_ctrl);

	if (INTEL_GTT_GEN > 4 || IS_PINEVIEW)
		overhead_entries = 0;
	else
		overhead_entries = intel_private.base.gtt_mappable_entries
			/ 1024 + 1;

	overhead_entries += 1; /* BIOS popup */

	if (intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
	    intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
		switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
		case I830_GMCH_GMS_STOLEN_512:
			stolen_size = KB(512);
			break;
		case I830_GMCH_GMS_STOLEN_1024:
			stolen_size = MB(1);
			break;
		case I830_GMCH_GMS_STOLEN_8192:
			stolen_size = MB(8);
			break;
		case I830_GMCH_GMS_LOCAL:
			rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
			stolen_size = (I830_RDRAM_ND(rdct) + 1) *
					MB(ddt[I830_RDRAM_DDT(rdct)]);
			local = 1;
			break;
		default:
			stolen_size = 0;
			break;
		}
	} else if (INTEL_GTT_GEN == 6) {
		/*
		 * SandyBridge has new memory control reg at 0x50.w
		 */
		u16 snb_gmch_ctl;
		pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
		switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
		case SNB_GMCH_GMS_STOLEN_32M:
			stolen_size = MB(32);
			break;
		case SNB_GMCH_GMS_STOLEN_64M:
			stolen_size = MB(64);
			break;
		case SNB_GMCH_GMS_STOLEN_96M:
			stolen_size = MB(96);
			break;
		case SNB_GMCH_GMS_STOLEN_128M:
			stolen_size = MB(128);
			break;
		case SNB_GMCH_GMS_STOLEN_160M:
			stolen_size = MB(160);
			break;
		case SNB_GMCH_GMS_STOLEN_192M:
			stolen_size = MB(192);
			break;
		case SNB_GMCH_GMS_STOLEN_224M:
			stolen_size = MB(224);
			break;
		case SNB_GMCH_GMS_STOLEN_256M:
			stolen_size = MB(256);
			break;
		case SNB_GMCH_GMS_STOLEN_288M:
			stolen_size = MB(288);
			break;
		case SNB_GMCH_GMS_STOLEN_320M:
			stolen_size = MB(320);
			break;
		case SNB_GMCH_GMS_STOLEN_352M:
			stolen_size = MB(352);
			break;
		case SNB_GMCH_GMS_STOLEN_384M:
			stolen_size = MB(384);
			break;
		case SNB_GMCH_GMS_STOLEN_416M:
			stolen_size = MB(416);
			break;
		case SNB_GMCH_GMS_STOLEN_448M:
			stolen_size = MB(448);
			break;
		case SNB_GMCH_GMS_STOLEN_480M:
			stolen_size = MB(480);
			break;
		case SNB_GMCH_GMS_STOLEN_512M:
			stolen_size = MB(512);
			break;
		}
	} else {
		switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
		case I855_GMCH_GMS_STOLEN_1M:
			stolen_size = MB(1);
			break;
		case I855_GMCH_GMS_STOLEN_4M:
			stolen_size = MB(4);
			break;
		case I855_GMCH_GMS_STOLEN_8M:
			stolen_size = MB(8);
			break;
		case I855_GMCH_GMS_STOLEN_16M:
			stolen_size = MB(16);
			break;
		case I855_GMCH_GMS_STOLEN_32M:
			stolen_size = MB(32);
			break;
		case I915_GMCH_GMS_STOLEN_48M:
			stolen_size = MB(48);
			break;
		case I915_GMCH_GMS_STOLEN_64M:
			stolen_size = MB(64);
			break;
		case G33_GMCH_GMS_STOLEN_128M:
			stolen_size = MB(128);
			break;
		case G33_GMCH_GMS_STOLEN_256M:
			stolen_size = MB(256);
			break;
		case INTEL_GMCH_GMS_STOLEN_96M:
			stolen_size = MB(96);
			break;
		case INTEL_GMCH_GMS_STOLEN_160M:
			stolen_size = MB(160);
			break;
		case INTEL_GMCH_GMS_STOLEN_224M:
			stolen_size = MB(224);
			break;
		case INTEL_GMCH_GMS_STOLEN_352M:
			stolen_size = MB(352);
			break;
		default:
			stolen_size = 0;
			break;
		}
	}

	if (!local && stolen_size > intel_max_stolen) {
		dev_info(&intel_private.bridge_dev->dev,
			 "detected %dK stolen memory, trimming to %dK\n",
			 stolen_size / KB(1), intel_max_stolen / KB(1));
		stolen_size = intel_max_stolen;
	} else if (stolen_size > 0) {
		dev_info(&intel_private.bridge_dev->dev, "detected %dK %s memory\n",
			 stolen_size / KB(1), local ? "local" : "stolen");
	} else {
		dev_info(&intel_private.bridge_dev->dev,
			 "no pre-allocated video memory detected\n");
		stolen_size = 0;
	}

	stolen_entries = stolen_size/KB(4) - overhead_entries;

	return stolen_entries;
}
static void i965_adjust_pgetbl_size(unsigned int size_flag)
{
	u32 pgetbl_ctl, pgetbl_ctl2;

	/* ensure that ppgtt is disabled */
	pgetbl_ctl2 = readl(intel_private.registers+I965_PGETBL_CTL2);
	pgetbl_ctl2 &= ~I810_PGETBL_ENABLED;
	writel(pgetbl_ctl2, intel_private.registers+I965_PGETBL_CTL2);

	/* write the new ggtt size */
	pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
	pgetbl_ctl &= ~I965_PGETBL_SIZE_MASK;
	pgetbl_ctl |= size_flag;
	writel(pgetbl_ctl, intel_private.registers+I810_PGETBL_CTL);
}
static unsigned int i965_gtt_total_entries(void)
{
	int size;
	u32 pgetbl_ctl;
	u16 gmch_ctl;

	pci_read_config_word(intel_private.bridge_dev,
			     I830_GMCH_CTRL, &gmch_ctl);

	if (INTEL_GTT_GEN == 5) {
		switch (gmch_ctl & G4x_GMCH_SIZE_MASK) {
		case G4x_GMCH_SIZE_1M:
		case G4x_GMCH_SIZE_VT_1M:
			i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1MB);
			break;
		case G4x_GMCH_SIZE_VT_1_5M:
			i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1_5MB);
			break;
		case G4x_GMCH_SIZE_2M:
		case G4x_GMCH_SIZE_VT_2M:
			i965_adjust_pgetbl_size(I965_PGETBL_SIZE_2MB);
			break;
		}
	}

	pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);

	switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
	case I965_PGETBL_SIZE_128KB:
		size = KB(128);
		break;
	case I965_PGETBL_SIZE_256KB:
		size = KB(256);
		break;
	case I965_PGETBL_SIZE_512KB:
		size = KB(512);
		break;
	/* GTT pagetable sizes bigger than 512KB are not possible on G33! */
	case I965_PGETBL_SIZE_1MB:
		size = KB(1024);
		break;
	case I965_PGETBL_SIZE_2MB:
		size = KB(2048);
		break;
	case I965_PGETBL_SIZE_1_5MB:
		size = KB(1024 + 512);
		break;
	default:
		dev_info(&intel_private.pcidev->dev,
			 "unknown page table size, assuming 512KB\n");
		size = KB(512);
	}

	return size/4;
}
static unsigned int intel_gtt_total_entries(void)
{
	int size;

	if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5)
		return i965_gtt_total_entries();
	else if (INTEL_GTT_GEN == 6) {
		u16 snb_gmch_ctl;

		pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
		switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
		default:
		case SNB_GTT_SIZE_0M:
			printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
			size = MB(0);
			break;
		case SNB_GTT_SIZE_1M:
			size = MB(1);
			break;
		case SNB_GTT_SIZE_2M:
			size = MB(2);
			break;
		}
		return size/4;
	} else {
		/* On previous hardware, the GTT size was just what was
		 * required to map the aperture.
		 */
		return intel_private.base.gtt_mappable_entries;
	}
}
static unsigned int intel_gtt_mappable_entries(void)
{
	unsigned int aperture_size;

	if (INTEL_GTT_GEN == 2) {
		u16 gmch_ctrl;

		pci_read_config_word(intel_private.bridge_dev,
				     I830_GMCH_CTRL, &gmch_ctrl);

		if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_64M)
			aperture_size = MB(64);
		else
			aperture_size = MB(128);
	} else {
		/* 9xx supports large sizes, just look at the length */
		aperture_size = pci_resource_len(intel_private.pcidev, 2);
	}

	return aperture_size >> PAGE_SHIFT;
}
static void intel_gtt_teardown_scratch_page(void)
{
	set_pages_wb(intel_private.scratch_page, 1);
	pci_unmap_page(intel_private.pcidev, intel_private.scratch_page_dma,
		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	put_page(intel_private.scratch_page);
	__free_page(intel_private.scratch_page);
}
static void intel_gtt_cleanup(void)
{
	intel_private.driver->cleanup();

	iounmap(intel_private.gtt);
	iounmap(intel_private.registers);

	intel_gtt_teardown_scratch_page();
}
static int intel_gtt_init(void)
{
	u32 gtt_map_size;
	int ret;

	ret = intel_private.driver->setup();
	if (ret != 0)
		return ret;

	intel_private.base.gtt_mappable_entries = intel_gtt_mappable_entries();
	intel_private.base.gtt_total_entries = intel_gtt_total_entries();

	/* save the PGETBL reg for resume */
	intel_private.PGETBL_save =
		readl(intel_private.registers+I810_PGETBL_CTL)
			& ~I810_PGETBL_ENABLED;
	/* we only ever restore the register when enabling the PGTBL... */
	if (HAS_PGTBL_EN)
		intel_private.PGETBL_save |= I810_PGETBL_ENABLED;

	dev_info(&intel_private.bridge_dev->dev,
		 "detected gtt size: %dK total, %dK mappable\n",
		 intel_private.base.gtt_total_entries * 4,
		 intel_private.base.gtt_mappable_entries * 4);

	gtt_map_size = intel_private.base.gtt_total_entries * 4;

	intel_private.gtt = ioremap(intel_private.gtt_bus_addr,
				    gtt_map_size);
	if (!intel_private.gtt) {
		intel_private.driver->cleanup();
		iounmap(intel_private.registers);
		return -ENOMEM;
	}

	global_cache_flush();   /* FIXME: ? */

	/* we have to call this as early as possible after the MMIO base address is known */
	intel_private.base.gtt_stolen_entries = intel_gtt_stolen_entries();
	if (intel_private.base.gtt_stolen_entries == 0) {
		intel_private.driver->cleanup();
		iounmap(intel_private.registers);
		iounmap(intel_private.gtt);
		return -ENOMEM;
	}

	ret = intel_gtt_setup_scratch_page();
	if (ret != 0) {
		intel_gtt_cleanup();
		return ret;
	}

	return 0;
}
static int intel_fake_agp_fetch_size(void)
{
	int num_sizes = ARRAY_SIZE(intel_fake_agp_sizes);
	unsigned int aper_size;
	int i;

	aper_size = (intel_private.base.gtt_mappable_entries << PAGE_SHIFT)
		    / MB(1);

	for (i = 0; i < num_sizes; i++) {
		if (aper_size == intel_fake_agp_sizes[i].size) {
			agp_bridge->current_size =
				(void *) (intel_fake_agp_sizes + i);
			return aper_size;
		}
	}

	return 0;
}
static void i830_cleanup(void)
{
	kunmap(intel_private.i8xx_page);
	intel_private.i8xx_flush_page = NULL;

	__free_page(intel_private.i8xx_page);
	intel_private.i8xx_page = NULL;
}
static void intel_i830_setup_flush(void)
{
	/* return if we've already set the flush mechanism up */
	if (intel_private.i8xx_page)
		return;

	intel_private.i8xx_page = alloc_page(GFP_KERNEL);
	if (!intel_private.i8xx_page)
		return;

	intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
	if (!intel_private.i8xx_flush_page)
		i830_cleanup();
}
/* The chipset_flush interface needs to get data that has already been
 * flushed out of the CPU all the way out to main memory, because the GPU
 * doesn't snoop those buffers.
 *
 * The 8xx series doesn't have the same lovely interface for flushing the
 * chipset write buffers that the later chips do. According to the 865
 * specs, it's 64 octwords, or 1KB. So, to get those previous things in
 * that buffer out, we just fill 1KB and clflush it out, on the assumption
 * that it'll push whatever was in there out. It appears to work.
 */
static void i830_chipset_flush(void)
{
	unsigned int *pg = intel_private.i8xx_flush_page;

	memset(pg, 0, 1024);

	if (cpu_has_clflush)
		clflush_cache_range(pg, 1024);
	else if (wbinvd_on_all_cpus() != 0)
		printk(KERN_ERR "Timed out waiting for cache flush.\n");
}
static void i830_write_entry(dma_addr_t addr, unsigned int entry,
			     unsigned int flags)
{
	u32 pte_flags = I810_PTE_VALID;

	switch (flags) {
	case AGP_DCACHE_MEMORY:
		pte_flags |= I810_PTE_LOCAL;
		break;
	case AGP_USER_CACHED_MEMORY:
		pte_flags |= I830_PTE_SYSTEM_CACHED;
		break;
	}

	writel(addr | pte_flags, intel_private.gtt + entry);
}
static bool intel_enable_gtt(void)
{
	u32 gma_addr;
	u8 __iomem *reg;

	if (INTEL_GTT_GEN == 2)
		pci_read_config_dword(intel_private.pcidev, I810_GMADDR,
				      &gma_addr);
	else
		pci_read_config_dword(intel_private.pcidev, I915_GMADDR,
				      &gma_addr);

	intel_private.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);

	if (INTEL_GTT_GEN >= 6)
		return true;

	if (INTEL_GTT_GEN == 2) {
		u16 gmch_ctrl;

		pci_read_config_word(intel_private.bridge_dev,
				     I830_GMCH_CTRL, &gmch_ctrl);
		gmch_ctrl |= I830_GMCH_ENABLED;
		pci_write_config_word(intel_private.bridge_dev,
				      I830_GMCH_CTRL, gmch_ctrl);

		pci_read_config_word(intel_private.bridge_dev,
				     I830_GMCH_CTRL, &gmch_ctrl);
		if ((gmch_ctrl & I830_GMCH_ENABLED) == 0) {
			dev_err(&intel_private.pcidev->dev,
				"failed to enable the GTT: GMCH_CTRL=%x\n",
				gmch_ctrl);
			return false;
		}
	}

	reg = intel_private.registers+I810_PGETBL_CTL;
	writel(intel_private.PGETBL_save, reg);
	if (HAS_PGTBL_EN && (readl(reg) & I810_PGETBL_ENABLED) == 0) {
		dev_err(&intel_private.pcidev->dev,
			"failed to enable the GTT: PGETBL=%x [expected %x]\n",
			readl(reg), intel_private.PGETBL_save);
		return false;
	}

	return true;
}
static int i830_setup(void)
{
	u32 reg_addr;

	pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &reg_addr);
	reg_addr &= 0xfff80000;

	intel_private.registers = ioremap(reg_addr, KB(64));
	if (!intel_private.registers)
		return -ENOMEM;

	intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE;

	intel_i830_setup_flush();

	return 0;
}
static int intel_fake_agp_create_gatt_table(struct agp_bridge_data *bridge)
{
	agp_bridge->gatt_table_real = NULL;
	agp_bridge->gatt_table = NULL;
	agp_bridge->gatt_bus_addr = 0;

	return 0;
}

static int intel_fake_agp_free_gatt_table(struct agp_bridge_data *bridge)
{
	return 0;
}
static int intel_fake_agp_configure(void)
{
	int i;

	if (!intel_enable_gtt())
		return -EIO;

	agp_bridge->gart_bus_addr = intel_private.gma_bus_addr;

	for (i = intel_private.base.gtt_stolen_entries;
	     i < intel_private.base.gtt_total_entries; i++) {
		intel_private.driver->write_entry(intel_private.scratch_page_dma,
						  i, 0);
	}
	readl(intel_private.gtt+i-1);	/* PCI Posting. */

	global_cache_flush();

	return 0;
}
static bool i830_check_flags(unsigned int flags)
{
	switch (flags) {
	case 0:
	case AGP_PHYS_MEMORY:
	case AGP_USER_CACHED_MEMORY:
	case AGP_USER_MEMORY:
		return true;
	}

	return false;
}
static void intel_gtt_insert_sg_entries(struct scatterlist *sg_list,
					unsigned int sg_len,
					unsigned int pg_start,
					unsigned int flags)
{
	struct scatterlist *sg;
	unsigned int len, m;
	int i, j;

	j = pg_start;

	/* sg may merge pages, but we have to separate
	 * per-page addr for GTT */
	for_each_sg(sg_list, sg, sg_len, i) {
		len = sg_dma_len(sg) >> PAGE_SHIFT;
		for (m = 0; m < len; m++) {
			dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
			intel_private.driver->write_entry(addr,
							  j, flags);
			j++;
		}
	}
	readl(intel_private.gtt+j-1);
}
static int intel_fake_agp_insert_entries(struct agp_memory *mem,
					 off_t pg_start, int type)
{
	int i, j;
	int ret = -EINVAL;

	if (mem->page_count == 0)
		goto out;

	if (pg_start < intel_private.base.gtt_stolen_entries) {
		dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
			   "pg_start == 0x%.8lx, gtt_stolen_entries == 0x%.8x\n",
			   pg_start, intel_private.base.gtt_stolen_entries);

		dev_info(&intel_private.pcidev->dev,
			 "trying to insert into local/stolen memory\n");
		goto out_err;
	}

	if ((pg_start + mem->page_count) > intel_private.base.gtt_total_entries)
		goto out_err;

	if (type != mem->type)
		goto out_err;

	if (!intel_private.driver->check_flags(type))
		goto out_err;

	if (!mem->is_flushed)
		global_cache_flush();

	if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2) {
		ret = intel_agp_map_memory(mem);
		if (ret != 0)
			return ret;

		intel_gtt_insert_sg_entries(mem->sg_list, mem->num_sg,
					    pg_start, type);
	} else {
		for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
			dma_addr_t addr = page_to_phys(mem->pages[i]);
			intel_private.driver->write_entry(addr,
							  j, type);
		}
		readl(intel_private.gtt+j-1);
	}

out:
	ret = 0;
out_err:
	mem->is_flushed = true;
	return ret;
}
static int intel_fake_agp_remove_entries(struct agp_memory *mem,
					 off_t pg_start, int type)
{
	int i;

	if (mem->page_count == 0)
		return 0;

	if (pg_start < intel_private.base.gtt_stolen_entries) {
		dev_info(&intel_private.pcidev->dev,
			 "trying to disable local/stolen memory\n");
		return -EINVAL;
	}

	if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2)
		intel_agp_unmap_memory(mem);

	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
		intel_private.driver->write_entry(intel_private.scratch_page_dma,
						  i, 0);
	}
	readl(intel_private.gtt+i-1);

	return 0;
}
static void intel_fake_agp_chipset_flush(struct agp_bridge_data *bridge)
{
	intel_private.driver->chipset_flush();
}
static struct agp_memory *intel_fake_agp_alloc_by_type(size_t pg_count,
						       int type)
{
	if (type == AGP_PHYS_MEMORY)
		return alloc_agpphysmem_i8xx(pg_count, type);
	/* always return NULL for other allocation types for now */
	return NULL;
}
static int intel_alloc_chipset_flush_resource(void)
{
	int ret;
	ret = pci_bus_alloc_resource(intel_private.bridge_dev->bus, &intel_private.ifp_resource, PAGE_SIZE,
				     PAGE_SIZE, PCIBIOS_MIN_MEM, 0,
				     pcibios_align_resource, intel_private.bridge_dev);

	return ret;
}
static void intel_i915_setup_chipset_flush(void)
{
	int ret;
	u32 temp;

	pci_read_config_dword(intel_private.bridge_dev, I915_IFPADDR, &temp);
	if (!(temp & 0x1)) {
		intel_alloc_chipset_flush_resource();
		intel_private.resource_valid = 1;
		pci_write_config_dword(intel_private.bridge_dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
	} else {
		temp &= ~1;

		intel_private.resource_valid = 1;
		intel_private.ifp_resource.start = temp;
		intel_private.ifp_resource.end = temp + PAGE_SIZE;
		ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
		/* some BIOSes reserve this area in a pnp some don't */
		if (ret)
			intel_private.resource_valid = 0;
	}
}
static void intel_i965_g33_setup_chipset_flush(void)
{
	u32 temp_hi, temp_lo;
	int ret;

	pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4, &temp_hi);
	pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR, &temp_lo);

	if (!(temp_lo & 0x1)) {

		intel_alloc_chipset_flush_resource();

		intel_private.resource_valid = 1;
		pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4,
			upper_32_bits(intel_private.ifp_resource.start));
		pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
	} else {
		u64 l64;

		temp_lo &= ~0x1;
		l64 = ((u64)temp_hi << 32) | temp_lo;

		intel_private.resource_valid = 1;
		intel_private.ifp_resource.start = l64;
		intel_private.ifp_resource.end = l64 + PAGE_SIZE;
		ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
		/* some BIOSes reserve this area in a pnp some don't */
		if (ret)
			intel_private.resource_valid = 0;
	}
}
static void intel_i9xx_setup_flush(void)
{
	/* return if already configured */
	if (intel_private.ifp_resource.start)
		return;

	if (INTEL_GTT_GEN == 6)
		return;

	/* setup a resource for this object */
	intel_private.ifp_resource.name = "Intel Flush Page";
	intel_private.ifp_resource.flags = IORESOURCE_MEM;

	/* Setup chipset flush for 915 */
	if (IS_G33 || INTEL_GTT_GEN >= 4) {
		intel_i965_g33_setup_chipset_flush();
	} else {
		intel_i915_setup_chipset_flush();
	}

	if (intel_private.ifp_resource.start)
		intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
	if (!intel_private.i9xx_flush_page)
		dev_err(&intel_private.pcidev->dev,
			"can't ioremap flush page - no chipset flushing\n");
}
static void i9xx_cleanup(void)
{
	if (intel_private.i9xx_flush_page)
		iounmap(intel_private.i9xx_flush_page);
	if (intel_private.resource_valid)
		release_resource(&intel_private.ifp_resource);
	intel_private.ifp_resource.start = 0;
	intel_private.resource_valid = 0;
}

static void i9xx_chipset_flush(void)
{
	if (intel_private.i9xx_flush_page)
		writel(1, intel_private.i9xx_flush_page);
}
static void i965_write_entry(dma_addr_t addr, unsigned int entry,
			     unsigned int flags)
{
	/* Shift high bits down */
	addr |= (addr >> 28) & 0xf0;
	writel(addr | I810_PTE_VALID, intel_private.gtt + entry);
}
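/*
 * Worked example (illustrative, not in the original source): i965-class
 * chips take a 36-bit address while a PTE is only 32 bits wide, so address
 * bits 35:32 are folded into PTE bits 7:4. For addr = 0xa12345000,
 * (addr >> 28) & 0xf0 == 0xa0, leaving the high nibble 0xa in PTE bits 7:4
 * while bits 31:12 still hold the low part of the page address.
 */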
static bool gen6_check_flags(unsigned int flags)
{
	return true;
}
static void gen6_write_entry(dma_addr_t addr, unsigned int entry,
			     unsigned int flags)
{
	unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
	unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
	u32 pte_flags;

	if (type_mask == AGP_USER_MEMORY)
		pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
	else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
		pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
		if (gfdt)
			pte_flags |= GEN6_PTE_GFDT;
	} else { /* set 'normal'/'cached' to LLC by default */
		pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
		if (gfdt)
			pte_flags |= GEN6_PTE_GFDT;
	}

	/* gen6 has bit11-4 for physical addr bit39-32 */
	addr |= (addr >> 28) & 0xff0;
	writel(addr | pte_flags, intel_private.gtt + entry);
}
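/*
 * Worked example (illustrative, not in the original source): gen6 carries a
 * 40-bit dma address, with PTE bits 11:4 holding address bits 39:32. For
 * addr = 0x1234567000, (addr >> 28) & 0xff0 == 0x120, so the high byte 0x12
 * lands in PTE bits 11:4 before the valid and caching flags are or'ed in.
 */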
static void gen6_cleanup(void)
{
}
static int i9xx_setup(void)
{
	u32 reg_addr;

	pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &reg_addr);

	reg_addr &= 0xfff80000;

	intel_private.registers = ioremap(reg_addr, 128 * 4096);
	if (!intel_private.registers)
		return -ENOMEM;

	if (INTEL_GTT_GEN == 3) {
		u32 gtt_addr;

		pci_read_config_dword(intel_private.pcidev,
				      I915_PTEADDR, &gtt_addr);
		intel_private.gtt_bus_addr = gtt_addr;
	} else {
		u32 gtt_offset;

		switch (INTEL_GTT_GEN) {
		case 5:
		case 6:
			gtt_offset = MB(2);
			break;
		case 4:
		default:
			gtt_offset = KB(512);
			break;
		}
		intel_private.gtt_bus_addr = reg_addr + gtt_offset;
	}

	intel_i9xx_setup_flush();

	return 0;
}
static const struct agp_bridge_driver intel_810_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= intel_i810_sizes,
	.size_type		= FIXED_APER_SIZE,
	.num_aperture_sizes	= 2,
	.needs_scratch_page	= true,
	.configure		= intel_i810_configure,
	.fetch_size		= intel_i810_fetch_size,
	.cleanup		= intel_i810_cleanup,
	.mask_memory		= intel_i810_mask_memory,
	.masks			= intel_i810_masks,
	.agp_enable		= intel_fake_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= agp_generic_create_gatt_table,
	.free_gatt_table	= agp_generic_free_gatt_table,
	.insert_memory		= intel_i810_insert_entries,
	.remove_memory		= intel_i810_remove_entries,
	.alloc_by_type		= intel_i810_alloc_by_type,
	.free_by_type		= intel_i810_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages	= agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages	= agp_generic_destroy_pages,
	.agp_type_to_mask_type	= agp_generic_type_to_mask_type,
};
static const struct agp_bridge_driver intel_fake_agp_driver = {
	.owner			= THIS_MODULE,
	.size_type		= FIXED_APER_SIZE,
	.aperture_sizes		= intel_fake_agp_sizes,
	.num_aperture_sizes	= ARRAY_SIZE(intel_fake_agp_sizes),
	.configure		= intel_fake_agp_configure,
	.fetch_size		= intel_fake_agp_fetch_size,
	.cleanup		= intel_gtt_cleanup,
	.agp_enable		= intel_fake_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= intel_fake_agp_create_gatt_table,
	.free_gatt_table	= intel_fake_agp_free_gatt_table,
	.insert_memory		= intel_fake_agp_insert_entries,
	.remove_memory		= intel_fake_agp_remove_entries,
	.alloc_by_type		= intel_fake_agp_alloc_by_type,
	.free_by_type		= intel_i810_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages	= agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages	= agp_generic_destroy_pages,
	.chipset_flush		= intel_fake_agp_chipset_flush,
};
static const struct intel_gtt_driver i81x_gtt_driver = {
	.gen = 1,
	.dma_mask_size = 32,
};
static const struct intel_gtt_driver i8xx_gtt_driver = {
	.gen = 2,
	.has_pgtbl_enable = 1,
	.setup = i830_setup,
	.cleanup = i830_cleanup,
	.write_entry = i830_write_entry,
	.dma_mask_size = 32,
	.check_flags = i830_check_flags,
	.chipset_flush = i830_chipset_flush,
};
static const struct intel_gtt_driver i915_gtt_driver = {
	.gen = 3,
	.has_pgtbl_enable = 1,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	/* i945 is the last gpu to need phys mem (for overlay and cursors). */
	.write_entry = i830_write_entry,
	.dma_mask_size = 32,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver g33_gtt_driver = {
	.gen = 3,
	.is_g33 = 1,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	.write_entry = i965_write_entry,
	.dma_mask_size = 36,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver pineview_gtt_driver = {
	.gen = 3,
	.is_pineview = 1, .is_g33 = 1,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	.write_entry = i965_write_entry,
	.dma_mask_size = 36,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver i965_gtt_driver = {
	.gen = 4,
	.has_pgtbl_enable = 1,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	.write_entry = i965_write_entry,
	.dma_mask_size = 36,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver g4x_gtt_driver = {
	.gen = 5,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	.write_entry = i965_write_entry,
	.dma_mask_size = 36,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver ironlake_gtt_driver = {
	.gen = 5,
	.is_ironlake = 1,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	.write_entry = i965_write_entry,
	.dma_mask_size = 36,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver sandybridge_gtt_driver = {
	.gen = 6,
	.setup = i9xx_setup,
	.cleanup = gen6_cleanup,
	.write_entry = gen6_write_entry,
	.dma_mask_size = 40,
	.check_flags = gen6_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
/* Table to describe Intel GMCH and AGP/PCIE GART drivers. At least one of
 * driver and gmch_driver must be non-null, and find_gmch will determine
 * which one should be used if a gmch_chip_id is present.
 */
static const struct intel_gtt_driver_description {
	unsigned int gmch_chip_id;
	char *name;
	const struct agp_bridge_driver *gmch_driver;
	const struct intel_gtt_driver *gtt_driver;
} intel_gtt_chipsets[] = {
	{ PCI_DEVICE_ID_INTEL_82810_IG1, "i810", &intel_810_driver,
		&i81x_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82810_IG3, "i810", &intel_810_driver,
		&i81x_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82810E_IG, "i810", &intel_810_driver,
		&i81x_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82815_CGC, "i815", &intel_810_driver,
		&i81x_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82830_CGC, "830M",
		&intel_fake_agp_driver, &i8xx_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82845G_IG, "830M",
		&intel_fake_agp_driver, &i8xx_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82854_IG, "854",
		&intel_fake_agp_driver, &i8xx_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM",
		&intel_fake_agp_driver, &i8xx_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82865_IG, "865",
		&intel_fake_agp_driver, &i8xx_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)",
		&intel_fake_agp_driver, &i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82915G_IG, "915G",
		&intel_fake_agp_driver, &i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM",
		&intel_fake_agp_driver, &i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82945G_IG, "945G",
		&intel_fake_agp_driver, &i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM",
		&intel_fake_agp_driver, &i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME",
		&intel_fake_agp_driver, &i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ",
		&intel_fake_agp_driver, &i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82G35_IG, "G35",
		&intel_fake_agp_driver, &i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q",
		&intel_fake_agp_driver, &i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965G_IG, "965G",
		&intel_fake_agp_driver, &i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM",
		&intel_fake_agp_driver, &i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE",
		&intel_fake_agp_driver, &i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_G33_IG, "G33",
		&intel_fake_agp_driver, &g33_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_Q35_IG, "Q35",
		&intel_fake_agp_driver, &g33_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_Q33_IG, "Q33",
		&intel_fake_agp_driver, &g33_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150",
		&intel_fake_agp_driver, &pineview_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150",
		&intel_fake_agp_driver, &pineview_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_GM45_IG, "GM45",
		&intel_fake_agp_driver, &g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, "Eaglelake",
		&intel_fake_agp_driver, &g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_Q45_IG, "Q45/Q43",
		&intel_fake_agp_driver, &g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_G45_IG, "G45/G43",
		&intel_fake_agp_driver, &g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_B43_IG, "B43",
		&intel_fake_agp_driver, &g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_B43_1_IG, "B43",
		&intel_fake_agp_driver, &g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_G41_IG, "G41",
		&intel_fake_agp_driver, &g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
	    "HD Graphics", &intel_fake_agp_driver, &ironlake_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
	    "HD Graphics", &intel_fake_agp_driver, &ironlake_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG,
	    "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG,
	    "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG,
	    "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG,
	    "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG,
	    "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG,
	    "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG,
	    "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
	{ 0, NULL, NULL }
};
static int find_gmch(u16 device)
{
	struct pci_dev *gmch_device;

	gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
	if (gmch_device && PCI_FUNC(gmch_device->devfn) != 0) {
		gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL,
					     device, gmch_device);
	}

	if (!gmch_device)
		return 0;

	intel_private.pcidev = gmch_device;

	return 1;
}
int intel_gmch_probe(struct pci_dev *pdev,
		     struct agp_bridge_data *bridge)
{
	int i, mask;
	bridge->driver = NULL;

	for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) {
		if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) {
			bridge->driver =
				intel_gtt_chipsets[i].gmch_driver;
			intel_private.driver =
				intel_gtt_chipsets[i].gtt_driver;
			break;
		}
	}

	if (!bridge->driver)
		return 0;

	bridge->dev_private_data = &intel_private;
	bridge->dev = pdev;

	intel_private.bridge_dev = pci_dev_get(pdev);

	dev_info(&pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name);

	mask = intel_private.driver->dma_mask_size;
	if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
		dev_err(&intel_private.pcidev->dev,
			"set gfx device dma mask %d-bit failed!\n", mask);
	else
		pci_set_consistent_dma_mask(intel_private.pcidev,
					    DMA_BIT_MASK(mask));

	if (bridge->driver == &intel_810_driver)
		return 1;

	if (intel_gtt_init() != 0)
		return 0;

	return 1;
}
EXPORT_SYMBOL(intel_gmch_probe);
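/*
 * Illustrative caller sketch (an assumption, not code from this file): the
 * agp glue is expected to try the gmch probe and fall back if it fails,
 * along the lines of:
 *
 *	struct agp_bridge_data *bridge = agp_alloc_bridge();
 *	if (bridge && intel_gmch_probe(pdev, bridge))
 *		agp_add_bridge(bridge);		(gtt now initialized)
 *	else
 *		(not a supported GMCH, try the other agp drivers)
 */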
struct intel_gtt *intel_gtt_get(void)
{
	return &intel_private.base;
}
EXPORT_SYMBOL(intel_gtt_get);
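/*
 * Illustrative use of intel_gtt_get() from a gtt client such as drm/i915
 * (a sketch, assuming it runs after a successful intel_gmch_probe()):
 *
 *	struct intel_gtt *gtt = intel_gtt_get();
 *	unsigned int mappable_pages = gtt->gtt_mappable_entries;
 *	unsigned int usable_pages   = gtt->gtt_total_entries -
 *				      gtt->gtt_stolen_entries;
 */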
void intel_gmch_remove(struct pci_dev *pdev)
{
	if (intel_private.pcidev)
		pci_dev_put(intel_private.pcidev);
	if (intel_private.bridge_dev)
		pci_dev_put(intel_private.bridge_dev);
}
EXPORT_SYMBOL(intel_gmch_remove);
MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
MODULE_LICENSE("GPL and additional rights");