]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/commitdiff
x64, x2apic/intr-remap: fix the need for sequential array allocation of iommus
author: Suresh Siddha <suresh.b.siddha@intel.com>
Thu, 10 Jul 2008 18:16:36 +0000 (11:16 -0700)
committer: Ingo Molnar <mingo@elte.hu>
Sat, 12 Jul 2008 06:44:47 +0000 (08:44 +0200)
Clean up the intel-iommu code related to the deferred iommu flush logic. There is
no need to allocate all the iommus as a sequential array.

This will be used later in the interrupt-remapping patch series to
allocate each iommu much earlier and individually, one per device-remapping
hardware unit.

Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: akpm@linux-foundation.org
Cc: arjan@linux.intel.com
Cc: andi@firstfloor.org
Cc: ebiederm@xmission.com
Cc: jbarnes@virtuousgeek.org
Cc: steiner@sgi.com
Signed-off-by: Ingo Molnar <mingo@elte.hu>
drivers/pci/dmar.c
drivers/pci/intel-iommu.c
drivers/pci/intel-iommu.h

index c00e387f5b7522d029ded027d97595dc554a295e..1a59423a8edaf75adda02026232cba745648547c 100644 (file)
@@ -377,11 +377,18 @@ int __init early_dmar_detect(void)
        return (ACPI_SUCCESS(status) ? 1 : 0);
 }
 
-struct intel_iommu *alloc_iommu(struct intel_iommu *iommu,
-                               struct dmar_drhd_unit *drhd)
+struct intel_iommu *alloc_iommu(struct dmar_drhd_unit *drhd)
 {
+       struct intel_iommu *iommu;
        int map_size;
        u32 ver;
+       static int iommu_allocated = 0;
+
+       iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
+       if (!iommu)
+               return NULL;
+
+       iommu->seq_id = iommu_allocated++;
 
        iommu->reg = ioremap(drhd->reg_base_addr, PAGE_SIZE_4K);
        if (!iommu->reg) {
index 1c0270d3e2e55c9d232841164b91bf132a54ae4c..4d59a6a1f4dd4a977724449f4b13276cf59b8f22 100644 (file)
@@ -58,8 +58,6 @@ static void flush_unmaps_timeout(unsigned long data);
 
 DEFINE_TIMER(unmap_timer,  flush_unmaps_timeout, 0, 0);
 
-static struct intel_iommu *g_iommus;
-
 #define HIGH_WATER_MARK 250
 struct deferred_flush_tables {
        int next;
@@ -1649,8 +1647,6 @@ int __init init_dmars(void)
         * endfor
         */
        for_each_drhd_unit(drhd) {
-               if (drhd->ignored)
-                       continue;
                g_num_of_iommus++;
                /*
                 * lock not needed as this is only incremented in the single
@@ -1659,26 +1655,17 @@ int __init init_dmars(void)
                 */
        }
 
-       g_iommus = kzalloc(g_num_of_iommus * sizeof(*iommu), GFP_KERNEL);
-       if (!g_iommus) {
-               ret = -ENOMEM;
-               goto error;
-       }
-
        deferred_flush = kzalloc(g_num_of_iommus *
                sizeof(struct deferred_flush_tables), GFP_KERNEL);
        if (!deferred_flush) {
-               kfree(g_iommus);
                ret = -ENOMEM;
                goto error;
        }
 
-       i = 0;
        for_each_drhd_unit(drhd) {
                if (drhd->ignored)
                        continue;
-               iommu = alloc_iommu(&g_iommus[i], drhd);
-               i++;
+               iommu = alloc_iommu(drhd);
                if (!iommu) {
                        ret = -ENOMEM;
                        goto error;
@@ -1770,7 +1757,6 @@ error:
                iommu = drhd->iommu;
                free_iommu(iommu);
        }
-       kfree(g_iommus);
        return ret;
 }
 
@@ -1927,7 +1913,10 @@ static void flush_unmaps(void)
        /* just flush them all */
        for (i = 0; i < g_num_of_iommus; i++) {
                if (deferred_flush[i].next) {
-                       iommu_flush_iotlb_global(&g_iommus[i], 0);
+                       struct intel_iommu *iommu =
+                               deferred_flush[i].domain[0]->iommu;
+
+                       iommu_flush_iotlb_global(iommu, 0);
                        for (j = 0; j < deferred_flush[i].next; j++) {
                                __free_iova(&deferred_flush[i].domain[j]->iovad,
                                                deferred_flush[i].iova[j]);
@@ -1957,7 +1946,8 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova)
        if (list_size == HIGH_WATER_MARK)
                flush_unmaps();
 
-       iommu_id = dom->iommu - g_iommus;
+       iommu_id = dom->iommu->seq_id;
+
        next = deferred_flush[iommu_id].next;
        deferred_flush[iommu_id].domain[next] = dom;
        deferred_flush[iommu_id].iova[next] = iova;
index 9e5e98c76c05f9f7e998f8c437a29641ca1c8e50..75c63f65b3f5b315e491e0e7e568434d4891536d 100644 (file)
@@ -182,6 +182,7 @@ struct intel_iommu {
        int             seg;
        u32             gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */
        spinlock_t      register_lock; /* protect register handling */
+       int             seq_id; /* sequence id of the iommu */
 
 #ifdef CONFIG_DMAR
        unsigned long   *domain_ids; /* bitmap of domains */
@@ -198,8 +199,7 @@ struct intel_iommu {
 
 extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev);
 
-extern struct intel_iommu *alloc_iommu(struct intel_iommu *iommu,
-                                      struct dmar_drhd_unit *drhd);
+extern struct intel_iommu *alloc_iommu(struct dmar_drhd_unit *drhd);
 extern void free_iommu(struct intel_iommu *iommu);
 
 #endif