iommu/arm-smmu-v3: Specialise CMD_SYNC handling
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index e67ba6c40faff07aaf0468f898b7ff6f99d94301..18a0fa7dd72d37014c9c50833916bc065b2d1a00 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ ... @@
 #define ARM64_TCR_TBI0_MASK            0x1UL
 
 #define CTXDESC_CD_0_AA64              (1UL << 41)
+#define CTXDESC_CD_0_S                 (1UL << 44)
 #define CTXDESC_CD_0_R                 (1UL << 45)
 #define CTXDESC_CD_0_A                 (1UL << 46)
 #define CTXDESC_CD_0_ASET_SHIFT                47
@@ ... @@
 #define MSI_IOVA_BASE                  0x8000000
 #define MSI_IOVA_LENGTH                        0x100000
 
-/* Until ACPICA headers cover IORT rev. C */
-#ifndef ACPI_IORT_SMMU_HISILICON_HI161X
-#define ACPI_IORT_SMMU_HISILICON_HI161X                0x1
-#endif
-
-#ifndef ACPI_IORT_SMMU_V3_CAVIUM_CN99XX
-#define ACPI_IORT_SMMU_V3_CAVIUM_CN99XX                0x2
-#endif
-
 static bool disable_bypass;
 module_param_named(disable_bypass, disable_bypass, bool, S_IRUGO);
 MODULE_PARM_DESC(disable_bypass,
@@ -604,6 +596,7 @@ struct arm_smmu_device {
 #define ARM_SMMU_FEAT_TRANS_S2         (1 << 10)
 #define ARM_SMMU_FEAT_STALLS           (1 << 11)
 #define ARM_SMMU_FEAT_HYP              (1 << 12)
+#define ARM_SMMU_FEAT_STALL_FORCE      (1 << 13)
        u32                             features;
 
 #define ARM_SMMU_OPT_SKIP_PREFETCH     (1 << 0)
@@ -936,13 +929,22 @@ static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
        queue_write(Q_ENT(q, cons), cmd, q->ent_dwords);
 }
 
+static void arm_smmu_cmdq_insert_cmd(struct arm_smmu_device *smmu, u64 *cmd)
+{
+       struct arm_smmu_queue *q = &smmu->cmdq.q;
+       bool wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV);
+
+       while (queue_insert_raw(q, cmd) == -ENOSPC) {
+               if (queue_poll_cons(q, false, wfe))
+                       dev_err_ratelimited(smmu->dev, "CMDQ timeout\n");
+       }
+}
+
 static void arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
                                    struct arm_smmu_cmdq_ent *ent)
 {
        u64 cmd[CMDQ_ENT_DWORDS];
        unsigned long flags;
-       bool wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV);
-       struct arm_smmu_queue *q = &smmu->cmdq.q;
 
        if (arm_smmu_cmdq_build_cmd(cmd, ent)) {
                dev_warn(smmu->dev, "ignoring unknown CMDQ opcode 0x%x\n",
@@ -951,14 +953,27 @@ static void arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
        }
 
        spin_lock_irqsave(&smmu->cmdq.lock, flags);
-       while (queue_insert_raw(q, cmd) == -ENOSPC) {
-               if (queue_poll_cons(q, false, wfe))
-                       dev_err_ratelimited(smmu->dev, "CMDQ timeout\n");
-       }
+       arm_smmu_cmdq_insert_cmd(smmu, cmd);
+       spin_unlock_irqrestore(&smmu->cmdq.lock, flags);
+}
 
-       if (ent->opcode == CMDQ_OP_CMD_SYNC && queue_poll_cons(q, true, wfe))
-               dev_err_ratelimited(smmu->dev, "CMD_SYNC timeout\n");
+static void arm_smmu_cmdq_issue_sync(struct arm_smmu_device *smmu)
+{
+       u64 cmd[CMDQ_ENT_DWORDS];
+       unsigned long flags;
+       bool wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV);
+       struct arm_smmu_cmdq_ent ent = { .opcode = CMDQ_OP_CMD_SYNC };
+       int ret;
+
+       arm_smmu_cmdq_build_cmd(cmd, &ent);
+
+       spin_lock_irqsave(&smmu->cmdq.lock, flags);
+       arm_smmu_cmdq_insert_cmd(smmu, cmd);
+       ret = queue_poll_cons(&smmu->cmdq.q, true, wfe);
        spin_unlock_irqrestore(&smmu->cmdq.lock, flags);
+
+       if (ret)
+               dev_err_ratelimited(smmu->dev, "CMD_SYNC timeout\n");
 }
 
 /* Context descriptor manipulation functions */
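Taken together, the two hunks above split the old arm_smmu_cmdq_issue_cmd()
into a raw insertion helper, arm_smmu_cmdq_insert_cmd(), which spins for
queue space (using WFE when the SMMU supports SEV), and a dedicated
arm_smmu_cmdq_issue_sync() that builds its own CMD_SYNC entry and polls the
consumer pointer before releasing the lock. A hedged caller sketch, using
only fields that struct arm_smmu_cmdq_ent already has (the "example_" name
is invented for illustration):

/*
 * Sketch only: invalidate one cached STE, then wait for the SMMU to
 * consume everything queued so far. This mirrors what
 * arm_smmu_sync_ste_for_sid() becomes later in this diff.
 */
static void example_inval_ste(struct arm_smmu_device *smmu, u32 sid)
{
	struct arm_smmu_cmdq_ent cmd = {
		.opcode	= CMDQ_OP_CFGI_STE,
		.cfgi	= {
			.sid	= sid,
			.leaf	= true,
		},
	};

	arm_smmu_cmdq_issue_cmd(smmu, &cmd);	/* queue the invalidation */
	arm_smmu_cmdq_issue_sync(smmu);		/* wait for completion */
}
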
@@ -996,6 +1011,11 @@ static void arm_smmu_write_ctx_desc(struct arm_smmu_device *smmu,
              CTXDESC_CD_0_R | CTXDESC_CD_0_A | CTXDESC_CD_0_ASET_PRIVATE |
              CTXDESC_CD_0_AA64 | (u64)cfg->cd.asid << CTXDESC_CD_0_ASID_SHIFT |
              CTXDESC_CD_0_V;
+
+       /* STALL_MODEL==0b10 && CD.S==0 is ILLEGAL */
+       if (smmu->features & ARM_SMMU_FEAT_STALL_FORCE)
+               val |= CTXDESC_CD_0_S;
+
        cfg->cdptr[0] = cpu_to_le64(val);
 
        val = cfg->cd.ttbr & CTXDESC_CD_1_TTB0_MASK << CTXDESC_CD_1_TTB0_SHIFT;
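The new CTXDESC_CD_0_S handling encodes the architectural rule quoted in
the comment: when IDR0.STALL_MODEL is 0b10 (stall force), a context
descriptor with CD.S clear is illegal, so the bit must be set whenever
ARM_SMMU_FEAT_STALL_FORCE was detected. A minimal sketch of that legality
rule (illustrative helper, not driver code):

/* A CD word 0 is only legal under "stall force" if CD.S is set. */
static inline bool example_cd_legal(u64 cd_word0, bool stall_force)
{
	return !stall_force || (cd_word0 & CTXDESC_CD_0_S);
}
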
@@ -1029,8 +1049,7 @@ static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu, u32 sid)
        };
 
        arm_smmu_cmdq_issue_cmd(smmu, &cmd);
-       cmd.opcode = CMDQ_OP_CMD_SYNC;
-       arm_smmu_cmdq_issue_cmd(smmu, &cmd);
+       arm_smmu_cmdq_issue_sync(smmu);
 }
 
 static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
@@ -1094,7 +1113,11 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
                dst[1] = cpu_to_le64(STRTAB_STE_1_SHCFG_INCOMING
                         << STRTAB_STE_1_SHCFG_SHIFT);
                dst[2] = 0; /* Nuke the VMID */
-               if (ste_live)
+               /*
+                * The SMMU can perform negative caching, so we must sync
+                * the STE regardless of whether the old value was live.
+                */
+               if (smmu)
                        arm_smmu_sync_ste_for_sid(smmu, sid);
                return;
        }
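Checking smmu rather than ste_live works because the only caller passing a
NULL smmu is the early stream-table initialisation path, which runs before
the command queue is usable; every runtime caller must issue the sync,
since the SMMU may have negatively cached the previous, even invalid, STE.
A hedged sketch of that early path (assumed shape of the probe-time helper
in this file; details may differ):

static void example_init_bypass_stes(u64 *strtab, unsigned int nent)
{
	unsigned int i;
	struct arm_smmu_strtab_ent ste = { .assigned = false };

	/* No smmu yet, so arm_smmu_write_strtab_ent() skips the sync. */
	for (i = 0; i < nent; ++i) {
		arm_smmu_write_strtab_ent(NULL, -1, strtab, &ste);
		strtab += STRTAB_STE_DWORDS;
	}
}
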
@@ -1112,7 +1135,8 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
 #endif
                         STRTAB_STE_1_STRW_NSEL1 << STRTAB_STE_1_STRW_SHIFT);
 
-               if (smmu->features & ARM_SMMU_FEAT_STALLS)
+               if (smmu->features & ARM_SMMU_FEAT_STALLS &&
+                  !(smmu->features & ARM_SMMU_FEAT_STALL_FORCE))
                        dst[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD);
 
                val |= (ste->s1_cfg->cdptr_dma & STRTAB_STE_0_S1CTXPTR_MASK
@@ -1352,10 +1376,7 @@ static irqreturn_t arm_smmu_combined_irq_handler(int irq, void *dev)
 /* IO_PGTABLE API */
 static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
 {
-       struct arm_smmu_cmdq_ent cmd;
-
-       cmd.opcode = CMDQ_OP_CMD_SYNC;
-       arm_smmu_cmdq_issue_cmd(smmu, &cmd);
+       arm_smmu_cmdq_issue_sync(smmu);
 }
 
 static void arm_smmu_tlb_sync(void *cookie)
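__arm_smmu_tlb_sync() is the funnel for the io-pgtable TLB-sync callback,
so unmap and tear-down paths now reach the specialised CMD_SYNC code as
well. For orientation, the callback wiring elsewhere in this file looks
roughly like this (assumed shape, not part of this diff):

static const struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync,
};
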
@@ -2399,8 +2420,7 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
        /* Invalidate any cached configuration */
        cmd.opcode = CMDQ_OP_CFGI_ALL;
        arm_smmu_cmdq_issue_cmd(smmu, &cmd);
-       cmd.opcode = CMDQ_OP_CMD_SYNC;
-       arm_smmu_cmdq_issue_cmd(smmu, &cmd);
+       arm_smmu_cmdq_issue_sync(smmu);
 
        /* Invalidate any stale TLB entries */
        if (smmu->features & ARM_SMMU_FEAT_HYP) {
@@ -2410,8 +2430,7 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
 
        cmd.opcode = CMDQ_OP_TLBI_NSNH_ALL;
        arm_smmu_cmdq_issue_cmd(smmu, &cmd);
-       cmd.opcode = CMDQ_OP_CMD_SYNC;
-       arm_smmu_cmdq_issue_cmd(smmu, &cmd);
+       arm_smmu_cmdq_issue_sync(smmu);
 
        /* Event queue */
        writeq_relaxed(smmu->evtq.q.q_base, smmu->base + ARM_SMMU_EVTQ_BASE);
@@ -2532,13 +2551,14 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
         * register, but warn on mismatch.
         */
        if (!!(reg & IDR0_COHACC) != coherent)
-               dev_warn(smmu->dev, "IDR0.COHACC overridden by dma-coherent property (%s)\n",
+               dev_warn(smmu->dev, "IDR0.COHACC overridden by FW configuration (%s)\n",
                         coherent ? "true" : "false");
 
        switch (reg & IDR0_STALL_MODEL_MASK << IDR0_STALL_MODEL_SHIFT) {
-       case IDR0_STALL_MODEL_STALL:
-               /* Fallthrough */
        case IDR0_STALL_MODEL_FORCE:
+               smmu->features |= ARM_SMMU_FEAT_STALL_FORCE;
+               /* Fallthrough */
+       case IDR0_STALL_MODEL_STALL:
                smmu->features |= ARM_SMMU_FEAT_STALLS;
        }
 
@@ -2665,7 +2685,7 @@ static void acpi_smmu_get_options(u32 model, struct arm_smmu_device *smmu)
        case ACPI_IORT_SMMU_V3_CAVIUM_CN99XX:
                smmu->options |= ARM_SMMU_OPT_PAGE0_REGS_ONLY;
                break;
-       case ACPI_IORT_SMMU_HISILICON_HI161X:
+       case ACPI_IORT_SMMU_V3_HISILICON_HI161X:
                smmu->options |= ARM_SMMU_OPT_SKIP_PREFETCH;
                break;
        }