--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
@@ -183,7 +183,14 @@
 {
 	struct arm_smmu_mmu_notifier *smmu_mn = mn_to_smmu(mn);
 	struct arm_smmu_domain *smmu_domain = smmu_mn->domain;
-	size_t size = end - start + 1;
+	size_t size;
+
+	/*
+	 * The mm_types defines vm_end as the first byte after the end address,
+	 * different from IOMMU subsystem using the last address of an address
+	 * range. So do a simple translation here by calculating size correctly.
+	 */
+	size = end - start;
 
 	if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_BTM))
 		arm_smmu_tlb_inv_range_asid(start, size, smmu_mn->cd->asid,
 drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c | 30 ++++++++++++++++++++++++++
 1 file changed, 30 insertions(+)
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c
@@ -258,6 +258,34 @@
 		 dev_name(dev), err);
 }
 
+static int nvidia_smmu_init_context(struct arm_smmu_domain *smmu_domain,
+				    struct io_pgtable_cfg *pgtbl_cfg,
+				    struct device *dev)
+{
+	struct arm_smmu_device *smmu = smmu_domain->smmu;
+	const struct device_node *np = smmu->dev->of_node;
+
+	/*
+	 * Tegra194 and Tegra234 SoCs have the erratum that causes walk cache
+	 * entries to not be invalidated correctly. The problem is that the walk
+	 * cache index generated for IOVA is not same across translation and
+	 * invalidation requests. This is leading to page faults when PMD entry
+	 * is released during unmap and populated with new PTE table during
+	 * subsequent map request. Disabling large page mappings avoids the
+	 * release of PMD entry and avoid translations seeing stale PMD entry in
+	 * walk cache.
+	 * Fix this by limiting the page mappings to PAGE_SIZE on Tegra194 and
+	 * Tegra234.
+	 */
+	if (of_device_is_compatible(np, "nvidia,tegra234-smmu") ||
+	    of_device_is_compatible(np, "nvidia,tegra194-smmu")) {
+		smmu->pgsize_bitmap = PAGE_SIZE;
+		pgtbl_cfg->pgsize_bitmap = smmu->pgsize_bitmap;
+	}
+
+	return 0;
+}
+
 static const struct arm_smmu_impl nvidia_smmu_impl = {
 	.read_reg = nvidia_smmu_read_reg,
 	.write_reg = nvidia_smmu_write_reg,
@@ -268,10 +296,12 @@
 	.global_fault = nvidia_smmu_global_fault,
 	.context_fault = nvidia_smmu_context_fault,
 	.probe_finalize = nvidia_smmu_probe_finalize,
+	.init_context = nvidia_smmu_init_context,
 };
 
 static const struct arm_smmu_impl nvidia_smmu_single_impl = {
 	.probe_finalize = nvidia_smmu_probe_finalize,
+	.init_context = nvidia_smmu_init_context,
 };
 
 struct arm_smmu_device *nvidia_smmu_impl_init(struct arm_smmu_device *smmu)