Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
drivers/iommu/arm-smmu-v3.c at v4.2 (2701 lines, 70 kB)
/*
 * IOMMU API for ARM architected SMMUv3 implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * Copyright (C) 2015 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver is powered by bad coffee and bombay mix.
 */

#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/platform_device.h>

#include "io-pgtable.h"

/* MMIO registers */
#define ARM_SMMU_IDR0			0x0
#define IDR0_ST_LVL_SHIFT		27
#define IDR0_ST_LVL_MASK		0x3
#define IDR0_ST_LVL_2LVL		(1 << IDR0_ST_LVL_SHIFT)
#define IDR0_STALL_MODEL		(3 << 24)
#define IDR0_TTENDIAN_SHIFT		21
#define IDR0_TTENDIAN_MASK		0x3
#define IDR0_TTENDIAN_LE		(2 << IDR0_TTENDIAN_SHIFT)
#define IDR0_TTENDIAN_BE		(3 << IDR0_TTENDIAN_SHIFT)
#define IDR0_TTENDIAN_MIXED		(0 << IDR0_TTENDIAN_SHIFT)
#define IDR0_CD2L			(1 << 19)
#define IDR0_VMID16			(1 << 18)
#define IDR0_PRI			(1 << 16)
#define IDR0_SEV			(1 << 14)
#define IDR0_MSI			(1 << 13)
#define IDR0_ASID16			(1 << 12)
#define IDR0_ATS			(1 << 10)
#define IDR0_HYP			(1 << 9)
#define IDR0_COHACC			(1 << 4)
#define IDR0_TTF_SHIFT			2
#define IDR0_TTF_MASK			0x3
#define IDR0_TTF_AARCH64		(2 << IDR0_TTF_SHIFT)
#define IDR0_S1P			(1 << 1)
#define IDR0_S2P			(1 << 0)

#define ARM_SMMU_IDR1			0x4
#define IDR1_TABLES_PRESET		(1 << 30)
#define IDR1_QUEUES_PRESET		(1 << 29)
#define IDR1_REL			(1 << 28)
#define IDR1_CMDQ_SHIFT			21
#define IDR1_CMDQ_MASK			0x1f
#define IDR1_EVTQ_SHIFT			16
#define IDR1_EVTQ_MASK			0x1f
#define IDR1_PRIQ_SHIFT			11
#define IDR1_PRIQ_MASK			0x1f
#define IDR1_SSID_SHIFT			6
#define IDR1_SSID_MASK			0x1f
#define IDR1_SID_SHIFT			0
#define IDR1_SID_MASK			0x3f

#define ARM_SMMU_IDR5			0x14
#define IDR5_STALL_MAX_SHIFT		16
#define IDR5_STALL_MAX_MASK		0xffff
#define IDR5_GRAN64K			(1 << 6)
#define IDR5_GRAN16K			(1 << 5)
#define IDR5_GRAN4K			(1 << 4)
#define IDR5_OAS_SHIFT			0
#define IDR5_OAS_MASK			0x7
#define IDR5_OAS_32_BIT			(0 << IDR5_OAS_SHIFT)
#define IDR5_OAS_36_BIT			(1 << IDR5_OAS_SHIFT)
#define IDR5_OAS_40_BIT			(2 << IDR5_OAS_SHIFT)
#define IDR5_OAS_42_BIT			(3 << IDR5_OAS_SHIFT)
#define IDR5_OAS_44_BIT			(4 << IDR5_OAS_SHIFT)
#define IDR5_OAS_48_BIT			(5 << IDR5_OAS_SHIFT)

#define ARM_SMMU_CR0			0x20
#define CR0_CMDQEN			(1 << 3)
#define CR0_EVTQEN			(1 << 2)
#define CR0_PRIQEN			(1 << 1)
#define CR0_SMMUEN			(1 << 0)

#define ARM_SMMU_CR0ACK			0x24

#define ARM_SMMU_CR1			0x28
#define CR1_SH_NSH			0
#define CR1_SH_OSH			2
#define CR1_SH_ISH			3
#define CR1_CACHE_NC			0
#define CR1_CACHE_WB			1
#define CR1_CACHE_WT			2
#define CR1_TABLE_SH_SHIFT		10
#define CR1_TABLE_OC_SHIFT		8
#define CR1_TABLE_IC_SHIFT		6
#define CR1_QUEUE_SH_SHIFT		4
#define CR1_QUEUE_OC_SHIFT		2
#define CR1_QUEUE_IC_SHIFT		0
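/*
 * Editorial note: each multi-bit register field above is described by a
 * _SHIFT/_MASK pair plus pre-shifted field values. For example, the
 * device-reset code (not shown in this excerpt) programs CR1 roughly
 * along the lines of:
 *
 *	reg = (CR1_SH_ISH << CR1_TABLE_SH_SHIFT) |
 *	      (CR1_CACHE_WB << CR1_TABLE_OC_SHIFT) |
 *	      (CR1_CACHE_WB << CR1_TABLE_IC_SHIFT);
 *	writel_relaxed(reg, smmu->base + ARM_SMMU_CR1);
 */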
#define ARM_SMMU_CR2			0x2c
#define CR2_PTM				(1 << 2)
#define CR2_RECINVSID			(1 << 1)
#define CR2_E2H				(1 << 0)

#define ARM_SMMU_IRQ_CTRL		0x50
#define IRQ_CTRL_EVTQ_IRQEN		(1 << 2)
#define IRQ_CTRL_GERROR_IRQEN		(1 << 0)

#define ARM_SMMU_IRQ_CTRLACK		0x54

#define ARM_SMMU_GERROR			0x60
#define GERROR_SFM_ERR			(1 << 8)
#define GERROR_MSI_GERROR_ABT_ERR	(1 << 7)
#define GERROR_MSI_PRIQ_ABT_ERR		(1 << 6)
#define GERROR_MSI_EVTQ_ABT_ERR		(1 << 5)
#define GERROR_MSI_CMDQ_ABT_ERR		(1 << 4)
#define GERROR_PRIQ_ABT_ERR		(1 << 3)
#define GERROR_EVTQ_ABT_ERR		(1 << 2)
#define GERROR_CMDQ_ERR			(1 << 0)
#define GERROR_ERR_MASK			0xfd

#define ARM_SMMU_GERRORN		0x64

#define ARM_SMMU_GERROR_IRQ_CFG0	0x68
#define ARM_SMMU_GERROR_IRQ_CFG1	0x70
#define ARM_SMMU_GERROR_IRQ_CFG2	0x74

#define ARM_SMMU_STRTAB_BASE		0x80
#define STRTAB_BASE_RA			(1UL << 62)
#define STRTAB_BASE_ADDR_SHIFT		6
#define STRTAB_BASE_ADDR_MASK		0x3ffffffffffUL

#define ARM_SMMU_STRTAB_BASE_CFG	0x88
#define STRTAB_BASE_CFG_LOG2SIZE_SHIFT	0
#define STRTAB_BASE_CFG_LOG2SIZE_MASK	0x3f
#define STRTAB_BASE_CFG_SPLIT_SHIFT	6
#define STRTAB_BASE_CFG_SPLIT_MASK	0x1f
#define STRTAB_BASE_CFG_FMT_SHIFT	16
#define STRTAB_BASE_CFG_FMT_MASK	0x3
#define STRTAB_BASE_CFG_FMT_LINEAR	(0 << STRTAB_BASE_CFG_FMT_SHIFT)
#define STRTAB_BASE_CFG_FMT_2LVL	(1 << STRTAB_BASE_CFG_FMT_SHIFT)

#define ARM_SMMU_CMDQ_BASE		0x90
#define ARM_SMMU_CMDQ_PROD		0x98
#define ARM_SMMU_CMDQ_CONS		0x9c

#define ARM_SMMU_EVTQ_BASE		0xa0
#define ARM_SMMU_EVTQ_PROD		0x100a8
#define ARM_SMMU_EVTQ_CONS		0x100ac
#define ARM_SMMU_EVTQ_IRQ_CFG0		0xb0
#define ARM_SMMU_EVTQ_IRQ_CFG1		0xb8
#define ARM_SMMU_EVTQ_IRQ_CFG2		0xbc

#define ARM_SMMU_PRIQ_BASE		0xc0
#define ARM_SMMU_PRIQ_PROD		0x100c8
#define ARM_SMMU_PRIQ_CONS		0x100cc
#define ARM_SMMU_PRIQ_IRQ_CFG0		0xd0
#define ARM_SMMU_PRIQ_IRQ_CFG1		0xd8
#define ARM_SMMU_PRIQ_IRQ_CFG2		0xdc

/* Common MSI config fields */
#define MSI_CFG0_SH_SHIFT		60
#define MSI_CFG0_SH_NSH			(0UL << MSI_CFG0_SH_SHIFT)
#define MSI_CFG0_SH_OSH			(2UL << MSI_CFG0_SH_SHIFT)
#define MSI_CFG0_SH_ISH			(3UL << MSI_CFG0_SH_SHIFT)
#define MSI_CFG0_MEMATTR_SHIFT		56
#define MSI_CFG0_MEMATTR_DEVICE_nGnRE	(0x1 << MSI_CFG0_MEMATTR_SHIFT)
#define MSI_CFG0_ADDR_SHIFT		2
#define MSI_CFG0_ADDR_MASK		0x3fffffffffffUL

#define Q_IDX(q, p)			((p) & ((1 << (q)->max_n_shift) - 1))
#define Q_WRP(q, p)			((p) & (1 << (q)->max_n_shift))
#define Q_OVERFLOW_FLAG			(1 << 31)
#define Q_OVF(q, p)			((p) & Q_OVERFLOW_FLAG)
#define Q_ENT(q, p)			((q)->base +			\
					 Q_IDX(q, p) * (q)->ent_dwords)

#define Q_BASE_RWA			(1UL << 62)
#define Q_BASE_ADDR_SHIFT		5
#define Q_BASE_ADDR_MASK		0xfffffffffffUL
#define Q_BASE_LOG2SIZE_SHIFT		0
#define Q_BASE_LOG2SIZE_MASK		0x1fUL
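/*
 * Editorial example of the prod/cons encoding above: for a queue with
 * max_n_shift == 8 (256 entries), the low 8 bits of a pointer are the
 * index and bit 8 is the wrap flag. Incrementing 0x1ff (index 255, wrap
 * set) yields 0x000 (index 0, wrap clear): the wrap flag toggles on each
 * pass around the ring, which is how queue_full() and queue_empty() below
 * tell a full queue from an empty one when the indices match.
 */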
/*
 * Stream table.
 *
 * Linear: Enough to cover 1 << IDR1.SIDSIZE entries
 * 2lvl: 128k L1 entries,
 *       256 lazy entries per table (each table covers a PCI bus)
 */
#define STRTAB_L1_SZ_SHIFT		20
#define STRTAB_SPLIT			8

#define STRTAB_L1_DESC_DWORDS		1
#define STRTAB_L1_DESC_SPAN_SHIFT	0
#define STRTAB_L1_DESC_SPAN_MASK	0x1fUL
#define STRTAB_L1_DESC_L2PTR_SHIFT	6
#define STRTAB_L1_DESC_L2PTR_MASK	0x3ffffffffffUL

#define STRTAB_STE_DWORDS		8
#define STRTAB_STE_0_V			(1UL << 0)
#define STRTAB_STE_0_CFG_SHIFT		1
#define STRTAB_STE_0_CFG_MASK		0x7UL
#define STRTAB_STE_0_CFG_ABORT		(0UL << STRTAB_STE_0_CFG_SHIFT)
#define STRTAB_STE_0_CFG_BYPASS		(4UL << STRTAB_STE_0_CFG_SHIFT)
#define STRTAB_STE_0_CFG_S1_TRANS	(5UL << STRTAB_STE_0_CFG_SHIFT)
#define STRTAB_STE_0_CFG_S2_TRANS	(6UL << STRTAB_STE_0_CFG_SHIFT)

#define STRTAB_STE_0_S1FMT_SHIFT	4
#define STRTAB_STE_0_S1FMT_LINEAR	(0UL << STRTAB_STE_0_S1FMT_SHIFT)
#define STRTAB_STE_0_S1CTXPTR_SHIFT	6
#define STRTAB_STE_0_S1CTXPTR_MASK	0x3ffffffffffUL
#define STRTAB_STE_0_S1CDMAX_SHIFT	59
#define STRTAB_STE_0_S1CDMAX_MASK	0x1fUL

#define STRTAB_STE_1_S1C_CACHE_NC	0UL
#define STRTAB_STE_1_S1C_CACHE_WBRA	1UL
#define STRTAB_STE_1_S1C_CACHE_WT	2UL
#define STRTAB_STE_1_S1C_CACHE_WB	3UL
#define STRTAB_STE_1_S1C_SH_NSH		0UL
#define STRTAB_STE_1_S1C_SH_OSH		2UL
#define STRTAB_STE_1_S1C_SH_ISH		3UL
#define STRTAB_STE_1_S1CIR_SHIFT	2
#define STRTAB_STE_1_S1COR_SHIFT	4
#define STRTAB_STE_1_S1CSH_SHIFT	6

#define STRTAB_STE_1_S1STALLD		(1UL << 27)

#define STRTAB_STE_1_EATS_ABT		0UL
#define STRTAB_STE_1_EATS_TRANS		1UL
#define STRTAB_STE_1_EATS_S1CHK		2UL
#define STRTAB_STE_1_EATS_SHIFT		28

#define STRTAB_STE_1_STRW_NSEL1		0UL
#define STRTAB_STE_1_STRW_EL2		2UL
#define STRTAB_STE_1_STRW_SHIFT		30

#define STRTAB_STE_2_S2VMID_SHIFT	0
#define STRTAB_STE_2_S2VMID_MASK	0xffffUL
#define STRTAB_STE_2_VTCR_SHIFT		32
#define STRTAB_STE_2_VTCR_MASK		0x7ffffUL
#define STRTAB_STE_2_S2AA64		(1UL << 51)
#define STRTAB_STE_2_S2ENDI		(1UL << 52)
#define STRTAB_STE_2_S2PTW		(1UL << 54)
#define STRTAB_STE_2_S2R		(1UL << 58)

#define STRTAB_STE_3_S2TTB_SHIFT	4
#define STRTAB_STE_3_S2TTB_MASK		0xfffffffffffUL
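/*
 * Editorial example: a minimal valid bypass STE only needs dword 0, i.e.
 * STRTAB_STE_0_V | STRTAB_STE_0_CFG_BYPASS -- this is exactly what
 * arm_smmu_write_strtab_ent() builds for the init-time STEs further down.
 */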
/* Context descriptor (stage-1 only) */
#define CTXDESC_CD_DWORDS		8
#define CTXDESC_CD_0_TCR_T0SZ_SHIFT	0
#define ARM64_TCR_T0SZ_SHIFT		0
#define ARM64_TCR_T0SZ_MASK		0x1fUL
#define CTXDESC_CD_0_TCR_TG0_SHIFT	6
#define ARM64_TCR_TG0_SHIFT		14
#define ARM64_TCR_TG0_MASK		0x3UL
#define CTXDESC_CD_0_TCR_IRGN0_SHIFT	8
#define ARM64_TCR_IRGN0_SHIFT		8
#define ARM64_TCR_IRGN0_MASK		0x3UL
#define CTXDESC_CD_0_TCR_ORGN0_SHIFT	10
#define ARM64_TCR_ORGN0_SHIFT		10
#define ARM64_TCR_ORGN0_MASK		0x3UL
#define CTXDESC_CD_0_TCR_SH0_SHIFT	12
#define ARM64_TCR_SH0_SHIFT		12
#define ARM64_TCR_SH0_MASK		0x3UL
#define CTXDESC_CD_0_TCR_EPD0_SHIFT	14
#define ARM64_TCR_EPD0_SHIFT		7
#define ARM64_TCR_EPD0_MASK		0x1UL
#define CTXDESC_CD_0_TCR_EPD1_SHIFT	30
#define ARM64_TCR_EPD1_SHIFT		23
#define ARM64_TCR_EPD1_MASK		0x1UL

#define CTXDESC_CD_0_ENDI		(1UL << 15)
#define CTXDESC_CD_0_V			(1UL << 31)

#define CTXDESC_CD_0_TCR_IPS_SHIFT	32
#define ARM64_TCR_IPS_SHIFT		32
#define ARM64_TCR_IPS_MASK		0x7UL
#define CTXDESC_CD_0_TCR_TBI0_SHIFT	38
#define ARM64_TCR_TBI0_SHIFT		37
#define ARM64_TCR_TBI0_MASK		0x1UL

#define CTXDESC_CD_0_AA64		(1UL << 41)
#define CTXDESC_CD_0_R			(1UL << 45)
#define CTXDESC_CD_0_A			(1UL << 46)
#define CTXDESC_CD_0_ASET_SHIFT		47
#define CTXDESC_CD_0_ASET_SHARED	(0UL << CTXDESC_CD_0_ASET_SHIFT)
#define CTXDESC_CD_0_ASET_PRIVATE	(1UL << CTXDESC_CD_0_ASET_SHIFT)
#define CTXDESC_CD_0_ASID_SHIFT		48
#define CTXDESC_CD_0_ASID_MASK		0xffffUL

#define CTXDESC_CD_1_TTB0_SHIFT		4
#define CTXDESC_CD_1_TTB0_MASK		0xfffffffffffUL

#define CTXDESC_CD_3_MAIR_SHIFT		0

/* Convert between AArch64 (CPU) TCR format and SMMU CD format */
#define ARM_SMMU_TCR2CD(tcr, fld)					\
	(((tcr) >> ARM64_TCR_##fld##_SHIFT & ARM64_TCR_##fld##_MASK)	\
	 << CTXDESC_CD_0_TCR_##fld##_SHIFT)
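/*
 * Editorial example: ARM_SMMU_TCR2CD(tcr, TG0) expands to
 *
 *	(((tcr) >> ARM64_TCR_TG0_SHIFT & ARM64_TCR_TG0_MASK)
 *		<< CTXDESC_CD_0_TCR_TG0_SHIFT)
 *
 * i.e. it extracts the 2-bit TG0 granule field from bit 14 of the CPU's
 * TCR and re-places it at bit 6 of the context descriptor's TCR layout.
 */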
/* Command queue */
#define CMDQ_ENT_DWORDS			2
#define CMDQ_MAX_SZ_SHIFT		8

#define CMDQ_ERR_SHIFT			24
#define CMDQ_ERR_MASK			0x7f
#define CMDQ_ERR_CERROR_NONE_IDX	0
#define CMDQ_ERR_CERROR_ILL_IDX		1
#define CMDQ_ERR_CERROR_ABT_IDX		2

#define CMDQ_0_OP_SHIFT			0
#define CMDQ_0_OP_MASK			0xffUL
#define CMDQ_0_SSV			(1UL << 11)

#define CMDQ_PREFETCH_0_SID_SHIFT	32
#define CMDQ_PREFETCH_1_SIZE_SHIFT	0
#define CMDQ_PREFETCH_1_ADDR_MASK	~0xfffUL

#define CMDQ_CFGI_0_SID_SHIFT		32
#define CMDQ_CFGI_0_SID_MASK		0xffffffffUL
#define CMDQ_CFGI_1_LEAF		(1UL << 0)
#define CMDQ_CFGI_1_RANGE_SHIFT		0
#define CMDQ_CFGI_1_RANGE_MASK		0x1fUL

#define CMDQ_TLBI_0_VMID_SHIFT		32
#define CMDQ_TLBI_0_ASID_SHIFT		48
#define CMDQ_TLBI_1_LEAF		(1UL << 0)
#define CMDQ_TLBI_1_ADDR_MASK		~0xfffUL

#define CMDQ_PRI_0_SSID_SHIFT		12
#define CMDQ_PRI_0_SSID_MASK		0xfffffUL
#define CMDQ_PRI_0_SID_SHIFT		32
#define CMDQ_PRI_0_SID_MASK		0xffffffffUL
#define CMDQ_PRI_1_GRPID_SHIFT		0
#define CMDQ_PRI_1_GRPID_MASK		0x1ffUL
#define CMDQ_PRI_1_RESP_SHIFT		12
#define CMDQ_PRI_1_RESP_DENY		(0UL << CMDQ_PRI_1_RESP_SHIFT)
#define CMDQ_PRI_1_RESP_FAIL		(1UL << CMDQ_PRI_1_RESP_SHIFT)
#define CMDQ_PRI_1_RESP_SUCC		(2UL << CMDQ_PRI_1_RESP_SHIFT)

#define CMDQ_SYNC_0_CS_SHIFT		12
#define CMDQ_SYNC_0_CS_NONE		(0UL << CMDQ_SYNC_0_CS_SHIFT)
#define CMDQ_SYNC_0_CS_SEV		(2UL << CMDQ_SYNC_0_CS_SHIFT)

/* Event queue */
#define EVTQ_ENT_DWORDS			4
#define EVTQ_MAX_SZ_SHIFT		7

#define EVTQ_0_ID_SHIFT			0
#define EVTQ_0_ID_MASK			0xffUL

/* PRI queue */
#define PRIQ_ENT_DWORDS			2
#define PRIQ_MAX_SZ_SHIFT		8

#define PRIQ_0_SID_SHIFT		0
#define PRIQ_0_SID_MASK			0xffffffffUL
#define PRIQ_0_SSID_SHIFT		32
#define PRIQ_0_SSID_MASK		0xfffffUL
#define PRIQ_0_OF			(1UL << 57)
#define PRIQ_0_PERM_PRIV		(1UL << 58)
#define PRIQ_0_PERM_EXEC		(1UL << 59)
#define PRIQ_0_PERM_READ		(1UL << 60)
#define PRIQ_0_PERM_WRITE		(1UL << 61)
#define PRIQ_0_PRG_LAST			(1UL << 62)
#define PRIQ_0_SSID_V			(1UL << 63)

#define PRIQ_1_PRG_IDX_SHIFT		0
#define PRIQ_1_PRG_IDX_MASK		0x1ffUL
#define PRIQ_1_ADDR_SHIFT		12
#define PRIQ_1_ADDR_MASK		0xfffffffffffffUL

/* High-level queue structures */
#define ARM_SMMU_POLL_TIMEOUT_US	100

static bool disable_bypass;
module_param_named(disable_bypass, disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

enum pri_resp {
	PRI_RESP_DENY,
	PRI_RESP_FAIL,
	PRI_RESP_SUCC,
};

struct arm_smmu_cmdq_ent {
	/* Common fields */
	u8				opcode;
	bool				substream_valid;

	/* Command-specific fields */
	union {
		#define CMDQ_OP_PREFETCH_CFG	0x1
		struct {
			u32			sid;
			u8			size;
			u64			addr;
		} prefetch;

		#define CMDQ_OP_CFGI_STE	0x3
		#define CMDQ_OP_CFGI_ALL	0x4
		struct {
			u32			sid;
			union {
				bool		leaf;
				u8		span;
			};
		} cfgi;

		#define CMDQ_OP_TLBI_NH_ASID	0x11
		#define CMDQ_OP_TLBI_NH_VA	0x12
		#define CMDQ_OP_TLBI_EL2_ALL	0x20
		#define CMDQ_OP_TLBI_S12_VMALL	0x28
		#define CMDQ_OP_TLBI_S2_IPA	0x2a
		#define CMDQ_OP_TLBI_NSNH_ALL	0x30
		struct {
			u16			asid;
			u16			vmid;
			bool			leaf;
			u64			addr;
		} tlbi;

		#define CMDQ_OP_PRI_RESP	0x41
		struct {
			u32			sid;
			u32			ssid;
			u16			grpid;
			enum pri_resp		resp;
		} pri;

		#define CMDQ_OP_CMD_SYNC	0x46
	};
};
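/*
 * Editorial example: a stage-1 TLB invalidation of a single leaf entry is
 * expressed as
 *
 *	struct arm_smmu_cmdq_ent cmd = {
 *		.opcode	= CMDQ_OP_TLBI_NH_VA,
 *		.tlbi	= { .asid = asid, .addr = iova, .leaf = true },
 *	};
 *
 * and handed to arm_smmu_cmdq_issue_cmd() below, which marshals it into
 * the two-dword wire format via arm_smmu_cmdq_build_cmd().
 */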
struct arm_smmu_queue {
	int				irq; /* Wired interrupt */

	__le64				*base;
	dma_addr_t			base_dma;
	u64				q_base;

	size_t				ent_dwords;
	u32				max_n_shift;
	u32				prod;
	u32				cons;

	u32 __iomem			*prod_reg;
	u32 __iomem			*cons_reg;
};

struct arm_smmu_cmdq {
	struct arm_smmu_queue		q;
	spinlock_t			lock;
};

struct arm_smmu_evtq {
	struct arm_smmu_queue		q;
	u32				max_stalls;
};

struct arm_smmu_priq {
	struct arm_smmu_queue		q;
};

/* High-level stream table and context descriptor structures */
struct arm_smmu_strtab_l1_desc {
	u8				span;

	__le64				*l2ptr;
	dma_addr_t			l2ptr_dma;
};

struct arm_smmu_s1_cfg {
	__le64				*cdptr;
	dma_addr_t			cdptr_dma;

	struct arm_smmu_ctx_desc {
		u16	asid;
		u64	ttbr;
		u64	tcr;
		u64	mair;
	}				cd;
};

struct arm_smmu_s2_cfg {
	u16				vmid;
	u64				vttbr;
	u64				vtcr;
};

struct arm_smmu_strtab_ent {
	bool				valid;

	bool				bypass;	/* Overrides s1/s2 config */
	struct arm_smmu_s1_cfg		*s1_cfg;
	struct arm_smmu_s2_cfg		*s2_cfg;
};

struct arm_smmu_strtab_cfg {
	__le64				*strtab;
	dma_addr_t			strtab_dma;
	struct arm_smmu_strtab_l1_desc	*l1_desc;
	unsigned int			num_l1_ents;

	u64				strtab_base;
	u32				strtab_base_cfg;
};

/* An SMMUv3 instance */
struct arm_smmu_device {
	struct device			*dev;
	void __iomem			*base;

#define ARM_SMMU_FEAT_2_LVL_STRTAB	(1 << 0)
#define ARM_SMMU_FEAT_2_LVL_CDTAB	(1 << 1)
#define ARM_SMMU_FEAT_TT_LE		(1 << 2)
#define ARM_SMMU_FEAT_TT_BE		(1 << 3)
#define ARM_SMMU_FEAT_PRI		(1 << 4)
#define ARM_SMMU_FEAT_ATS		(1 << 5)
#define ARM_SMMU_FEAT_SEV		(1 << 6)
#define ARM_SMMU_FEAT_MSI		(1 << 7)
#define ARM_SMMU_FEAT_COHERENCY		(1 << 8)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 9)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 10)
#define ARM_SMMU_FEAT_STALLS		(1 << 11)
#define ARM_SMMU_FEAT_HYP		(1 << 12)
	u32				features;

#define ARM_SMMU_OPT_SKIP_PREFETCH	(1 << 0)
	u32				options;

	struct arm_smmu_cmdq		cmdq;
	struct arm_smmu_evtq		evtq;
	struct arm_smmu_priq		priq;

	int				gerr_irq;

	unsigned long			ias; /* IPA */
	unsigned long			oas; /* PA */

#define ARM_SMMU_MAX_ASIDS		(1 << 16)
	unsigned int			asid_bits;
	DECLARE_BITMAP(asid_map, ARM_SMMU_MAX_ASIDS);

#define ARM_SMMU_MAX_VMIDS		(1 << 16)
	unsigned int			vmid_bits;
	DECLARE_BITMAP(vmid_map, ARM_SMMU_MAX_VMIDS);

	unsigned int			ssid_bits;
	unsigned int			sid_bits;

	struct arm_smmu_strtab_cfg	strtab_cfg;
	struct list_head		list;
};

/* SMMU private data for an IOMMU group */
struct arm_smmu_group {
	struct arm_smmu_device		*smmu;
	struct arm_smmu_domain		*domain;
	int				num_sids;
	u32				*sids;
	struct arm_smmu_strtab_ent	ste;
};

/* SMMU private data for an IOMMU domain */
enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct mutex			init_mutex; /* Protects smmu pointer */

	struct io_pgtable_ops		*pgtbl_ops;
	spinlock_t			pgtbl_lock;

	enum arm_smmu_domain_stage	stage;
	union {
		struct arm_smmu_s1_cfg	s1_cfg;
		struct arm_smmu_s2_cfg	s2_cfg;
	};

	struct iommu_domain		domain;
};

/* Our list of SMMU instances */
static DEFINE_SPINLOCK(arm_smmu_devices_lock);
static LIST_HEAD(arm_smmu_devices);

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SKIP_PREFETCH, "hisilicon,broken-prefetch-cmd" },
	{ 0, NULL},
};

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
					  arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				   arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

/* Low-level queue manipulation functions */
static bool queue_full(struct arm_smmu_queue *q)
{
	return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
	       Q_WRP(q, q->prod) != Q_WRP(q, q->cons);
}

static bool queue_empty(struct arm_smmu_queue *q)
{
	return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
	       Q_WRP(q, q->prod) == Q_WRP(q, q->cons);
}

static void queue_sync_cons(struct arm_smmu_queue *q)
{
	q->cons = readl_relaxed(q->cons_reg);
}

static void queue_inc_cons(struct arm_smmu_queue *q)
{
	u32 cons = (Q_WRP(q, q->cons) | Q_IDX(q, q->cons)) + 1;

	q->cons = Q_OVF(q, q->cons) | Q_WRP(q, cons) | Q_IDX(q, cons);
	writel(q->cons, q->cons_reg);
}

static int queue_sync_prod(struct arm_smmu_queue *q)
{
	int ret = 0;
	u32 prod = readl_relaxed(q->prod_reg);

	if (Q_OVF(q, prod) != Q_OVF(q, q->prod))
		ret = -EOVERFLOW;

	q->prod = prod;
	return ret;
}

static void queue_inc_prod(struct arm_smmu_queue *q)
{
	u32 prod = (Q_WRP(q, q->prod) | Q_IDX(q, q->prod)) + 1;

	q->prod = Q_OVF(q, q->prod) | Q_WRP(q, prod) | Q_IDX(q, prod);
	writel(q->prod, q->prod_reg);
}

static bool __queue_cons_before(struct arm_smmu_queue *q, u32 until)
{
	if (Q_WRP(q, q->cons) == Q_WRP(q, until))
		return Q_IDX(q, q->cons) < Q_IDX(q, until);

	return Q_IDX(q, q->cons) >= Q_IDX(q, until);
}

static int queue_poll_cons(struct arm_smmu_queue *q, u32 until, bool wfe)
{
	ktime_t timeout = ktime_add_us(ktime_get(), ARM_SMMU_POLL_TIMEOUT_US);

	while (queue_sync_cons(q), __queue_cons_before(q, until)) {
		if (ktime_compare(ktime_get(), timeout) > 0)
			return -ETIMEDOUT;

		if (wfe) {
			wfe();
		} else {
			cpu_relax();
			udelay(1);
		}
	}

	return 0;
}

static void queue_write(__le64 *dst, u64 *src, size_t n_dwords)
{
	int i;

	for (i = 0; i < n_dwords; ++i)
		*dst++ = cpu_to_le64(*src++);
}
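/*
 * Editorial note on __queue_cons_before() above: when the consumer pointer
 * and the target 'until' pointer carry different wrap flags, the producer
 * has lapped the consumer, so any consumer index >= the target index is
 * still "before" it in queue order -- hence the inverted comparison on the
 * mismatched-wrap path.
 */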
static int queue_insert_raw(struct arm_smmu_queue *q, u64 *ent)
{
	if (queue_full(q))
		return -ENOSPC;

	queue_write(Q_ENT(q, q->prod), ent, q->ent_dwords);
	queue_inc_prod(q);
	return 0;
}

static void queue_read(__le64 *dst, u64 *src, size_t n_dwords)
{
	int i;

	for (i = 0; i < n_dwords; ++i)
		*dst++ = le64_to_cpu(*src++);
}

static int queue_remove_raw(struct arm_smmu_queue *q, u64 *ent)
{
	if (queue_empty(q))
		return -EAGAIN;

	queue_read(ent, Q_ENT(q, q->cons), q->ent_dwords);
	queue_inc_cons(q);
	return 0;
}
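/*
 * Editorial note: queue_insert_raw() is the producer side (used for the
 * command queue, which software produces into), while queue_remove_raw()
 * is the consumer side (used for the event and PRI queues, which the SMMU
 * produces into and software drains).
 */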
/* High-level queue accessors */
static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
{
	memset(cmd, 0, CMDQ_ENT_DWORDS << 3);
	cmd[0] |= (ent->opcode & CMDQ_0_OP_MASK) << CMDQ_0_OP_SHIFT;

	switch (ent->opcode) {
	case CMDQ_OP_TLBI_EL2_ALL:
	case CMDQ_OP_TLBI_NSNH_ALL:
		break;
	case CMDQ_OP_PREFETCH_CFG:
		cmd[0] |= (u64)ent->prefetch.sid << CMDQ_PREFETCH_0_SID_SHIFT;
		cmd[1] |= ent->prefetch.size << CMDQ_PREFETCH_1_SIZE_SHIFT;
		cmd[1] |= ent->prefetch.addr & CMDQ_PREFETCH_1_ADDR_MASK;
		break;
	case CMDQ_OP_CFGI_STE:
		cmd[0] |= (u64)ent->cfgi.sid << CMDQ_CFGI_0_SID_SHIFT;
		cmd[1] |= ent->cfgi.leaf ? CMDQ_CFGI_1_LEAF : 0;
		break;
	case CMDQ_OP_CFGI_ALL:
		/* Cover the entire SID range */
		cmd[1] |= CMDQ_CFGI_1_RANGE_MASK << CMDQ_CFGI_1_RANGE_SHIFT;
		break;
	case CMDQ_OP_TLBI_NH_VA:
		cmd[0] |= (u64)ent->tlbi.asid << CMDQ_TLBI_0_ASID_SHIFT;
		/* Fallthrough */
	case CMDQ_OP_TLBI_S2_IPA:
		cmd[0] |= (u64)ent->tlbi.vmid << CMDQ_TLBI_0_VMID_SHIFT;
		cmd[1] |= ent->tlbi.leaf ? CMDQ_TLBI_1_LEAF : 0;
		cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_ADDR_MASK;
		break;
	case CMDQ_OP_TLBI_NH_ASID:
		cmd[0] |= (u64)ent->tlbi.asid << CMDQ_TLBI_0_ASID_SHIFT;
		/* Fallthrough */
	case CMDQ_OP_TLBI_S12_VMALL:
		cmd[0] |= (u64)ent->tlbi.vmid << CMDQ_TLBI_0_VMID_SHIFT;
		break;
	case CMDQ_OP_PRI_RESP:
		cmd[0] |= ent->substream_valid ? CMDQ_0_SSV : 0;
		cmd[0] |= ent->pri.ssid << CMDQ_PRI_0_SSID_SHIFT;
		cmd[0] |= (u64)ent->pri.sid << CMDQ_PRI_0_SID_SHIFT;
		cmd[1] |= ent->pri.grpid << CMDQ_PRI_1_GRPID_SHIFT;
		switch (ent->pri.resp) {
		case PRI_RESP_DENY:
			cmd[1] |= CMDQ_PRI_1_RESP_DENY;
			break;
		case PRI_RESP_FAIL:
			cmd[1] |= CMDQ_PRI_1_RESP_FAIL;
			break;
		case PRI_RESP_SUCC:
			cmd[1] |= CMDQ_PRI_1_RESP_SUCC;
			break;
		default:
			return -EINVAL;
		}
		break;
	case CMDQ_OP_CMD_SYNC:
		cmd[0] |= CMDQ_SYNC_0_CS_SEV;
		break;
	default:
		return -ENOENT;
	}

	return 0;
}

static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
{
	static const char *cerror_str[] = {
		[CMDQ_ERR_CERROR_NONE_IDX]	= "No error",
		[CMDQ_ERR_CERROR_ILL_IDX]	= "Illegal command",
		[CMDQ_ERR_CERROR_ABT_IDX]	= "Abort on command fetch",
	};

	int i;
	u64 cmd[CMDQ_ENT_DWORDS];
	struct arm_smmu_queue *q = &smmu->cmdq.q;
	u32 cons = readl_relaxed(q->cons_reg);
	u32 idx = cons >> CMDQ_ERR_SHIFT & CMDQ_ERR_MASK;
	struct arm_smmu_cmdq_ent cmd_sync = {
		.opcode = CMDQ_OP_CMD_SYNC,
	};

	dev_err(smmu->dev, "CMDQ error (cons 0x%08x): %s\n", cons,
		cerror_str[idx]);

	switch (idx) {
	case CMDQ_ERR_CERROR_ILL_IDX:
		break;
	case CMDQ_ERR_CERROR_ABT_IDX:
		dev_err(smmu->dev, "retrying command fetch\n");
	case CMDQ_ERR_CERROR_NONE_IDX:
		return;
	}

	/*
	 * We may have concurrent producers, so we need to be careful
	 * not to touch any of the shadow cmdq state.
	 */
	queue_read(cmd, Q_ENT(q, idx), q->ent_dwords);
	dev_err(smmu->dev, "skipping command in error state:\n");
	for (i = 0; i < ARRAY_SIZE(cmd); ++i)
		dev_err(smmu->dev, "\t0x%016llx\n", (unsigned long long)cmd[i]);

	/* Convert the erroneous command into a CMD_SYNC */
	if (arm_smmu_cmdq_build_cmd(cmd, &cmd_sync)) {
		dev_err(smmu->dev, "failed to convert to CMD_SYNC\n");
		return;
	}

	queue_write(Q_ENT(q, idx), cmd, q->ent_dwords);
}

static void arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
				    struct arm_smmu_cmdq_ent *ent)
{
	u32 until;
	u64 cmd[CMDQ_ENT_DWORDS];
	bool wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV);
	struct arm_smmu_queue *q = &smmu->cmdq.q;

	if (arm_smmu_cmdq_build_cmd(cmd, ent)) {
		dev_warn(smmu->dev, "ignoring unknown CMDQ opcode 0x%x\n",
			 ent->opcode);
		return;
	}

	spin_lock(&smmu->cmdq.lock);
	while (until = q->prod + 1, queue_insert_raw(q, cmd) == -ENOSPC) {
		/*
		 * Keep the queue locked, otherwise the producer could wrap
		 * twice and we could see a future consumer pointer that looks
		 * like it's behind us.
		 */
		if (queue_poll_cons(q, until, wfe))
			dev_err_ratelimited(smmu->dev, "CMDQ timeout\n");
	}

	if (ent->opcode == CMDQ_OP_CMD_SYNC && queue_poll_cons(q, until, wfe))
		dev_err_ratelimited(smmu->dev, "CMD_SYNC timeout\n");
	spin_unlock(&smmu->cmdq.lock);
}
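/*
 * Editorial example: callers needing a completion guarantee chain a
 * CMD_SYNC behind their command, e.g.
 *
 *	struct arm_smmu_cmdq_ent sync_cmd = { .opcode = CMDQ_OP_CMD_SYNC };
 *	arm_smmu_cmdq_issue_cmd(smmu, &sync_cmd);
 *
 * which is what __arm_smmu_tlb_sync() and arm_smmu_sync_ste_for_sid()
 * below do; arm_smmu_cmdq_issue_cmd() then spins (or waits for event)
 * until the consumer pointer passes the sync.
 */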
/* Context descriptor manipulation functions */
static u64 arm_smmu_cpu_tcr_to_cd(u64 tcr)
{
	u64 val = 0;

	/* Repack the TCR. Just care about TTBR0 for now */
	val |= ARM_SMMU_TCR2CD(tcr, T0SZ);
	val |= ARM_SMMU_TCR2CD(tcr, TG0);
	val |= ARM_SMMU_TCR2CD(tcr, IRGN0);
	val |= ARM_SMMU_TCR2CD(tcr, ORGN0);
	val |= ARM_SMMU_TCR2CD(tcr, SH0);
	val |= ARM_SMMU_TCR2CD(tcr, EPD0);
	val |= ARM_SMMU_TCR2CD(tcr, EPD1);
	val |= ARM_SMMU_TCR2CD(tcr, IPS);
	val |= ARM_SMMU_TCR2CD(tcr, TBI0);

	return val;
}

static void arm_smmu_write_ctx_desc(struct arm_smmu_device *smmu,
				    struct arm_smmu_s1_cfg *cfg)
{
	u64 val;

	/*
	 * We don't need to issue any invalidation here, as we'll invalidate
	 * the STE when installing the new entry anyway.
	 */
	val = arm_smmu_cpu_tcr_to_cd(cfg->cd.tcr) |
#ifdef __BIG_ENDIAN
	      CTXDESC_CD_0_ENDI |
#endif
	      CTXDESC_CD_0_R | CTXDESC_CD_0_A | CTXDESC_CD_0_ASET_PRIVATE |
	      CTXDESC_CD_0_AA64 | (u64)cfg->cd.asid << CTXDESC_CD_0_ASID_SHIFT |
	      CTXDESC_CD_0_V;
	cfg->cdptr[0] = cpu_to_le64(val);

	val = cfg->cd.ttbr & CTXDESC_CD_1_TTB0_MASK << CTXDESC_CD_1_TTB0_SHIFT;
	cfg->cdptr[1] = cpu_to_le64(val);

	cfg->cdptr[3] = cpu_to_le64(cfg->cd.mair << CTXDESC_CD_3_MAIR_SHIFT);
}

/* Stream table manipulation functions */
static void
arm_smmu_write_strtab_l1_desc(__le64 *dst, struct arm_smmu_strtab_l1_desc *desc)
{
	u64 val = 0;

	val |= (desc->span & STRTAB_L1_DESC_SPAN_MASK)
		<< STRTAB_L1_DESC_SPAN_SHIFT;
	val |= desc->l2ptr_dma &
	       STRTAB_L1_DESC_L2PTR_MASK << STRTAB_L1_DESC_L2PTR_SHIFT;

	*dst = cpu_to_le64(val);
}

static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu, u32 sid)
{
	struct arm_smmu_cmdq_ent cmd = {
		.opcode	= CMDQ_OP_CFGI_STE,
		.cfgi	= {
			.sid	= sid,
			.leaf	= true,
		},
	};

	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	cmd.opcode = CMDQ_OP_CMD_SYNC;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
}
static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
				      __le64 *dst, struct arm_smmu_strtab_ent *ste)
{
	/*
	 * This is hideously complicated, but we only really care about
	 * three cases at the moment:
	 *
	 * 1. Invalid (all zero) -> bypass (init)
	 * 2. Bypass -> translation (attach)
	 * 3. Translation -> bypass (detach)
	 *
	 * Given that we can't update the STE atomically and the SMMU
	 * doesn't read the thing in a defined order, that leaves us
	 * with the following maintenance requirements:
	 *
	 * 1. Update Config, return (init time STEs aren't live)
	 * 2. Write everything apart from dword 0, sync, write dword 0, sync
	 * 3. Update Config, sync
	 */
	u64 val = le64_to_cpu(dst[0]);
	bool ste_live = false;
	struct arm_smmu_cmdq_ent prefetch_cmd = {
		.opcode		= CMDQ_OP_PREFETCH_CFG,
		.prefetch	= {
			.sid	= sid,
		},
	};

	if (val & STRTAB_STE_0_V) {
		u64 cfg;

		cfg = val & STRTAB_STE_0_CFG_MASK << STRTAB_STE_0_CFG_SHIFT;
		switch (cfg) {
		case STRTAB_STE_0_CFG_BYPASS:
			break;
		case STRTAB_STE_0_CFG_S1_TRANS:
		case STRTAB_STE_0_CFG_S2_TRANS:
			ste_live = true;
			break;
		default:
			BUG(); /* STE corruption */
		}
	}

	/* Nuke the existing Config, as we're going to rewrite it */
	val &= ~(STRTAB_STE_0_CFG_MASK << STRTAB_STE_0_CFG_SHIFT);

	if (ste->valid)
		val |= STRTAB_STE_0_V;
	else
		val &= ~STRTAB_STE_0_V;

	if (ste->bypass) {
		val |= disable_bypass ? STRTAB_STE_0_CFG_ABORT
				      : STRTAB_STE_0_CFG_BYPASS;
		dst[0] = cpu_to_le64(val);
		dst[2] = 0; /* Nuke the VMID */
		if (ste_live)
			arm_smmu_sync_ste_for_sid(smmu, sid);
		return;
	}

	if (ste->s1_cfg) {
		BUG_ON(ste_live);
		dst[1] = cpu_to_le64(
			 STRTAB_STE_1_S1C_CACHE_WBRA
			 << STRTAB_STE_1_S1CIR_SHIFT |
			 STRTAB_STE_1_S1C_CACHE_WBRA
			 << STRTAB_STE_1_S1COR_SHIFT |
			 STRTAB_STE_1_S1C_SH_ISH << STRTAB_STE_1_S1CSH_SHIFT |
			 STRTAB_STE_1_S1STALLD |
#ifdef CONFIG_PCI_ATS
			 STRTAB_STE_1_EATS_TRANS << STRTAB_STE_1_EATS_SHIFT |
#endif
			 STRTAB_STE_1_STRW_NSEL1 << STRTAB_STE_1_STRW_SHIFT);

		val |= (ste->s1_cfg->cdptr_dma & STRTAB_STE_0_S1CTXPTR_MASK
		        << STRTAB_STE_0_S1CTXPTR_SHIFT) |
			STRTAB_STE_0_CFG_S1_TRANS;
	}

	if (ste->s2_cfg) {
		BUG_ON(ste_live);
		dst[2] = cpu_to_le64(
			 ste->s2_cfg->vmid << STRTAB_STE_2_S2VMID_SHIFT |
			 (ste->s2_cfg->vtcr & STRTAB_STE_2_VTCR_MASK)
			  << STRTAB_STE_2_VTCR_SHIFT |
#ifdef __BIG_ENDIAN
			 STRTAB_STE_2_S2ENDI |
#endif
			 STRTAB_STE_2_S2PTW | STRTAB_STE_2_S2AA64 |
			 STRTAB_STE_2_S2R);

		dst[3] = cpu_to_le64(ste->s2_cfg->vttbr &
			 STRTAB_STE_3_S2TTB_MASK << STRTAB_STE_3_S2TTB_SHIFT);

		val |= STRTAB_STE_0_CFG_S2_TRANS;
	}

	arm_smmu_sync_ste_for_sid(smmu, sid);
	dst[0] = cpu_to_le64(val);
	arm_smmu_sync_ste_for_sid(smmu, sid);

	/* It's likely that we'll want to use the new STE soon */
	if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH))
		arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
}

static void arm_smmu_init_bypass_stes(u64 *strtab, unsigned int nent)
{
	unsigned int i;
	struct arm_smmu_strtab_ent ste = {
		.valid	= true,
		.bypass	= true,
	};

	for (i = 0; i < nent; ++i) {
		arm_smmu_write_strtab_ent(NULL, -1, strtab, &ste);
		strtab += STRTAB_STE_DWORDS;
	}
}

static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
{
	size_t size;
	void *strtab;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
	struct arm_smmu_strtab_l1_desc *desc = &cfg->l1_desc[sid >> STRTAB_SPLIT];

	if (desc->l2ptr)
		return 0;

	size = 1 << (STRTAB_SPLIT + ilog2(STRTAB_STE_DWORDS) + 3);
	strtab = &cfg->strtab[(sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS];

	desc->span = STRTAB_SPLIT + 1;
	desc->l2ptr = dma_zalloc_coherent(smmu->dev, size, &desc->l2ptr_dma,
					  GFP_KERNEL);
	if (!desc->l2ptr) {
		dev_err(smmu->dev,
			"failed to allocate l2 stream table for SID %u\n",
			sid);
		return -ENOMEM;
	}

	arm_smmu_init_bypass_stes(desc->l2ptr, 1 << STRTAB_SPLIT);
	arm_smmu_write_strtab_l1_desc(strtab, desc);
	return 0;
}
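/*
 * Editorial example: with STRTAB_SPLIT == 8, SID 0x1234 selects L1
 * descriptor 0x12 and STE 0x34 within that descriptor's 256-entry L2
 * table, so one lazily-allocated L2 table covers exactly one PCI bus
 * (as the "Stream table" comment near the top of the file notes).
 */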
/* IRQ and event handlers */
static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev)
{
	int i;
	struct arm_smmu_device *smmu = dev;
	struct arm_smmu_queue *q = &smmu->evtq.q;
	u64 evt[EVTQ_ENT_DWORDS];

	while (!queue_remove_raw(q, evt)) {
		u8 id = evt[0] >> EVTQ_0_ID_SHIFT & EVTQ_0_ID_MASK;

		dev_info(smmu->dev, "event 0x%02x received:\n", id);
		for (i = 0; i < ARRAY_SIZE(evt); ++i)
			dev_info(smmu->dev, "\t0x%016llx\n",
				 (unsigned long long)evt[i]);
	}

	/* Sync our overflow flag, as we believe we're up to speed */
	q->cons = Q_OVF(q, q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons);
	return IRQ_HANDLED;
}

static irqreturn_t arm_smmu_evtq_handler(int irq, void *dev)
{
	irqreturn_t ret = IRQ_WAKE_THREAD;
	struct arm_smmu_device *smmu = dev;
	struct arm_smmu_queue *q = &smmu->evtq.q;

	/*
	 * Not much we can do on overflow, so scream and pretend we're
	 * trying harder.
	 */
	if (queue_sync_prod(q) == -EOVERFLOW)
		dev_err(smmu->dev, "EVTQ overflow detected -- events lost\n");
	else if (queue_empty(q))
		ret = IRQ_NONE;

	return ret;
}

static irqreturn_t arm_smmu_priq_thread(int irq, void *dev)
{
	struct arm_smmu_device *smmu = dev;
	struct arm_smmu_queue *q = &smmu->priq.q;
	u64 evt[PRIQ_ENT_DWORDS];

	while (!queue_remove_raw(q, evt)) {
		u32 sid, ssid;
		u16 grpid;
		bool ssv, last;

		sid = evt[0] >> PRIQ_0_SID_SHIFT & PRIQ_0_SID_MASK;
		ssv = evt[0] & PRIQ_0_SSID_V;
		ssid = ssv ? evt[0] >> PRIQ_0_SSID_SHIFT & PRIQ_0_SSID_MASK : 0;
		last = evt[0] & PRIQ_0_PRG_LAST;
		grpid = evt[1] >> PRIQ_1_PRG_IDX_SHIFT & PRIQ_1_PRG_IDX_MASK;

		dev_info(smmu->dev, "unexpected PRI request received:\n");
		dev_info(smmu->dev,
			 "\tsid 0x%08x.0x%05x: [%u%s] %sprivileged %s%s%s access at iova 0x%016llx\n",
			 sid, ssid, grpid, last ? "L" : "",
			 evt[0] & PRIQ_0_PERM_PRIV ? "" : "un",
			 evt[0] & PRIQ_0_PERM_READ ? "R" : "",
			 evt[0] & PRIQ_0_PERM_WRITE ? "W" : "",
			 evt[0] & PRIQ_0_PERM_EXEC ? "X" : "",
			 evt[1] & PRIQ_1_ADDR_MASK << PRIQ_1_ADDR_SHIFT);

		if (last) {
			struct arm_smmu_cmdq_ent cmd = {
				.opcode			= CMDQ_OP_PRI_RESP,
				.substream_valid	= ssv,
				.pri			= {
					.sid	= sid,
					.ssid	= ssid,
					.grpid	= grpid,
					.resp	= PRI_RESP_DENY,
				},
			};

			arm_smmu_cmdq_issue_cmd(smmu, &cmd);
		}
	}

	/* Sync our overflow flag, as we believe we're up to speed */
	q->cons = Q_OVF(q, q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons);
	return IRQ_HANDLED;
}

static irqreturn_t arm_smmu_priq_handler(int irq, void *dev)
{
	irqreturn_t ret = IRQ_WAKE_THREAD;
	struct arm_smmu_device *smmu = dev;
	struct arm_smmu_queue *q = &smmu->priq.q;

	/* PRIQ overflow indicates a programming error */
	if (queue_sync_prod(q) == -EOVERFLOW)
		dev_err(smmu->dev, "PRIQ overflow detected -- requests lost\n");
	else if (queue_empty(q))
		ret = IRQ_NONE;

	return ret;
}

static irqreturn_t arm_smmu_cmdq_sync_handler(int irq, void *dev)
{
	/* We don't actually use CMD_SYNC interrupts for anything */
	return IRQ_HANDLED;
}

static int arm_smmu_device_disable(struct arm_smmu_device *smmu);
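/*
 * Editorial note: the evtq/priq *_handler functions above run in hard-IRQ
 * context and merely check whether there is work (returning
 * IRQ_WAKE_THREAD), while the matching *_thread functions drain the queue
 * from the threaded handler registered with devm_request_threaded_irq()
 * in arm_smmu_setup_irqs() further down.
 */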
static irqreturn_t arm_smmu_gerror_handler(int irq, void *dev)
{
	u32 gerror, gerrorn;
	struct arm_smmu_device *smmu = dev;

	gerror = readl_relaxed(smmu->base + ARM_SMMU_GERROR);
	gerrorn = readl_relaxed(smmu->base + ARM_SMMU_GERRORN);

	gerror ^= gerrorn;
	if (!(gerror & GERROR_ERR_MASK))
		return IRQ_NONE; /* No errors pending */

	dev_warn(smmu->dev,
		 "unexpected global error reported (0x%08x), this could be serious\n",
		 gerror);

	if (gerror & GERROR_SFM_ERR) {
		dev_err(smmu->dev, "device has entered Service Failure Mode!\n");
		arm_smmu_device_disable(smmu);
	}

	if (gerror & GERROR_MSI_GERROR_ABT_ERR)
		dev_warn(smmu->dev, "GERROR MSI write aborted\n");

	if (gerror & GERROR_MSI_PRIQ_ABT_ERR) {
		dev_warn(smmu->dev, "PRIQ MSI write aborted\n");
		arm_smmu_priq_handler(irq, smmu->dev);
	}

	if (gerror & GERROR_MSI_EVTQ_ABT_ERR) {
		dev_warn(smmu->dev, "EVTQ MSI write aborted\n");
		arm_smmu_evtq_handler(irq, smmu->dev);
	}

	if (gerror & GERROR_MSI_CMDQ_ABT_ERR) {
		dev_warn(smmu->dev, "CMDQ MSI write aborted\n");
		arm_smmu_cmdq_sync_handler(irq, smmu->dev);
	}

	if (gerror & GERROR_PRIQ_ABT_ERR)
		dev_err(smmu->dev, "PRIQ write aborted -- events may have been lost\n");

	if (gerror & GERROR_EVTQ_ABT_ERR)
		dev_err(smmu->dev, "EVTQ write aborted -- events may have been lost\n");

	if (gerror & GERROR_CMDQ_ERR)
		arm_smmu_cmdq_skip_err(smmu);

	writel(gerror, smmu->base + ARM_SMMU_GERRORN);
	return IRQ_HANDLED;
}

/* IO_PGTABLE API */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	struct arm_smmu_cmdq_ent cmd;

	cmd.opcode = CMDQ_OP_CMD_SYNC;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
}

static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	__arm_smmu_tlb_sync(smmu_domain->smmu);
}

static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cmdq_ent cmd;

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		cmd.opcode	= CMDQ_OP_TLBI_NH_ASID;
		cmd.tlbi.asid	= smmu_domain->s1_cfg.cd.asid;
		cmd.tlbi.vmid	= 0;
	} else {
		cmd.opcode	= CMDQ_OP_TLBI_S12_VMALL;
		cmd.tlbi.vmid	= smmu_domain->s2_cfg.vmid;
	}

	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	__arm_smmu_tlb_sync(smmu);
}

static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cmdq_ent cmd = {
		.tlbi = {
			.leaf	= leaf,
			.addr	= iova,
		},
	};

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		cmd.opcode	= CMDQ_OP_TLBI_NH_VA;
		cmd.tlbi.asid	= smmu_domain->s1_cfg.cd.asid;
	} else {
		cmd.opcode	= CMDQ_OP_TLBI_S2_IPA;
		cmd.tlbi.vmid	= smmu_domain->s2_cfg.vmid;
	}

	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
}

static void arm_smmu_flush_pgtable(void *addr, size_t size, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	unsigned long offset = (unsigned long)addr & ~PAGE_MASK;

	if (smmu->features & ARM_SMMU_FEAT_COHERENCY) {
		dsb(ishst);
	} else {
		dma_addr_t dma_addr;
		struct device *dev = smmu->dev;

		dma_addr = dma_map_page(dev, virt_to_page(addr), offset, size,
					DMA_TO_DEVICE);

		if (dma_mapping_error(dev, dma_addr))
			dev_err(dev, "failed to flush pgtable at %p\n", addr);
		else
			dma_unmap_page(dev, dma_addr, size, DMA_TO_DEVICE);
	}
}

static struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync,
	.flush_pgtable	= arm_smmu_flush_pgtable,
};
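/*
 * Editorial example: the io-pgtable core drives these callbacks roughly as
 *
 *	ops->tlb_add_flush(iova, size, true, cookie);	/- per entry, no wait -/
 *	ops->tlb_sync(cookie);				/- one CMD_SYNC per batch -/
 *
 * so the cost of waiting on the command queue is paid once per batch of
 * invalidations rather than once per TLBI command.
 */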
/* IOMMU API */
static bool arm_smmu_capable(enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		return true;
	case IOMMU_CAP_INTR_REMAP:
		return true; /* MSIs are just memory writes */
	case IOMMU_CAP_NOEXEC:
		return true;
	default:
		return false;
	}
}

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->pgtbl_lock);
	return &smmu_domain->domain;
}

static int arm_smmu_bitmap_alloc(unsigned long *map, int span)
{
	int idx, size = 1 << span;

	do {
		idx = find_first_zero_bit(map, size);
		if (idx == size)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void arm_smmu_bitmap_free(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	free_io_pgtable_ops(smmu_domain->pgtbl_ops);

	/* Free the CD and ASID, if we allocated them */
	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;

		if (cfg->cdptr) {
			dma_free_coherent(smmu_domain->smmu->dev,
					  CTXDESC_CD_DWORDS << 3,
					  cfg->cdptr,
					  cfg->cdptr_dma);

			arm_smmu_bitmap_free(smmu->asid_map, cfg->cd.asid);
		}
	} else {
		struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
		if (cfg->vmid)
			arm_smmu_bitmap_free(smmu->vmid_map, cfg->vmid);
	}

	kfree(smmu_domain);
}

static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	int ret;
	u16 asid;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;

	asid = arm_smmu_bitmap_alloc(smmu->asid_map, smmu->asid_bits);
	if (IS_ERR_VALUE(asid))
		return asid;

	cfg->cdptr = dma_zalloc_coherent(smmu->dev, CTXDESC_CD_DWORDS << 3,
					 &cfg->cdptr_dma, GFP_KERNEL);
	if (!cfg->cdptr) {
		dev_warn(smmu->dev, "failed to allocate context descriptor\n");
		ret = -ENOMEM;
		goto out_free_asid;
	}

	cfg->cd.asid	= asid;
	cfg->cd.ttbr	= pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
	cfg->cd.tcr	= pgtbl_cfg->arm_lpae_s1_cfg.tcr;
	cfg->cd.mair	= pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
	return 0;

out_free_asid:
	arm_smmu_bitmap_free(smmu->asid_map, asid);
	return ret;
}

static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	u16 vmid;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;

	vmid = arm_smmu_bitmap_alloc(smmu->vmid_map, smmu->vmid_bits);
	if (IS_ERR_VALUE(vmid))
		return vmid;

	cfg->vmid	= vmid;
	cfg->vttbr	= pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
	cfg->vtcr	= pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
	return 0;
}

static struct iommu_ops arm_smmu_ops;
static int arm_smmu_domain_finalise(struct iommu_domain *domain)
{
	int ret;
	unsigned long ias, oas;
	enum io_pgtable_fmt fmt;
	struct io_pgtable_cfg pgtbl_cfg;
	struct io_pgtable_ops *pgtbl_ops;
	int (*finalise_stage_fn)(struct arm_smmu_domain *,
				 struct io_pgtable_cfg *);
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	/* Restrict the stage to what we can actually support */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		ias = VA_BITS;
		oas = smmu->ias;
		fmt = ARM_64_LPAE_S1;
		finalise_stage_fn = arm_smmu_domain_finalise_s1;
		break;
	case ARM_SMMU_DOMAIN_NESTED:
	case ARM_SMMU_DOMAIN_S2:
		ias = smmu->ias;
		oas = smmu->oas;
		fmt = ARM_64_LPAE_S2;
		finalise_stage_fn = arm_smmu_domain_finalise_s2;
		break;
	default:
		return -EINVAL;
	}

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= arm_smmu_ops.pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.tlb		= &arm_smmu_gather_ops,
	};

	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops)
		return -ENOMEM;

	arm_smmu_ops.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
	smmu_domain->pgtbl_ops = pgtbl_ops;

	ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg);
	if (IS_ERR_VALUE(ret))
		free_io_pgtable_ops(pgtbl_ops);

	return ret;
}

static struct arm_smmu_group *arm_smmu_group_get(struct device *dev)
{
	struct iommu_group *group;
	struct arm_smmu_group *smmu_group;

	group = iommu_group_get(dev);
	if (!group)
		return NULL;

	smmu_group = iommu_group_get_iommudata(group);
	iommu_group_put(group);
	return smmu_group;
}

static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
{
	__le64 *step;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;

	if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
		struct arm_smmu_strtab_l1_desc *l1_desc;
		int idx;

		/* Two-level walk */
		idx = (sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS;
		l1_desc = &cfg->l1_desc[idx];
		idx = (sid & ((1 << STRTAB_SPLIT) - 1)) * STRTAB_STE_DWORDS;
		step = &l1_desc->l2ptr[idx];
	} else {
		/* Simple linear lookup */
		step = &cfg->strtab[sid * STRTAB_STE_DWORDS];
	}

	return step;
}

static int arm_smmu_install_ste_for_group(struct arm_smmu_group *smmu_group)
{
	int i;
	struct arm_smmu_domain *smmu_domain = smmu_group->domain;
	struct arm_smmu_strtab_ent *ste = &smmu_group->ste;
	struct arm_smmu_device *smmu = smmu_group->smmu;

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		ste->s1_cfg = &smmu_domain->s1_cfg;
		ste->s2_cfg = NULL;
		arm_smmu_write_ctx_desc(smmu, ste->s1_cfg);
	} else {
		ste->s1_cfg = NULL;
		ste->s2_cfg = &smmu_domain->s2_cfg;
	}

	for (i = 0; i < smmu_group->num_sids; ++i) {
		u32 sid = smmu_group->sids[i];
		__le64 *step = arm_smmu_get_step_for_sid(smmu, sid);

		arm_smmu_write_strtab_ent(smmu, sid, step, ste);
	}

	return 0;
}
static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret = 0;
	struct arm_smmu_device *smmu;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_group *smmu_group = arm_smmu_group_get(dev);

	if (!smmu_group)
		return -ENOENT;

	/* Already attached to a different domain? */
	if (smmu_group->domain && smmu_group->domain != smmu_domain)
		return -EEXIST;

	smmu = smmu_group->smmu;
	mutex_lock(&smmu_domain->init_mutex);

	if (!smmu_domain->smmu) {
		smmu_domain->smmu = smmu;
		ret = arm_smmu_domain_finalise(domain);
		if (ret) {
			smmu_domain->smmu = NULL;
			goto out_unlock;
		}
	} else if (smmu_domain->smmu != smmu) {
		dev_err(dev,
			"cannot attach to SMMU %s (upstream of %s)\n",
			dev_name(smmu_domain->smmu->dev),
			dev_name(smmu->dev));
		ret = -ENXIO;
		goto out_unlock;
	}

	/* Group already attached to this domain? */
	if (smmu_group->domain)
		goto out_unlock;

	smmu_group->domain	= smmu_domain;
	smmu_group->ste.bypass	= false;

	ret = arm_smmu_install_ste_for_group(smmu_group);
	if (IS_ERR_VALUE(ret))
		smmu_group->domain = NULL;

out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_group *smmu_group = arm_smmu_group_get(dev);

	BUG_ON(!smmu_domain);
	BUG_ON(!smmu_group);

	mutex_lock(&smmu_domain->init_mutex);
	BUG_ON(smmu_group->domain != smmu_domain);

	smmu_group->ste.bypass = true;
	if (IS_ERR_VALUE(arm_smmu_install_ste_for_group(smmu_group)))
		dev_warn(dev, "failed to install bypass STE\n");

	smmu_group->domain = NULL;
	mutex_unlock(&smmu_domain->init_mutex);
}

static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, size_t size, int prot)
{
	int ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return -ENODEV;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->map(ops, iova, paddr, size, prot);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

static size_t
arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
{
	size_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->unmap(ops, iova, size);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

static phys_addr_t
arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
	phys_addr_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->iova_to_phys(ops, iova);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);

	return ret;
}
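/*
 * Editorial example: these callbacks back the generic IOMMU API, so a
 * caller that has attached a device to an unmanaged domain maps memory
 * with, e.g.,
 *
 *	iommu_map(domain, iova, paddr, SZ_4K, IOMMU_READ | IOMMU_WRITE);
 *
 * which lands in arm_smmu_map() above under pgtbl_lock.
 */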
static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *sidp)
{
	*(u32 *)sidp = alias;
	return 0; /* Continue walking */
}

static void __arm_smmu_release_pci_iommudata(void *data)
{
	kfree(data);
}

static struct arm_smmu_device *arm_smmu_get_for_pci_dev(struct pci_dev *pdev)
{
	struct device_node *of_node;
	struct arm_smmu_device *curr, *smmu = NULL;
	struct pci_bus *bus = pdev->bus;

	/* Walk up to the root bus */
	while (!pci_is_root_bus(bus))
		bus = bus->parent;

	/* Follow the "iommus" phandle from the host controller */
	of_node = of_parse_phandle(bus->bridge->parent->of_node, "iommus", 0);
	if (!of_node)
		return NULL;

	/* See if we can find an SMMU corresponding to the phandle */
	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(curr, &arm_smmu_devices, list) {
		if (curr->dev->of_node == of_node) {
			smmu = curr;
			break;
		}
	}
	spin_unlock(&arm_smmu_devices_lock);
	of_node_put(of_node);
	return smmu;
}

static bool arm_smmu_sid_in_range(struct arm_smmu_device *smmu, u32 sid)
{
	unsigned long limit = smmu->strtab_cfg.num_l1_ents;

	if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB)
		limit *= 1UL << STRTAB_SPLIT;

	return sid < limit;
}

static int arm_smmu_add_device(struct device *dev)
{
	int i, ret;
	u32 sid, *sids;
	struct pci_dev *pdev;
	struct iommu_group *group;
	struct arm_smmu_group *smmu_group;
	struct arm_smmu_device *smmu;

	/* We only support PCI, for now */
	if (!dev_is_pci(dev))
		return -ENODEV;

	pdev = to_pci_dev(dev);
	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	smmu_group = iommu_group_get_iommudata(group);
	if (!smmu_group) {
		smmu = arm_smmu_get_for_pci_dev(pdev);
		if (!smmu) {
			ret = -ENOENT;
			goto out_put_group;
		}

		smmu_group = kzalloc(sizeof(*smmu_group), GFP_KERNEL);
		if (!smmu_group) {
			ret = -ENOMEM;
			goto out_put_group;
		}

		smmu_group->ste.valid	= true;
		smmu_group->smmu	= smmu;
		iommu_group_set_iommudata(group, smmu_group,
					  __arm_smmu_release_pci_iommudata);
	} else {
		smmu = smmu_group->smmu;
	}

	/* Assume SID == RID until firmware tells us otherwise */
	pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
	for (i = 0; i < smmu_group->num_sids; ++i) {
		/* If we already know about this SID, then we're done */
		if (smmu_group->sids[i] == sid)
			return 0;
	}

	/* Check the SID is in range of the SMMU and our stream table */
	if (!arm_smmu_sid_in_range(smmu, sid)) {
		ret = -ERANGE;
		goto out_put_group;
	}

	/* Ensure l2 strtab is initialised */
	if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
		ret = arm_smmu_init_l2_strtab(smmu, sid);
		if (ret)
			goto out_put_group;
	}

	/* Resize the SID array for the group */
	smmu_group->num_sids++;
	sids = krealloc(smmu_group->sids, smmu_group->num_sids * sizeof(*sids),
			GFP_KERNEL);
	if (!sids) {
		smmu_group->num_sids--;
		ret = -ENOMEM;
		goto out_put_group;
	}

	/* Add the new SID */
	sids[smmu_group->num_sids - 1] = sid;
	smmu_group->sids = sids;
	return 0;

out_put_group:
	iommu_group_put(group);
	return ret;
}

static void arm_smmu_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
}
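/*
 * Editorial example: under the SID == RID assumption above, PCI device
 * 0000:01:02.3 (bus 0x01, devfn 0x13) resolves to StreamID 0x0113, since
 * the PCI requester ID is (bus << 8) | devfn.
 */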
static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
		return 0;
	default:
		return -ENODEV;
	}
}

static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	int ret = 0;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	mutex_lock(&smmu_domain->init_mutex);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		if (smmu_domain->smmu) {
			ret = -EPERM;
			goto out_unlock;
		}

		if (*(int *)data)
			smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
		else
			smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

		break;
	default:
		ret = -ENODEV;
	}

out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static struct iommu_ops arm_smmu_ops = {
	.capable		= arm_smmu_capable,
	.domain_alloc		= arm_smmu_domain_alloc,
	.domain_free		= arm_smmu_domain_free,
	.attach_dev		= arm_smmu_attach_dev,
	.detach_dev		= arm_smmu_detach_dev,
	.map			= arm_smmu_map,
	.unmap			= arm_smmu_unmap,
	.iova_to_phys		= arm_smmu_iova_to_phys,
	.add_device		= arm_smmu_add_device,
	.remove_device		= arm_smmu_remove_device,
	.domain_get_attr	= arm_smmu_domain_get_attr,
	.domain_set_attr	= arm_smmu_domain_set_attr,
	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
};

/* Probing and initialisation functions */
static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu,
				   struct arm_smmu_queue *q,
				   unsigned long prod_off,
				   unsigned long cons_off,
				   size_t dwords)
{
	size_t qsz = ((1 << q->max_n_shift) * dwords) << 3;

	q->base = dma_alloc_coherent(smmu->dev, qsz, &q->base_dma, GFP_KERNEL);
	if (!q->base) {
		dev_err(smmu->dev, "failed to allocate queue (0x%zx bytes)\n",
			qsz);
		return -ENOMEM;
	}

	q->prod_reg	= smmu->base + prod_off;
	q->cons_reg	= smmu->base + cons_off;
	q->ent_dwords	= dwords;

	q->q_base  = Q_BASE_RWA;
	q->q_base |= q->base_dma & Q_BASE_ADDR_MASK << Q_BASE_ADDR_SHIFT;
	q->q_base |= (q->max_n_shift & Q_BASE_LOG2SIZE_MASK)
		     << Q_BASE_LOG2SIZE_SHIFT;

	q->prod = q->cons = 0;
	return 0;
}

static void arm_smmu_free_one_queue(struct arm_smmu_device *smmu,
				    struct arm_smmu_queue *q)
{
	size_t qsz = ((1 << q->max_n_shift) * q->ent_dwords) << 3;

	dma_free_coherent(smmu->dev, qsz, q->base, q->base_dma);
}

static void arm_smmu_free_queues(struct arm_smmu_device *smmu)
{
	arm_smmu_free_one_queue(smmu, &smmu->cmdq.q);
	arm_smmu_free_one_queue(smmu, &smmu->evtq.q);

	if (smmu->features & ARM_SMMU_FEAT_PRI)
		arm_smmu_free_one_queue(smmu, &smmu->priq.q);
}
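/*
 * Editorial example: for the command queue, each entry is CMDQ_ENT_DWORDS
 * (2) and the hardware-probe code (not shown in this excerpt) caps
 * max_n_shift at CMDQ_MAX_SZ_SHIFT (8), so the allocation above is at
 * most (256 * 2) << 3 == 4 KiB of coherent memory.
 */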
static int arm_smmu_init_queues(struct arm_smmu_device *smmu)
{
	int ret;

	/* cmdq */
	spin_lock_init(&smmu->cmdq.lock);
	ret = arm_smmu_init_one_queue(smmu, &smmu->cmdq.q, ARM_SMMU_CMDQ_PROD,
				      ARM_SMMU_CMDQ_CONS, CMDQ_ENT_DWORDS);
	if (ret)
		goto out;

	/* evtq */
	ret = arm_smmu_init_one_queue(smmu, &smmu->evtq.q, ARM_SMMU_EVTQ_PROD,
				      ARM_SMMU_EVTQ_CONS, EVTQ_ENT_DWORDS);
	if (ret)
		goto out_free_cmdq;

	/* priq */
	if (!(smmu->features & ARM_SMMU_FEAT_PRI))
		return 0;

	ret = arm_smmu_init_one_queue(smmu, &smmu->priq.q, ARM_SMMU_PRIQ_PROD,
				      ARM_SMMU_PRIQ_CONS, PRIQ_ENT_DWORDS);
	if (ret)
		goto out_free_evtq;

	return 0;

out_free_evtq:
	arm_smmu_free_one_queue(smmu, &smmu->evtq.q);
out_free_cmdq:
	arm_smmu_free_one_queue(smmu, &smmu->cmdq.q);
out:
	return ret;
}

static void arm_smmu_free_l2_strtab(struct arm_smmu_device *smmu)
{
	int i;
	size_t size;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;

	size = 1 << (STRTAB_SPLIT + ilog2(STRTAB_STE_DWORDS) + 3);
	for (i = 0; i < cfg->num_l1_ents; ++i) {
		struct arm_smmu_strtab_l1_desc *desc = &cfg->l1_desc[i];

		if (!desc->l2ptr)
			continue;

		dma_free_coherent(smmu->dev, size, desc->l2ptr,
				  desc->l2ptr_dma);
	}
}

static int arm_smmu_init_l1_strtab(struct arm_smmu_device *smmu)
{
	unsigned int i;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
	size_t size = sizeof(*cfg->l1_desc) * cfg->num_l1_ents;
	void *strtab = smmu->strtab_cfg.strtab;

	cfg->l1_desc = devm_kzalloc(smmu->dev, size, GFP_KERNEL);
	if (!cfg->l1_desc) {
		dev_err(smmu->dev, "failed to allocate l1 stream table desc\n");
		return -ENOMEM;
	}

	for (i = 0; i < cfg->num_l1_ents; ++i) {
		arm_smmu_write_strtab_l1_desc(strtab, &cfg->l1_desc[i]);
		strtab += STRTAB_L1_DESC_DWORDS << 3;
	}

	return 0;
}

static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
{
	void *strtab;
	u64 reg;
	u32 size, l1size;
	int ret;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;

	/* Calculate the L1 size, capped to the SIDSIZE */
	size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3);
	size = min(size, smmu->sid_bits - STRTAB_SPLIT);
	cfg->num_l1_ents = 1 << size;

	size += STRTAB_SPLIT;
	if (size < smmu->sid_bits)
		dev_warn(smmu->dev,
			 "2-level strtab only covers %u/%u bits of SID\n",
			 size, smmu->sid_bits);

	l1size = cfg->num_l1_ents * (STRTAB_L1_DESC_DWORDS << 3);
	strtab = dma_zalloc_coherent(smmu->dev, l1size, &cfg->strtab_dma,
				     GFP_KERNEL);
	if (!strtab) {
		dev_err(smmu->dev,
			"failed to allocate l1 stream table (%u bytes)\n",
			l1size);
		return -ENOMEM;
	}
	cfg->strtab = strtab;

	/* Configure strtab_base_cfg for 2 levels */
	reg  = STRTAB_BASE_CFG_FMT_2LVL;
	reg |= (size & STRTAB_BASE_CFG_LOG2SIZE_MASK)
		<< STRTAB_BASE_CFG_LOG2SIZE_SHIFT;
	reg |= (STRTAB_SPLIT & STRTAB_BASE_CFG_SPLIT_MASK)
		<< STRTAB_BASE_CFG_SPLIT_SHIFT;
	cfg->strtab_base_cfg = reg;

	ret = arm_smmu_init_l1_strtab(smmu);
	if (ret)
		dma_free_coherent(smmu->dev, l1size, strtab, cfg->strtab_dma);
	return ret;
}

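/*
 * Sizing example for arm_smmu_init_strtab_2lvl() above (illustrative,
 * assuming STRTAB_L1_SZ_SHIFT == 20, STRTAB_L1_DESC_DWORDS == 1 and
 * STRTAB_SPLIT == 8 as defined earlier in this file): the L1 cap works
 * out to 20 - (0 + 3) = 17 bits, so an SMMU reporting sid_bits == 32
 * gets num_l1_ents == 1 << 17 descriptors (a 1MB L1 table) covering
 * 17 + 8 = 25 bits of SID, and the dev_warn() fires to flag the
 * remaining 7 uncovered bits.
 */
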
static int arm_smmu_init_strtab_linear(struct arm_smmu_device *smmu)
{
	void *strtab;
	u64 reg;
	u32 size;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;

	size = (1 << smmu->sid_bits) * (STRTAB_STE_DWORDS << 3);
	strtab = dma_zalloc_coherent(smmu->dev, size, &cfg->strtab_dma,
				     GFP_KERNEL);
	if (!strtab) {
		dev_err(smmu->dev,
			"failed to allocate linear stream table (%u bytes)\n",
			size);
		return -ENOMEM;
	}
	cfg->strtab = strtab;
	cfg->num_l1_ents = 1 << smmu->sid_bits;

	/* Configure strtab_base_cfg for a linear table covering all SIDs */
	reg  = STRTAB_BASE_CFG_FMT_LINEAR;
	reg |= (smmu->sid_bits & STRTAB_BASE_CFG_LOG2SIZE_MASK)
		<< STRTAB_BASE_CFG_LOG2SIZE_SHIFT;
	cfg->strtab_base_cfg = reg;

	arm_smmu_init_bypass_stes(strtab, cfg->num_l1_ents);
	return 0;
}

static int arm_smmu_init_strtab(struct arm_smmu_device *smmu)
{
	u64 reg;
	int ret;

	if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB)
		ret = arm_smmu_init_strtab_2lvl(smmu);
	else
		ret = arm_smmu_init_strtab_linear(smmu);

	if (ret)
		return ret;

	/* Set the strtab base address */
	reg  = smmu->strtab_cfg.strtab_dma &
	       STRTAB_BASE_ADDR_MASK << STRTAB_BASE_ADDR_SHIFT;
	reg |= STRTAB_BASE_RA;
	smmu->strtab_cfg.strtab_base = reg;

	/* Allocate the first VMID for stage-2 bypass STEs */
	set_bit(0, smmu->vmid_map);
	return 0;
}

static void arm_smmu_free_strtab(struct arm_smmu_device *smmu)
{
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
	u32 size = cfg->num_l1_ents;

	if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
		arm_smmu_free_l2_strtab(smmu);
		size *= STRTAB_L1_DESC_DWORDS << 3;
	} else {
		size *= STRTAB_STE_DWORDS << 3;
	}

	dma_free_coherent(smmu->dev, size, cfg->strtab, cfg->strtab_dma);
}

static int arm_smmu_init_structures(struct arm_smmu_device *smmu)
{
	int ret;

	ret = arm_smmu_init_queues(smmu);
	if (ret)
		return ret;

	ret = arm_smmu_init_strtab(smmu);
	if (ret)
		goto out_free_queues;

	return 0;

out_free_queues:
	arm_smmu_free_queues(smmu);
	return ret;
}

static void arm_smmu_free_structures(struct arm_smmu_device *smmu)
{
	arm_smmu_free_strtab(smmu);
	arm_smmu_free_queues(smmu);
}

static int arm_smmu_write_reg_sync(struct arm_smmu_device *smmu, u32 val,
				   unsigned int reg_off, unsigned int ack_off)
{
	u32 reg;

	writel_relaxed(val, smmu->base + reg_off);
	return readl_relaxed_poll_timeout(smmu->base + ack_off, reg, reg == val,
					  1, ARM_SMMU_POLL_TIMEOUT_US);
}

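/*
 * arm_smmu_write_reg_sync() above is the building block for every
 * control register update below: write the new value, then poll the
 * corresponding acknowledge register (e.g. CR0 -> CR0ACK) at 1us
 * intervals until the hardware echoes the value back, returning
 * -ETIMEDOUT if ARM_SMMU_POLL_TIMEOUT_US expires first.
 */
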
static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
{
	int ret, irq;

	/* Disable IRQs first */
	ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_IRQ_CTRL,
				      ARM_SMMU_IRQ_CTRLACK);
	if (ret) {
		dev_err(smmu->dev, "failed to disable irqs\n");
		return ret;
	}

	/* Clear the MSI address regs */
	writeq_relaxed(0, smmu->base + ARM_SMMU_GERROR_IRQ_CFG0);
	writeq_relaxed(0, smmu->base + ARM_SMMU_EVTQ_IRQ_CFG0);

	/* Request wired interrupt lines */
	irq = smmu->evtq.q.irq;
	if (irq) {
		ret = devm_request_threaded_irq(smmu->dev, irq,
						arm_smmu_evtq_handler,
						arm_smmu_evtq_thread,
						0, "arm-smmu-v3-evtq", smmu);
		if (IS_ERR_VALUE(ret))
			dev_warn(smmu->dev, "failed to enable evtq irq\n");
	}

	irq = smmu->cmdq.q.irq;
	if (irq) {
		ret = devm_request_irq(smmu->dev, irq,
				       arm_smmu_cmdq_sync_handler, 0,
				       "arm-smmu-v3-cmdq-sync", smmu);
		if (IS_ERR_VALUE(ret))
			dev_warn(smmu->dev, "failed to enable cmdq-sync irq\n");
	}

	irq = smmu->gerr_irq;
	if (irq) {
		ret = devm_request_irq(smmu->dev, irq, arm_smmu_gerror_handler,
				       0, "arm-smmu-v3-gerror", smmu);
		if (IS_ERR_VALUE(ret))
			dev_warn(smmu->dev, "failed to enable gerror irq\n");
	}

	if (smmu->features & ARM_SMMU_FEAT_PRI) {
		writeq_relaxed(0, smmu->base + ARM_SMMU_PRIQ_IRQ_CFG0);

		irq = smmu->priq.q.irq;
		if (irq) {
			ret = devm_request_threaded_irq(smmu->dev, irq,
							arm_smmu_priq_handler,
							arm_smmu_priq_thread,
							0, "arm-smmu-v3-priq",
							smmu);
			if (IS_ERR_VALUE(ret))
				dev_warn(smmu->dev,
					 "failed to enable priq irq\n");
		}
	}

	/* Enable interrupt generation on the SMMU */
	ret = arm_smmu_write_reg_sync(smmu,
				      IRQ_CTRL_EVTQ_IRQEN |
				      IRQ_CTRL_GERROR_IRQEN,
				      ARM_SMMU_IRQ_CTRL, ARM_SMMU_IRQ_CTRLACK);
	if (ret)
		dev_warn(smmu->dev, "failed to enable irqs\n");

	return 0;
}

static int arm_smmu_device_disable(struct arm_smmu_device *smmu)
{
	int ret;

	ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_CR0, ARM_SMMU_CR0ACK);
	if (ret)
		dev_err(smmu->dev, "failed to clear cr0\n");

	return ret;
}

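/*
 * Bring-up order implemented by arm_smmu_device_reset() below: disable
 * the SMMU, program memory attributes (CR1) and the stream table base,
 * then enable the command queue first so that configuration and TLB
 * invalidation commands can be issued before the event/PRI queues and,
 * finally, translation itself (CR0.SMMUEN) are switched on.
 */
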
static int arm_smmu_device_reset(struct arm_smmu_device *smmu)
{
	int ret;
	u32 reg, enables;
	struct arm_smmu_cmdq_ent cmd;

	/* Clear CR0 and sync (disables SMMU and queue processing) */
	reg = readl_relaxed(smmu->base + ARM_SMMU_CR0);
	if (reg & CR0_SMMUEN)
		dev_warn(smmu->dev, "SMMU currently enabled! Resetting...\n");

	ret = arm_smmu_device_disable(smmu);
	if (ret)
		return ret;

	/* CR1 (table and queue memory attributes) */
	reg = (CR1_SH_ISH << CR1_TABLE_SH_SHIFT) |
	      (CR1_CACHE_WB << CR1_TABLE_OC_SHIFT) |
	      (CR1_CACHE_WB << CR1_TABLE_IC_SHIFT) |
	      (CR1_SH_ISH << CR1_QUEUE_SH_SHIFT) |
	      (CR1_CACHE_WB << CR1_QUEUE_OC_SHIFT) |
	      (CR1_CACHE_WB << CR1_QUEUE_IC_SHIFT);
	writel_relaxed(reg, smmu->base + ARM_SMMU_CR1);

	/* CR2 (random crap) */
	reg = CR2_PTM | CR2_RECINVSID | CR2_E2H;
	writel_relaxed(reg, smmu->base + ARM_SMMU_CR2);

	/* Stream table */
	writeq_relaxed(smmu->strtab_cfg.strtab_base,
		       smmu->base + ARM_SMMU_STRTAB_BASE);
	writel_relaxed(smmu->strtab_cfg.strtab_base_cfg,
		       smmu->base + ARM_SMMU_STRTAB_BASE_CFG);

	/* Command queue */
	writeq_relaxed(smmu->cmdq.q.q_base, smmu->base + ARM_SMMU_CMDQ_BASE);
	writel_relaxed(smmu->cmdq.q.prod, smmu->base + ARM_SMMU_CMDQ_PROD);
	writel_relaxed(smmu->cmdq.q.cons, smmu->base + ARM_SMMU_CMDQ_CONS);

	enables = CR0_CMDQEN;
	ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
				      ARM_SMMU_CR0ACK);
	if (ret) {
		dev_err(smmu->dev, "failed to enable command queue\n");
		return ret;
	}

	/* Invalidate any cached configuration */
	cmd.opcode = CMDQ_OP_CFGI_ALL;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	cmd.opcode = CMDQ_OP_CMD_SYNC;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);

	/* Invalidate any stale TLB entries */
	if (smmu->features & ARM_SMMU_FEAT_HYP) {
		cmd.opcode = CMDQ_OP_TLBI_EL2_ALL;
		arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	}

	cmd.opcode = CMDQ_OP_TLBI_NSNH_ALL;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	cmd.opcode = CMDQ_OP_CMD_SYNC;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);

	/* Event queue */
	writeq_relaxed(smmu->evtq.q.q_base, smmu->base + ARM_SMMU_EVTQ_BASE);
	writel_relaxed(smmu->evtq.q.prod, smmu->base + ARM_SMMU_EVTQ_PROD);
	writel_relaxed(smmu->evtq.q.cons, smmu->base + ARM_SMMU_EVTQ_CONS);

	enables |= CR0_EVTQEN;
	ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
				      ARM_SMMU_CR0ACK);
	if (ret) {
		dev_err(smmu->dev, "failed to enable event queue\n");
		return ret;
	}

	/* PRI queue */
	if (smmu->features & ARM_SMMU_FEAT_PRI) {
		writeq_relaxed(smmu->priq.q.q_base,
			       smmu->base + ARM_SMMU_PRIQ_BASE);
		writel_relaxed(smmu->priq.q.prod,
			       smmu->base + ARM_SMMU_PRIQ_PROD);
		writel_relaxed(smmu->priq.q.cons,
			       smmu->base + ARM_SMMU_PRIQ_CONS);

		enables |= CR0_PRIQEN;
		ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
					      ARM_SMMU_CR0ACK);
		if (ret) {
			dev_err(smmu->dev, "failed to enable PRI queue\n");
			return ret;
		}
	}

	ret = arm_smmu_setup_irqs(smmu);
	if (ret) {
		dev_err(smmu->dev, "failed to setup irqs\n");
		return ret;
	}

	/* Enable the SMMU interface */
	enables |= CR0_SMMUEN;
	ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
				      ARM_SMMU_CR0ACK);
	if (ret) {
		dev_err(smmu->dev, "failed to enable SMMU interface\n");
		return ret;
	}

	return 0;
}

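/*
 * Note on the idiom used when decoding multi-bit ID register fields in
 * arm_smmu_device_probe() below: '<<' binds tighter than '&', so an
 * expression such as reg & IDR0_TTENDIAN_MASK << IDR0_TTENDIAN_SHIFT
 * isolates the field in place, and the result is compared against
 * pre-shifted values such as IDR0_TTENDIAN_LE (2 << 21) rather than
 * being shifted down first.
 */
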
"true" : "false"); 2459 2460 if (reg & IDR0_STALL_MODEL) 2461 smmu->features |= ARM_SMMU_FEAT_STALLS; 2462 2463 if (reg & IDR0_S1P) 2464 smmu->features |= ARM_SMMU_FEAT_TRANS_S1; 2465 2466 if (reg & IDR0_S2P) 2467 smmu->features |= ARM_SMMU_FEAT_TRANS_S2; 2468 2469 if (!(reg & (IDR0_S1P | IDR0_S2P))) { 2470 dev_err(smmu->dev, "no translation support!\n"); 2471 return -ENXIO; 2472 } 2473 2474 /* We only support the AArch64 table format at present */ 2475 if ((reg & IDR0_TTF_MASK << IDR0_TTF_SHIFT) < IDR0_TTF_AARCH64) { 2476 dev_err(smmu->dev, "AArch64 table format not supported!\n"); 2477 return -ENXIO; 2478 } 2479 2480 /* ASID/VMID sizes */ 2481 smmu->asid_bits = reg & IDR0_ASID16 ? 16 : 8; 2482 smmu->vmid_bits = reg & IDR0_VMID16 ? 16 : 8; 2483 2484 /* IDR1 */ 2485 reg = readl_relaxed(smmu->base + ARM_SMMU_IDR1); 2486 if (reg & (IDR1_TABLES_PRESET | IDR1_QUEUES_PRESET | IDR1_REL)) { 2487 dev_err(smmu->dev, "embedded implementation not supported\n"); 2488 return -ENXIO; 2489 } 2490 2491 /* Queue sizes, capped at 4k */ 2492 smmu->cmdq.q.max_n_shift = min((u32)CMDQ_MAX_SZ_SHIFT, 2493 reg >> IDR1_CMDQ_SHIFT & IDR1_CMDQ_MASK); 2494 if (!smmu->cmdq.q.max_n_shift) { 2495 /* Odd alignment restrictions on the base, so ignore for now */ 2496 dev_err(smmu->dev, "unit-length command queue not supported\n"); 2497 return -ENXIO; 2498 } 2499 2500 smmu->evtq.q.max_n_shift = min((u32)EVTQ_MAX_SZ_SHIFT, 2501 reg >> IDR1_EVTQ_SHIFT & IDR1_EVTQ_MASK); 2502 smmu->priq.q.max_n_shift = min((u32)PRIQ_MAX_SZ_SHIFT, 2503 reg >> IDR1_PRIQ_SHIFT & IDR1_PRIQ_MASK); 2504 2505 /* SID/SSID sizes */ 2506 smmu->ssid_bits = reg >> IDR1_SSID_SHIFT & IDR1_SSID_MASK; 2507 smmu->sid_bits = reg >> IDR1_SID_SHIFT & IDR1_SID_MASK; 2508 2509 /* IDR5 */ 2510 reg = readl_relaxed(smmu->base + ARM_SMMU_IDR5); 2511 2512 /* Maximum number of outstanding stalls */ 2513 smmu->evtq.max_stalls = reg >> IDR5_STALL_MAX_SHIFT 2514 & IDR5_STALL_MAX_MASK; 2515 2516 /* Page sizes */ 2517 if (reg & IDR5_GRAN64K) 2518 pgsize_bitmap |= SZ_64K | SZ_512M; 2519 if (reg & IDR5_GRAN16K) 2520 pgsize_bitmap |= SZ_16K | SZ_32M; 2521 if (reg & IDR5_GRAN4K) 2522 pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G; 2523 2524 arm_smmu_ops.pgsize_bitmap &= pgsize_bitmap; 2525 2526 /* Output address size */ 2527 switch (reg & IDR5_OAS_MASK << IDR5_OAS_SHIFT) { 2528 case IDR5_OAS_32_BIT: 2529 smmu->oas = 32; 2530 break; 2531 case IDR5_OAS_36_BIT: 2532 smmu->oas = 36; 2533 break; 2534 case IDR5_OAS_40_BIT: 2535 smmu->oas = 40; 2536 break; 2537 case IDR5_OAS_42_BIT: 2538 smmu->oas = 42; 2539 break; 2540 case IDR5_OAS_44_BIT: 2541 smmu->oas = 44; 2542 break; 2543 case IDR5_OAS_48_BIT: 2544 smmu->oas = 48; 2545 break; 2546 default: 2547 dev_err(smmu->dev, "unknown output address size!\n"); 2548 return -ENXIO; 2549 } 2550 2551 /* Set the DMA mask for our table walker */ 2552 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(smmu->oas))) 2553 dev_warn(smmu->dev, 2554 "failed to set DMA mask for table walker\n"); 2555 2556 if (!smmu->ias) 2557 smmu->ias = smmu->oas; 2558 2559 dev_info(smmu->dev, "ias %lu-bit, oas %lu-bit (features 0x%08x)\n", 2560 smmu->ias, smmu->oas, smmu->features); 2561 return 0; 2562} 2563 2564static int arm_smmu_device_dt_probe(struct platform_device *pdev) 2565{ 2566 int irq, ret; 2567 struct resource *res; 2568 struct arm_smmu_device *smmu; 2569 struct device *dev = &pdev->dev; 2570 2571 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL); 2572 if (!smmu) { 2573 dev_err(dev, "failed to allocate arm_smmu_device\n"); 2574 return -ENOMEM; 2575 } 2576 
static int arm_smmu_device_dt_probe(struct platform_device *pdev)
{
	int irq, ret;
	struct resource *res;
	struct arm_smmu_device *smmu;
	struct device *dev = &pdev->dev;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate arm_smmu_device\n");
		return -ENOMEM;
	}
	smmu->dev = dev;

	/* Base address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (resource_size(res) < SZ_128K) {
		dev_err(dev, "MMIO region too small (%pr)\n", res);
		return -EINVAL;
	}

	smmu->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(smmu->base))
		return PTR_ERR(smmu->base);

	/* Interrupt lines */
	irq = platform_get_irq_byname(pdev, "eventq");
	if (irq > 0)
		smmu->evtq.q.irq = irq;

	irq = platform_get_irq_byname(pdev, "priq");
	if (irq > 0)
		smmu->priq.q.irq = irq;

	irq = platform_get_irq_byname(pdev, "cmdq-sync");
	if (irq > 0)
		smmu->cmdq.q.irq = irq;

	irq = platform_get_irq_byname(pdev, "gerror");
	if (irq > 0)
		smmu->gerr_irq = irq;

	parse_driver_options(smmu);

	/* Probe the h/w */
	ret = arm_smmu_device_probe(smmu);
	if (ret)
		return ret;

	/* Initialise in-memory data structures */
	ret = arm_smmu_init_structures(smmu);
	if (ret)
		return ret;

	/* Reset the device */
	ret = arm_smmu_device_reset(smmu);
	if (ret)
		goto out_free_structures;

	/* Record our private device structure */
	INIT_LIST_HEAD(&smmu->list);
	spin_lock(&arm_smmu_devices_lock);
	list_add(&smmu->list, &arm_smmu_devices);
	spin_unlock(&arm_smmu_devices_lock);
	return 0;

out_free_structures:
	arm_smmu_free_structures(smmu);
	return ret;
}

static int arm_smmu_device_remove(struct platform_device *pdev)
{
	struct arm_smmu_device *curr, *smmu = NULL;
	struct device *dev = &pdev->dev;

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(curr, &arm_smmu_devices, list) {
		if (curr->dev == dev) {
			smmu = curr;
			list_del(&smmu->list);
			break;
		}
	}
	spin_unlock(&arm_smmu_devices_lock);

	if (!smmu)
		return -ENODEV;

	arm_smmu_device_disable(smmu);
	arm_smmu_free_structures(smmu);
	return 0;
}

static const struct of_device_id arm_smmu_of_match[] = {
	{ .compatible = "arm,smmu-v3", },
	{ },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);

static struct platform_driver arm_smmu_driver = {
	.driver	= {
		.name		= "arm-smmu-v3",
		.of_match_table	= of_match_ptr(arm_smmu_of_match),
	},
	.probe	= arm_smmu_device_dt_probe,
	.remove	= arm_smmu_device_remove,
};

static int __init arm_smmu_init(void)
{
	struct device_node *np;
	int ret;

	np = of_find_matching_node(NULL, arm_smmu_of_match);
	if (!np)
		return 0;

	of_node_put(np);

	ret = platform_driver_register(&arm_smmu_driver);
	if (ret)
		return ret;

	return bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
}

static void __exit arm_smmu_exit(void)
{
	return platform_driver_unregister(&arm_smmu_driver);
}

subsys_initcall(arm_smmu_init);
module_exit(arm_smmu_exit);

MODULE_DESCRIPTION("IOMMU API for ARM architected SMMUv3 implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");