Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ACPI: scan: Support multiple DMA windows with different offsets

In DT system configurations, of_dma_get_range() returns struct
bus_dma_region DMA regions; they are used to set up a device's
DMA windows, each with a different offset available for translation
between DMA addresses and CPU addresses.

In ACPI system configurations, acpi_dma_get_range() does not return
DMA regions yet, which precludes setting up the dev->dma_range_map
pointer and therefore DMA regions with multiple offsets.

Update acpi_dma_get_range() to return struct bus_dma_region
DMA regions like of_dma_get_range() does.

After updating acpi_dma_get_range(), acpi_arch_dma_setup() is changed for
ARM64: the original dma_addr and size arguments are removed, as they
are now redundant, and 0 and U64_MAX are passed as the dma_base and
size arguments of arch_setup_dma_ops(); this is a simplification
consistent with what other ACPI architectures pass to iommu_setup_dma_ops().

Reviewed-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Jianmin Lv <lvjianmin@loongson.cn>
Reviewed-by: Lorenzo Pieralisi <lpieralisi@kernel.org>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>

authored by

Jianmin Lv and committed by
Rafael J. Wysocki
bf2ee8d0 521a547c

+48 -51
+17 -11
drivers/acpi/arm64/dma.c
··· 4 4 #include <linux/device.h> 5 5 #include <linux/dma-direct.h> 6 6 7 - void acpi_arch_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size) 7 + void acpi_arch_dma_setup(struct device *dev) 8 8 { 9 9 int ret; 10 10 u64 end, mask; 11 - u64 dmaaddr = 0, size = 0, offset = 0; 11 + u64 size = 0; 12 + const struct bus_dma_region *map = NULL; 12 13 13 14 /* 14 15 * If @dev is expected to be DMA-capable then the bus code that created ··· 27 26 else 28 27 size = 1ULL << 32; 29 28 30 - ret = acpi_dma_get_range(dev, &dmaaddr, &offset, &size); 29 + ret = acpi_dma_get_range(dev, &map); 30 + if (!ret && map) { 31 + const struct bus_dma_region *r = map; 32 + 33 + for (end = 0; r->size; r++) { 34 + if (r->dma_start + r->size - 1 > end) 35 + end = r->dma_start + r->size - 1; 36 + } 37 + 38 + size = end + 1; 39 + dev->dma_range_map = map; 40 + } 41 + 31 42 if (ret == -ENODEV) 32 43 ret = iort_dma_get_ranges(dev, &size); 33 44 if (!ret) { ··· 47 34 * Limit coherent and dma mask based on size retrieved from 48 35 * firmware. 49 36 */ 50 - end = dmaaddr + size - 1; 37 + end = size - 1; 51 38 mask = DMA_BIT_MASK(ilog2(end) + 1); 52 39 dev->bus_dma_limit = end; 53 40 dev->coherent_dma_mask = min(dev->coherent_dma_mask, mask); 54 41 *dev->dma_mask = min(*dev->dma_mask, mask); 55 42 } 56 - 57 - *dma_addr = dmaaddr; 58 - *dma_size = size; 59 - 60 - ret = dma_direct_set_offset(dev, dmaaddr + offset, dmaaddr, size); 61 - 62 - dev_dbg(dev, "dma_offset(%#08llx)%s\n", offset, ret ? " failed!" : ""); 63 43 }
+27 -34
drivers/acpi/scan.c
··· 20 20 #include <linux/platform_data/x86/apple.h> 21 21 #include <linux/pgtable.h> 22 22 #include <linux/crc32.h> 23 + #include <linux/dma-direct.h> 23 24 24 25 #include "internal.h" 25 26 ··· 1468 1467 * acpi_dma_get_range() - Get device DMA parameters. 1469 1468 * 1470 1469 * @dev: device to configure 1471 - * @dma_addr: pointer device DMA address result 1472 - * @offset: pointer to the DMA offset result 1473 - * @size: pointer to DMA range size result 1470 + * @map: pointer to DMA ranges result 1474 1471 * 1475 - * Evaluate DMA regions and return respectively DMA region start, offset 1476 - * and size in dma_addr, offset and size on parsing success; it does not 1477 - * update the passed in values on failure. 1472 + * Evaluate DMA regions and return pointer to DMA regions on 1473 + * parsing success; it does not update the passed in values on failure. 1478 1474 * 1479 1475 * Return 0 on success, < 0 on failure. 1480 1476 */ 1481 - int acpi_dma_get_range(struct device *dev, u64 *dma_addr, u64 *offset, 1482 - u64 *size) 1477 + int acpi_dma_get_range(struct device *dev, const struct bus_dma_region **map) 1483 1478 { 1484 1479 struct acpi_device *adev; 1485 1480 LIST_HEAD(list); 1486 1481 struct resource_entry *rentry; 1487 1482 int ret; 1488 1483 struct device *dma_dev = dev; 1489 - u64 len, dma_start = U64_MAX, dma_end = 0, dma_offset = 0; 1484 + struct bus_dma_region *r; 1490 1485 1491 1486 /* 1492 1487 * Walk the device tree chasing an ACPI companion with a _DMA ··· 1507 1510 1508 1511 ret = acpi_dev_get_dma_resources(adev, &list); 1509 1512 if (ret > 0) { 1510 - list_for_each_entry(rentry, &list, node) { 1511 - if (dma_offset && rentry->offset != dma_offset) { 1512 - ret = -EINVAL; 1513 - dev_warn(dma_dev, "Can't handle multiple windows with different offsets\n"); 1514 - goto out; 1515 - } 1516 - dma_offset = rentry->offset; 1517 - 1518 - /* Take lower and upper limits */ 1519 - if (rentry->res->start < dma_start) 1520 - dma_start = rentry->res->start; 1521 
- if (rentry->res->end > dma_end) 1522 - dma_end = rentry->res->end; 1523 - } 1524 - 1525 - if (dma_start >= dma_end) { 1526 - ret = -EINVAL; 1527 - dev_dbg(dma_dev, "Invalid DMA regions configuration\n"); 1513 + r = kcalloc(ret + 1, sizeof(*r), GFP_KERNEL); 1514 + if (!r) { 1515 + ret = -ENOMEM; 1528 1516 goto out; 1529 1517 } 1530 1518 1531 - *dma_addr = dma_start - dma_offset; 1532 - len = dma_end - dma_start; 1533 - *size = max(len, len + 1); 1534 - *offset = dma_offset; 1519 + list_for_each_entry(rentry, &list, node) { 1520 + if (rentry->res->start >= rentry->res->end) { 1521 + kfree(r); 1522 + ret = -EINVAL; 1523 + dev_dbg(dma_dev, "Invalid DMA regions configuration\n"); 1524 + goto out; 1525 + } 1526 + 1527 + r->cpu_start = rentry->res->start; 1528 + r->dma_start = rentry->res->start - rentry->offset; 1529 + r->size = resource_size(rentry->res); 1530 + r->offset = rentry->offset; 1531 + r++; 1532 + } 1533 + 1534 + *map = r; 1535 1535 } 1536 1536 out: 1537 1537 acpi_dev_free_resource_list(&list); ··· 1618 1624 const u32 *input_id) 1619 1625 { 1620 1626 const struct iommu_ops *iommu; 1621 - u64 dma_addr = 0, size = 0; 1622 1627 1623 1628 if (attr == DEV_DMA_NOT_SUPPORTED) { 1624 1629 set_dma_ops(dev, &dma_dummy_ops); 1625 1630 return 0; 1626 1631 } 1627 1632 1628 - acpi_arch_dma_setup(dev, &dma_addr, &size); 1633 + acpi_arch_dma_setup(dev); 1629 1634 1630 1635 iommu = acpi_iommu_configure_id(dev, input_id); 1631 1636 if (PTR_ERR(iommu) == -EPROBE_DEFER) 1632 1637 return -EPROBE_DEFER; 1633 1638 1634 - arch_setup_dma_ops(dev, dma_addr, size, 1639 + arch_setup_dma_ops(dev, 0, U64_MAX, 1635 1640 iommu, attr == DEV_DMA_COHERENT); 1636 1641 1637 1642 return 0;
+1 -2
include/acpi/acpi_bus.h
··· 613 613 int acpi_iommu_fwspec_init(struct device *dev, u32 id, 614 614 struct fwnode_handle *fwnode, 615 615 const struct iommu_ops *ops); 616 - int acpi_dma_get_range(struct device *dev, u64 *dma_addr, u64 *offset, 617 - u64 *size); 616 + int acpi_dma_get_range(struct device *dev, const struct bus_dma_region **map); 618 617 int acpi_dma_configure_id(struct device *dev, enum dev_dma_attr attr, 619 618 const u32 *input_id); 620 619 static inline int acpi_dma_configure(struct device *dev,
+3 -4
include/linux/acpi.h
··· 281 281 282 282 #ifdef CONFIG_ARM64 283 283 void acpi_numa_gicc_affinity_init(struct acpi_srat_gicc_affinity *pa); 284 - void acpi_arch_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size); 284 + void acpi_arch_dma_setup(struct device *dev); 285 285 #else 286 286 static inline void 287 287 acpi_numa_gicc_affinity_init(struct acpi_srat_gicc_affinity *pa) { } 288 288 static inline void 289 - acpi_arch_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size) { } 289 + acpi_arch_dma_setup(struct device *dev) { } 290 290 #endif 291 291 292 292 int acpi_numa_memory_affinity_init (struct acpi_srat_mem_affinity *ma); ··· 977 977 return DEV_DMA_NOT_SUPPORTED; 978 978 } 979 979 980 - static inline int acpi_dma_get_range(struct device *dev, u64 *dma_addr, 981 - u64 *offset, u64 *size) 980 + static inline int acpi_dma_get_range(struct device *dev, const struct bus_dma_region **map) 982 981 { 983 982 return -ENODEV; 984 983 }