···468468 size_t size = buffer->len;469469470470 dma_handle = dma_map_single(dev, addr, size, direction);471471+ if (dma_mapping_error(dma_handle)) {472472+ /*473473+ * reduce current DMA mapping usage,474474+ * delay and try again later or475475+ * reset driver.476476+ */477477+ goto map_error_handling;478478+ }471479472480and to unmap it:473481474482 dma_unmap_single(dev, dma_handle, size, direction);483483+484484+You should call dma_mapping_error() as dma_map_single() could fail and return485485+error. Not all dma implementations support dma_mapping_error() interface.486486+However, it is a good practice to call dma_mapping_error() interface, which487487+will invoke the generic mapping error check interface. Doing so will ensure488488+that the mapping code will work correctly on all dma implementations without489489+any dependency on the specifics of the underlying implementation. Using the490490+returned address without checking for errors could result in failures ranging491491+from panics to silent data corruption. 
Couple of example of incorrect ways to492492+check for errors that make assumptions about the underlying dma implementation493493+are as follows and these are applicable to dma_map_page() as well.494494+495495+Incorrect example 1:496496+ dma_addr_t dma_handle;497497+498498+ dma_handle = dma_map_single(dev, addr, size, direction);499499+ if ((dma_handle & 0xffff != 0) || (dma_handle >= 0x1000000)) {500500+ goto map_error;501501+ }502502+503503+Incorrect example 2:504504+ dma_addr_t dma_handle;505505+506506+ dma_handle = dma_map_single(dev, addr, size, direction);507507+ if (dma_handle == DMA_ERROR_CODE) {508508+ goto map_error;509509+ }475510476511You should call dma_unmap_single when the DMA activity is finished, e.g.477512from the interrupt which told you that the DMA transfer is done.···524489 size_t size = buffer->len;525490526491 dma_handle = dma_map_page(dev, page, offset, size, direction);492492+ if (dma_mapping_error(dma_handle)) {493493+ /*494494+ * reduce current DMA mapping usage,495495+ * delay and try again later or496496+ * reset driver.497497+ */498498+ goto map_error_handling;499499+ }527500528501 ...529502530503 dma_unmap_page(dev, dma_handle, size, direction);531504532505Here, "offset" means byte offset within the given page.506506+507507+You should call dma_mapping_error() as dma_map_page() could fail and return508508+error as outlined under the dma_map_single() discussion.509509+510510+You should call dma_unmap_page when the DMA activity is finished, e.g.511511+from the interrupt which told you that the DMA transfer is done.533512534513With scatterlists, you map a region gathered from several regions by:535514···627578 dma_addr_t mapping;628579629580 mapping = dma_map_single(cp->dev, buffer, len, DMA_FROM_DEVICE);581581+ if (dma_mapping_error(dma_handle)) {582582+ /*583583+ * reduce current DMA mapping usage,584584+ * delay and try again later or585585+ * reset driver.586586+ */587587+ goto map_error_handling;588588+ }630589631590 cp->rx_buf = 
buffer;632591 cp->rx_len = len;···715658 * delay and try again later or716659 * reset driver.717660 */661661+ goto map_error_handling;662662+ }663663+664664+- unmap pages that are already mapped, when mapping error occurs in the middle665665+ of a multiple page mapping attempt. These example are applicable to666666+ dma_map_page() as well.667667+668668+Example 1:669669+ dma_addr_t dma_handle1;670670+ dma_addr_t dma_handle2;671671+672672+ dma_handle1 = dma_map_single(dev, addr, size, direction);673673+ if (dma_mapping_error(dev, dma_handle1)) {674674+ /*675675+ * reduce current DMA mapping usage,676676+ * delay and try again later or677677+ * reset driver.678678+ */679679+ goto map_error_handling1;680680+ }681681+ dma_handle2 = dma_map_single(dev, addr, size, direction);682682+ if (dma_mapping_error(dev, dma_handle2)) {683683+ /*684684+ * reduce current DMA mapping usage,685685+ * delay and try again later or686686+ * reset driver.687687+ */688688+ goto map_error_handling2;689689+ }690690+691691+ ...692692+693693+ map_error_handling2:694694+ dma_unmap_single(dma_handle1);695695+ map_error_handling1:696696+697697+Example 2: (if buffers are allocated a loop, unmap all mapped buffers when698698+ mapping error is detected in the middle)699699+700700+ dma_addr_t dma_addr;701701+ dma_addr_t array[DMA_BUFFERS];702702+ int save_index = 0;703703+704704+ for (i = 0; i < DMA_BUFFERS; i++) {705705+706706+ ...707707+708708+ dma_addr = dma_map_single(dev, addr, size, direction);709709+ if (dma_mapping_error(dev, dma_addr)) {710710+ /*711711+ * reduce current DMA mapping usage,712712+ * delay and try again later or713713+ * reset driver.714714+ */715715+ goto map_error_handling;716716+ }717717+ array[i].dma_addr = dma_addr;718718+ save_index++;719719+ }720720+721721+ ...722722+723723+ map_error_handling:724724+725725+ for (i = 0; i < save_index; i++) {726726+727727+ ...728728+729729+ dma_unmap_single(array[i].dma_addr);718730 }719731720732Networking drivers must call dev_kfree_skb 
to free the socket buffer
+12
Documentation/DMA-API.txt
···678678of preallocated entries is defined per architecture. If it is too low for you679679boot with 'dma_debug_entries=<your_desired_number>' to overwrite the680680architectural default.681681+682682+void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);683683+684684+dma-debug interface debug_dma_mapping_error() to debug drivers that fail685685+to check dma mapping errors on addresses returned by dma_map_single() and686686+dma_map_page() interfaces. This interface clears a flag set by687687+debug_dma_map_page() to indicate that dma_mapping_error() has been called by688688+the driver. When driver does unmap, debug_dma_unmap() checks the flag and if689689+this flag is still set, prints warning message that includes call trace that690690+leads up to the unmap. This interface can be called from dma_mapping_error()691691+routines to enable dma mapping error check debugging.692692+
···5757 * physically contiguous memory regions it is mapping into page sizes5858 * that we support.5959 *6060- * Traditionally the IOMMU core just handed us the mappings directly,6161- * after making sure the size is an order of a 4KiB page and that the6262- * mapping has natural alignment.6363- *6464- * To retain this behavior, we currently advertise that we support6565- * all page sizes that are an order of 4KiB.6666- *6767- * If at some point we'd like to utilize the IOMMU core's new behavior,6868- * we could change this to advertise the real page sizes we support.6060+ * 512GB Pages are not supported due to a hardware bug6961 */7070-#define AMD_IOMMU_PGSIZES (~0xFFFUL)6262+#define AMD_IOMMU_PGSIZES ((~0xFFFUL) & ~(2ULL << 38))71637264static DEFINE_RWLOCK(amd_iommu_devtable_lock);7365···131139 spin_lock_irqsave(&dev_data_list_lock, flags);132140 list_del(&dev_data->dev_data_list);133141 spin_unlock_irqrestore(&dev_data_list_lock, flags);142142+143143+ if (dev_data->group)144144+ iommu_group_put(dev_data->group);134145135146 kfree(dev_data);136147}···269274 *from = to;270275}271276277277+static struct pci_bus *find_hosted_bus(struct pci_bus *bus)278278+{279279+ while (!bus->self) {280280+ if (!pci_is_root_bus(bus))281281+ bus = bus->parent;282282+ else283283+ return ERR_PTR(-ENODEV);284284+ }285285+286286+ return bus;287287+}288288+272289#define REQ_ACS_FLAGS (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)290290+291291+static struct pci_dev *get_isolation_root(struct pci_dev *pdev)292292+{293293+ struct pci_dev *dma_pdev = pdev;294294+295295+ /* Account for quirked devices */296296+ swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev));297297+298298+ /*299299+ * If it's a multifunction device that does not support our300300+ * required ACS flags, add to the same group as function 0.301301+ */302302+ if (dma_pdev->multifunction &&303303+ !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS))304304+ swap_pci_ref(&dma_pdev,305305+ pci_get_slot(dma_pdev->bus,306306+ 
PCI_DEVFN(PCI_SLOT(dma_pdev->devfn),307307+ 0)));308308+309309+ /*310310+ * Devices on the root bus go through the iommu. If that's not us,311311+ * find the next upstream device and test ACS up to the root bus.312312+ * Finding the next device may require skipping virtual buses.313313+ */314314+ while (!pci_is_root_bus(dma_pdev->bus)) {315315+ struct pci_bus *bus = find_hosted_bus(dma_pdev->bus);316316+ if (IS_ERR(bus))317317+ break;318318+319319+ if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))320320+ break;321321+322322+ swap_pci_ref(&dma_pdev, pci_dev_get(bus->self));323323+ }324324+325325+ return dma_pdev;326326+}327327+328328+static int use_pdev_iommu_group(struct pci_dev *pdev, struct device *dev)329329+{330330+ struct iommu_group *group = iommu_group_get(&pdev->dev);331331+ int ret;332332+333333+ if (!group) {334334+ group = iommu_group_alloc();335335+ if (IS_ERR(group))336336+ return PTR_ERR(group);337337+338338+ WARN_ON(&pdev->dev != dev);339339+ }340340+341341+ ret = iommu_group_add_device(group, dev);342342+ iommu_group_put(group);343343+ return ret;344344+}345345+346346+static int use_dev_data_iommu_group(struct iommu_dev_data *dev_data,347347+ struct device *dev)348348+{349349+ if (!dev_data->group) {350350+ struct iommu_group *group = iommu_group_alloc();351351+ if (IS_ERR(group))352352+ return PTR_ERR(group);353353+354354+ dev_data->group = group;355355+ }356356+357357+ return iommu_group_add_device(dev_data->group, dev);358358+}359359+360360+static int init_iommu_group(struct device *dev)361361+{362362+ struct iommu_dev_data *dev_data;363363+ struct iommu_group *group;364364+ struct pci_dev *dma_pdev;365365+ int ret;366366+367367+ group = iommu_group_get(dev);368368+ if (group) {369369+ iommu_group_put(group);370370+ return 0;371371+ }372372+373373+ dev_data = find_dev_data(get_device_id(dev));374374+ if (!dev_data)375375+ return -ENOMEM;376376+377377+ if (dev_data->alias_data) {378378+ u16 alias;379379+ struct pci_bus *bus;380380+381381+ 
if (dev_data->alias_data->group)382382+ goto use_group;383383+384384+ /*385385+ * If the alias device exists, it's effectively just a first386386+ * level quirk for finding the DMA source.387387+ */388388+ alias = amd_iommu_alias_table[dev_data->devid];389389+ dma_pdev = pci_get_bus_and_slot(alias >> 8, alias & 0xff);390390+ if (dma_pdev) {391391+ dma_pdev = get_isolation_root(dma_pdev);392392+ goto use_pdev;393393+ }394394+395395+ /*396396+ * If the alias is virtual, try to find a parent device397397+ * and test whether the IOMMU group is actualy rooted above398398+ * the alias. Be careful to also test the parent device if399399+ * we think the alias is the root of the group.400400+ */401401+ bus = pci_find_bus(0, alias >> 8);402402+ if (!bus)403403+ goto use_group;404404+405405+ bus = find_hosted_bus(bus);406406+ if (IS_ERR(bus) || !bus->self)407407+ goto use_group;408408+409409+ dma_pdev = get_isolation_root(pci_dev_get(bus->self));410410+ if (dma_pdev != bus->self || (dma_pdev->multifunction &&411411+ !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)))412412+ goto use_pdev;413413+414414+ pci_dev_put(dma_pdev);415415+ goto use_group;416416+ }417417+418418+ dma_pdev = get_isolation_root(pci_dev_get(to_pci_dev(dev)));419419+use_pdev:420420+ ret = use_pdev_iommu_group(dma_pdev, dev);421421+ pci_dev_put(dma_pdev);422422+ return ret;423423+use_group:424424+ return use_dev_data_iommu_group(dev_data->alias_data, dev);425425+}273426274427static int iommu_init_device(struct device *dev)275428{276276- struct pci_dev *dma_pdev = NULL, *pdev = to_pci_dev(dev);429429+ struct pci_dev *pdev = to_pci_dev(dev);277430 struct iommu_dev_data *dev_data;278278- struct iommu_group *group;279431 u16 alias;280432 int ret;281433···445303 return -ENOTSUPP;446304 }447305 dev_data->alias_data = alias_data;448448-449449- dma_pdev = pci_get_bus_and_slot(alias >> 8, alias & 0xff);450306 }451307452452- if (dma_pdev == NULL)453453- dma_pdev = pci_dev_get(pdev);454454-455455- /* Account for quirked 
devices */456456- swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev));457457-458458- /*459459- * If it's a multifunction device that does not support our460460- * required ACS flags, add to the same group as function 0.461461- */462462- if (dma_pdev->multifunction &&463463- !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS))464464- swap_pci_ref(&dma_pdev,465465- pci_get_slot(dma_pdev->bus,466466- PCI_DEVFN(PCI_SLOT(dma_pdev->devfn),467467- 0)));468468-469469- /*470470- * Devices on the root bus go through the iommu. If that's not us,471471- * find the next upstream device and test ACS up to the root bus.472472- * Finding the next device may require skipping virtual buses.473473- */474474- while (!pci_is_root_bus(dma_pdev->bus)) {475475- struct pci_bus *bus = dma_pdev->bus;476476-477477- while (!bus->self) {478478- if (!pci_is_root_bus(bus))479479- bus = bus->parent;480480- else481481- goto root_bus;482482- }483483-484484- if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))485485- break;486486-487487- swap_pci_ref(&dma_pdev, pci_dev_get(bus->self));488488- }489489-490490-root_bus:491491- group = iommu_group_get(&dma_pdev->dev);492492- pci_dev_put(dma_pdev);493493- if (!group) {494494- group = iommu_group_alloc();495495- if (IS_ERR(group))496496- return PTR_ERR(group);497497- }498498-499499- ret = iommu_group_add_device(group, dev);500500-501501- iommu_group_put(group);502502-308308+ ret = init_iommu_group(dev);503309 if (ret)504310 return ret;505311
+1
drivers/iommu/amd_iommu_types.h
···426426 struct iommu_dev_data *alias_data;/* The alias dev_data */427427 struct protection_domain *domain; /* Domain the device is bound to */428428 atomic_t bind; /* Domain attach reference count */429429+ struct iommu_group *group; /* IOMMU group for virtual aliases */429430 u16 devid; /* PCI Device ID */430431 bool iommu_v2; /* Device can make use of IOMMUv2 */431432 bool passthrough; /* Default for device is pt_domain */
+31
drivers/iommu/intel-iommu.c
···23202320 return 0;23212321}2322232223232323+static bool device_has_rmrr(struct pci_dev *dev)23242324+{23252325+ struct dmar_rmrr_unit *rmrr;23262326+ int i;23272327+23282328+ for_each_rmrr_units(rmrr) {23292329+ for (i = 0; i < rmrr->devices_cnt; i++) {23302330+ /*23312331+ * Return TRUE if this RMRR contains the device that23322332+ * is passed in.23332333+ */23342334+ if (rmrr->devices[i] == dev)23352335+ return true;23362336+ }23372337+ }23382338+ return false;23392339+}23402340+23232341static int iommu_should_identity_map(struct pci_dev *pdev, int startup)23242342{23432343+23442344+ /*23452345+ * We want to prevent any device associated with an RMRR from23462346+ * getting placed into the SI Domain. This is done because23472347+ * problems exist when devices are moved in and out of domains23482348+ * and their respective RMRR info is lost. We exempt USB devices23492349+ * from this process due to their usage of RMRRs that are known23502350+ * to not be needed after BIOS hand-off to OS.23512351+ */23522352+ if (device_has_rmrr(pdev) &&23532353+ (pdev->class >> 8) != PCI_CLASS_SERIAL_USB)23542354+ return 0;23552355+23252356 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))23262357 return 1;23272358
+4-4
drivers/iommu/omap-iommu-debug.c
···1818#include <linux/uaccess.h>1919#include <linux/platform_device.h>2020#include <linux/debugfs.h>2121+#include <linux/omap-iommu.h>2222+#include <linux/platform_data/iommu-omap.h>21232222-#include <plat/iommu.h>2323-#include <plat/iovmm.h>2424-2525-#include <plat/iopgtable.h>2424+#include "omap-iopgtable.h"2525+#include "omap-iommu.h"26262727#define MAXCOLUMN 100 /* for short messages */2828
···11+/*22+ * omap iommu: simple virtual address space management33+ *44+ * Copyright (C) 2008-2009 Nokia Corporation55+ *66+ * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>77+ *88+ * This program is free software; you can redistribute it and/or modify99+ * it under the terms of the GNU General Public License version 2 as1010+ * published by the Free Software Foundation.1111+ */1212+1313+#ifndef _INTEL_IOMMU_H_1414+#define _INTEL_IOMMU_H_1515+1616+struct iovm_struct {1717+ struct omap_iommu *iommu; /* iommu object which this belongs to */1818+ u32 da_start; /* area definition */1919+ u32 da_end;2020+ u32 flags; /* IOVMF_: see below */2121+ struct list_head list; /* linked in ascending order */2222+ const struct sg_table *sgt; /* keep 'page' <-> 'da' mapping */2323+ void *va; /* mpu side mapped address */2424+};2525+2626+#define MMU_RAM_ENDIAN_SHIFT 92727+#define MMU_RAM_ENDIAN_LITTLE (0 << MMU_RAM_ENDIAN_SHIFT)2828+#define MMU_RAM_ELSZ_8 (0 << MMU_RAM_ELSZ_SHIFT)2929+#define IOVMF_ENDIAN_LITTLE MMU_RAM_ENDIAN_LITTLE3030+#define MMU_RAM_ELSZ_SHIFT 73131+#define IOVMF_ELSZ_8 MMU_RAM_ELSZ_83232+3333+struct iommu_domain;3434+3535+extern struct iovm_struct *omap_find_iovm_area(struct device *dev, u32 da);3636+extern u323737+omap_iommu_vmap(struct iommu_domain *domain, struct device *dev, u32 da,3838+ const struct sg_table *sgt, u32 flags);3939+extern struct sg_table *omap_iommu_vunmap(struct iommu_domain *domain,4040+ struct device *dev, u32 da);4141+extern u324242+omap_iommu_vmalloc(struct iommu_domain *domain, struct device *dev,4343+ u32 da, size_t bytes, u32 flags);4444+extern void4545+omap_iommu_vfree(struct iommu_domain *domain, struct device *dev,4646+ const u32 da);4747+extern void *omap_da_to_va(struct device *dev, u32 da);4848+4949+extern void omap_iommu_save_ctx(struct device *dev);5050+extern void omap_iommu_restore_ctx(struct device *dev);5151+5252+#endif
+54
include/linux/platform_data/iommu-omap.h
···11+/*22+ * omap iommu: main structures33+ *44+ * Copyright (C) 2008-2009 Nokia Corporation55+ *66+ * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>77+ *88+ * This program is free software; you can redistribute it and/or modify99+ * it under the terms of the GNU General Public License version 2 as1010+ * published by the Free Software Foundation.1111+ */1212+1313+#include <linux/platform_device.h>1414+1515+#define MMU_REG_SIZE 2561616+1717+/**1818+ * struct iommu_arch_data - omap iommu private data1919+ * @name: name of the iommu device2020+ * @iommu_dev: handle of the iommu device2121+ *2222+ * This is an omap iommu private data object, which binds an iommu user2323+ * to its iommu device. This object should be placed at the iommu user's2424+ * dev_archdata so generic IOMMU API can be used without having to2525+ * utilize omap-specific plumbing anymore.2626+ */2727+struct omap_iommu_arch_data {2828+ const char *name;2929+ struct omap_iommu *iommu_dev;3030+};3131+3232+/**3333+ * struct omap_mmu_dev_attr - OMAP mmu device attributes for omap_hwmod3434+ * @da_start: device address where the va space starts.3535+ * @da_end: device address where the va space ends.3636+ * @nr_tlb_entries: number of entries supported by the translation3737+ * look-aside buffer (TLB).3838+ */3939+struct omap_mmu_dev_attr {4040+ u32 da_start;4141+ u32 da_end;4242+ int nr_tlb_entries;4343+};4444+4545+struct iommu_platform_data {4646+ const char *name;4747+ const char *reset_name;4848+ int nr_tlb_entries;4949+ u32 da_start;5050+ u32 da_end;5151+5252+ int (*assert_reset)(struct platform_device *pdev, const char *name);5353+ int (*deassert_reset)(struct platform_device *pdev, const char *name);5454+};