···1010 DMA engines offload copy operations from the CPU to dedicated1111 hardware, allowing the copies to happen asynchronously.12121313+comment "DMA Devices"1414+1515+config INTEL_IOATDMA1616+ tristate "Intel I/OAT DMA support"1717+ depends on DMA_ENGINE && PCI1818+ default m1919+ ---help---2020+ Enable support for the Intel(R) I/OAT DMA engine.2121+1322endmenu
···11+/*22+ * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.33+ *44+ * This program is free software; you can redistribute it and/or modify it55+ * under the terms of the GNU General Public License as published by the Free66+ * Software Foundation; either version 2 of the License, or (at your option)77+ * any later version.88+ *99+ * This program is distributed in the hope that it will be useful, but WITHOUT1010+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or1111+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for1212+ * more details.1313+ *1414+ * You should have received a copy of the GNU General Public License along with1515+ * this program; if not, write to the Free Software Foundation, Inc., 591616+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.1717+ *1818+ * The full GNU General Public License is included in this distribution in the1919+ * file called COPYING.2020+ */2121+2222+/*2323+ * This driver supports an Intel I/OAT DMA engine, which does asynchronous2424+ * copy operations.2525+ */2626+2727+#include <linux/init.h>2828+#include <linux/module.h>2929+#include <linux/pci.h>3030+#include <linux/interrupt.h>3131+#include <linux/dmaengine.h>3232+#include <linux/delay.h>3333+#include "ioatdma.h"3434+#include "ioatdma_io.h"3535+#include "ioatdma_registers.h"3636+#include "ioatdma_hw.h"3737+3838+#define to_ioat_chan(chan) container_of(chan, struct ioat_dma_chan, common)3939+#define to_ioat_device(dev) container_of(dev, struct ioat_device, common)4040+#define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node)4141+4242+/* internal functions */4343+static int __devinit ioat_probe(struct pci_dev *pdev, const struct pci_device_id *ent);4444+static void __devexit ioat_remove(struct pci_dev *pdev);4545+4646+static int enumerate_dma_channels(struct ioat_device *device)4747+{4848+ u8 xfercap_scale;4949+ u32 xfercap;5050+ int i;5151+ struct ioat_dma_chan *ioat_chan;5252+5353+ 
device->common.chancnt = ioatdma_read8(device, IOAT_CHANCNT_OFFSET);5454+ xfercap_scale = ioatdma_read8(device, IOAT_XFERCAP_OFFSET);5555+ xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));5656+5757+ for (i = 0; i < device->common.chancnt; i++) {5858+ ioat_chan = kzalloc(sizeof(*ioat_chan), GFP_KERNEL);5959+ if (!ioat_chan) {6060+ device->common.chancnt = i;6161+ break;6262+ }6363+6464+ ioat_chan->device = device;6565+ ioat_chan->reg_base = device->reg_base + (0x80 * (i + 1));6666+ ioat_chan->xfercap = xfercap;6767+ spin_lock_init(&ioat_chan->cleanup_lock);6868+ spin_lock_init(&ioat_chan->desc_lock);6969+ INIT_LIST_HEAD(&ioat_chan->free_desc);7070+ INIT_LIST_HEAD(&ioat_chan->used_desc);7171+ /* This should be made common somewhere in dmaengine.c */7272+ ioat_chan->common.device = &device->common;7373+ ioat_chan->common.client = NULL;7474+ list_add_tail(&ioat_chan->common.device_node,7575+ &device->common.channels);7676+ }7777+ return device->common.chancnt;7878+}7979+8080+static struct ioat_desc_sw *ioat_dma_alloc_descriptor(8181+ struct ioat_dma_chan *ioat_chan,8282+ int flags)8383+{8484+ struct ioat_dma_descriptor *desc;8585+ struct ioat_desc_sw *desc_sw;8686+ struct ioat_device *ioat_device;8787+ dma_addr_t phys;8888+8989+ ioat_device = to_ioat_device(ioat_chan->common.device);9090+ desc = pci_pool_alloc(ioat_device->dma_pool, flags, &phys);9191+ if (unlikely(!desc))9292+ return NULL;9393+9494+ desc_sw = kzalloc(sizeof(*desc_sw), flags);9595+ if (unlikely(!desc_sw)) {9696+ pci_pool_free(ioat_device->dma_pool, desc, phys);9797+ return NULL;9898+ }9999+100100+ memset(desc, 0, sizeof(*desc));101101+ desc_sw->hw = desc;102102+ desc_sw->phys = phys;103103+104104+ return desc_sw;105105+}106106+107107+#define INITIAL_IOAT_DESC_COUNT 128108108+109109+static void ioat_start_null_desc(struct ioat_dma_chan *ioat_chan);110110+111111+/* returns the actual number of allocated descriptors */112112+static int ioat_dma_alloc_chan_resources(struct dma_chan 
*chan)113113+{114114+ struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);115115+ struct ioat_desc_sw *desc = NULL;116116+ u16 chanctrl;117117+ u32 chanerr;118118+ int i;119119+ LIST_HEAD(tmp_list);120120+121121+ /*122122+ * In-use bit automatically set by reading chanctrl123123+ * If 0, we got it, if 1, someone else did124124+ */125125+ chanctrl = ioatdma_chan_read16(ioat_chan, IOAT_CHANCTRL_OFFSET);126126+ if (chanctrl & IOAT_CHANCTRL_CHANNEL_IN_USE)127127+ return -EBUSY;128128+129129+ /* Setup register to interrupt and write completion status on error */130130+ chanctrl = IOAT_CHANCTRL_CHANNEL_IN_USE |131131+ IOAT_CHANCTRL_ERR_INT_EN |132132+ IOAT_CHANCTRL_ANY_ERR_ABORT_EN |133133+ IOAT_CHANCTRL_ERR_COMPLETION_EN;134134+ ioatdma_chan_write16(ioat_chan, IOAT_CHANCTRL_OFFSET, chanctrl);135135+136136+ chanerr = ioatdma_chan_read32(ioat_chan, IOAT_CHANERR_OFFSET);137137+ if (chanerr) {138138+ printk("IOAT: CHANERR = %x, clearing\n", chanerr);139139+ ioatdma_chan_write32(ioat_chan, IOAT_CHANERR_OFFSET, chanerr);140140+ }141141+142142+ /* Allocate descriptors */143143+ for (i = 0; i < INITIAL_IOAT_DESC_COUNT; i++) {144144+ desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL);145145+ if (!desc) {146146+ printk(KERN_ERR "IOAT: Only %d initial descriptors\n", i);147147+ break;148148+ }149149+ list_add_tail(&desc->node, &tmp_list);150150+ }151151+ spin_lock_bh(&ioat_chan->desc_lock);152152+ list_splice(&tmp_list, &ioat_chan->free_desc);153153+ spin_unlock_bh(&ioat_chan->desc_lock);154154+155155+ /* allocate a completion writeback area */156156+ /* doing 2 32bit writes to mmio since 1 64b write doesn't work */157157+ ioat_chan->completion_virt =158158+ pci_pool_alloc(ioat_chan->device->completion_pool,159159+ GFP_KERNEL,160160+ &ioat_chan->completion_addr);161161+ memset(ioat_chan->completion_virt, 0,162162+ sizeof(*ioat_chan->completion_virt));163163+ ioatdma_chan_write32(ioat_chan, IOAT_CHANCMP_OFFSET_LOW,164164+ ((u64) ioat_chan->completion_addr) & 
0x00000000FFFFFFFF);165165+ ioatdma_chan_write32(ioat_chan, IOAT_CHANCMP_OFFSET_HIGH,166166+ ((u64) ioat_chan->completion_addr) >> 32);167167+168168+ ioat_start_null_desc(ioat_chan);169169+ return i;170170+}171171+172172+static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan);173173+174174+static void ioat_dma_free_chan_resources(struct dma_chan *chan)175175+{176176+ struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);177177+ struct ioat_device *ioat_device = to_ioat_device(chan->device);178178+ struct ioat_desc_sw *desc, *_desc;179179+ u16 chanctrl;180180+ int in_use_descs = 0;181181+182182+ ioat_dma_memcpy_cleanup(ioat_chan);183183+184184+ ioatdma_chan_write8(ioat_chan, IOAT_CHANCMD_OFFSET, IOAT_CHANCMD_RESET);185185+186186+ spin_lock_bh(&ioat_chan->desc_lock);187187+ list_for_each_entry_safe(desc, _desc, &ioat_chan->used_desc, node) {188188+ in_use_descs++;189189+ list_del(&desc->node);190190+ pci_pool_free(ioat_device->dma_pool, desc->hw, desc->phys);191191+ kfree(desc);192192+ }193193+ list_for_each_entry_safe(desc, _desc, &ioat_chan->free_desc, node) {194194+ list_del(&desc->node);195195+ pci_pool_free(ioat_device->dma_pool, desc->hw, desc->phys);196196+ kfree(desc);197197+ }198198+ spin_unlock_bh(&ioat_chan->desc_lock);199199+200200+ pci_pool_free(ioat_device->completion_pool,201201+ ioat_chan->completion_virt,202202+ ioat_chan->completion_addr);203203+204204+ /* one is ok since we left it on there on purpose */205205+ if (in_use_descs > 1)206206+ printk(KERN_ERR "IOAT: Freeing %d in use descriptors!\n",207207+ in_use_descs - 1);208208+209209+ ioat_chan->last_completion = ioat_chan->completion_addr = 0;210210+211211+ /* Tell hw the chan is free */212212+ chanctrl = ioatdma_chan_read16(ioat_chan, IOAT_CHANCTRL_OFFSET);213213+ chanctrl &= ~IOAT_CHANCTRL_CHANNEL_IN_USE;214214+ ioatdma_chan_write16(ioat_chan, IOAT_CHANCTRL_OFFSET, chanctrl);215215+}216216+217217+/**218218+ * do_ioat_dma_memcpy - actual function that initiates a IOAT DMA 
transaction219219+ * @chan: IOAT DMA channel handle220220+ * @dest: DMA destination address221221+ * @src: DMA source address222222+ * @len: transaction length in bytes223223+ */224224+225225+static dma_cookie_t do_ioat_dma_memcpy(struct ioat_dma_chan *ioat_chan,226226+ dma_addr_t dest,227227+ dma_addr_t src,228228+ size_t len)229229+{230230+ struct ioat_desc_sw *first;231231+ struct ioat_desc_sw *prev;232232+ struct ioat_desc_sw *new;233233+ dma_cookie_t cookie;234234+ LIST_HEAD(new_chain);235235+ u32 copy;236236+ size_t orig_len;237237+ dma_addr_t orig_src, orig_dst;238238+ unsigned int desc_count = 0;239239+ unsigned int append = 0;240240+241241+ if (!ioat_chan || !dest || !src)242242+ return -EFAULT;243243+244244+ if (!len)245245+ return ioat_chan->common.cookie;246246+247247+ orig_len = len;248248+ orig_src = src;249249+ orig_dst = dest;250250+251251+ first = NULL;252252+ prev = NULL;253253+254254+ spin_lock_bh(&ioat_chan->desc_lock);255255+256256+ while (len) {257257+ if (!list_empty(&ioat_chan->free_desc)) {258258+ new = to_ioat_desc(ioat_chan->free_desc.next);259259+ list_del(&new->node);260260+ } else {261261+ /* try to get another desc */262262+ new = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);263263+ /* will this ever happen? 
*/264264+ /* TODO add upper limit on these */265265+ BUG_ON(!new);266266+ }267267+268268+ copy = min((u32) len, ioat_chan->xfercap);269269+270270+ new->hw->size = copy;271271+ new->hw->ctl = 0;272272+ new->hw->src_addr = src;273273+ new->hw->dst_addr = dest;274274+ new->cookie = 0;275275+276276+ /* chain together the physical address list for the HW */277277+ if (!first)278278+ first = new;279279+ else280280+ prev->hw->next = (u64) new->phys;281281+282282+ prev = new;283283+284284+ len -= copy;285285+ dest += copy;286286+ src += copy;287287+288288+ list_add_tail(&new->node, &new_chain);289289+ desc_count++;290290+ }291291+ new->hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;292292+ new->hw->next = 0;293293+294294+ /* cookie incr and addition to used_list must be atomic */295295+296296+ cookie = ioat_chan->common.cookie;297297+ cookie++;298298+ if (cookie < 0)299299+ cookie = 1;300300+ ioat_chan->common.cookie = new->cookie = cookie;301301+302302+ pci_unmap_addr_set(new, src, orig_src);303303+ pci_unmap_addr_set(new, dst, orig_dst);304304+ pci_unmap_len_set(new, src_len, orig_len);305305+ pci_unmap_len_set(new, dst_len, orig_len);306306+307307+ /* write address into NextDescriptor field of last desc in chain */308308+ to_ioat_desc(ioat_chan->used_desc.prev)->hw->next = first->phys;309309+ list_splice_init(&new_chain, ioat_chan->used_desc.prev);310310+311311+ ioat_chan->pending += desc_count;312312+ if (ioat_chan->pending >= 20) {313313+ append = 1;314314+ ioat_chan->pending = 0;315315+ }316316+317317+ spin_unlock_bh(&ioat_chan->desc_lock);318318+319319+ if (append)320320+ ioatdma_chan_write8(ioat_chan,321321+ IOAT_CHANCMD_OFFSET,322322+ IOAT_CHANCMD_APPEND);323323+ return cookie;324324+}325325+326326+/**327327+ * ioat_dma_memcpy_buf_to_buf - wrapper that takes src & dest bufs328328+ * @chan: IOAT DMA channel handle329329+ * @dest: DMA destination address330330+ * @src: DMA source address331331+ * @len: transaction length in bytes332332+ */333333+334334+static dma_cookie_t 
ioat_dma_memcpy_buf_to_buf(struct dma_chan *chan,335335+ void *dest,336336+ void *src,337337+ size_t len)338338+{339339+ dma_addr_t dest_addr;340340+ dma_addr_t src_addr;341341+ struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);342342+343343+ dest_addr = pci_map_single(ioat_chan->device->pdev,344344+ dest, len, PCI_DMA_FROMDEVICE);345345+ src_addr = pci_map_single(ioat_chan->device->pdev,346346+ src, len, PCI_DMA_TODEVICE);347347+348348+ return do_ioat_dma_memcpy(ioat_chan, dest_addr, src_addr, len);349349+}350350+351351+/**352352+ * ioat_dma_memcpy_buf_to_pg - wrapper, copying from a buf to a page353353+ * @chan: IOAT DMA channel handle354354+ * @page: pointer to the page to copy to355355+ * @offset: offset into that page356356+ * @src: DMA source address357357+ * @len: transaction length in bytes358358+ */359359+360360+static dma_cookie_t ioat_dma_memcpy_buf_to_pg(struct dma_chan *chan,361361+ struct page *page,362362+ unsigned int offset,363363+ void *src,364364+ size_t len)365365+{366366+ dma_addr_t dest_addr;367367+ dma_addr_t src_addr;368368+ struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);369369+370370+ dest_addr = pci_map_page(ioat_chan->device->pdev,371371+ page, offset, len, PCI_DMA_FROMDEVICE);372372+ src_addr = pci_map_single(ioat_chan->device->pdev,373373+ src, len, PCI_DMA_TODEVICE);374374+375375+ return do_ioat_dma_memcpy(ioat_chan, dest_addr, src_addr, len);376376+}377377+378378+/**379379+ * ioat_dma_memcpy_pg_to_pg - wrapper, copying between two pages380380+ * @chan: IOAT DMA channel handle381381+ * @dest_pg: pointer to the page to copy to382382+ * @dest_off: offset into that page383383+ * @src_pg: pointer to the page to copy from384384+ * @src_off: offset into that page385385+ * @len: transaction length in bytes. 
This is guaranteed to not make a copy386386+ * across a page boundary.387387+ */388388+389389+static dma_cookie_t ioat_dma_memcpy_pg_to_pg(struct dma_chan *chan,390390+ struct page *dest_pg,391391+ unsigned int dest_off,392392+ struct page *src_pg,393393+ unsigned int src_off,394394+ size_t len)395395+{396396+ dma_addr_t dest_addr;397397+ dma_addr_t src_addr;398398+ struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);399399+400400+ dest_addr = pci_map_page(ioat_chan->device->pdev,401401+ dest_pg, dest_off, len, PCI_DMA_FROMDEVICE);402402+ src_addr = pci_map_page(ioat_chan->device->pdev,403403+ src_pg, src_off, len, PCI_DMA_TODEVICE);404404+405405+ return do_ioat_dma_memcpy(ioat_chan, dest_addr, src_addr, len);406406+}407407+408408+/**409409+ * ioat_dma_memcpy_issue_pending - push potentially unrecognoized appended descriptors to hw410410+ * @chan: DMA channel handle411411+ */412412+413413+static void ioat_dma_memcpy_issue_pending(struct dma_chan *chan)414414+{415415+ struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);416416+417417+ if (ioat_chan->pending != 0) {418418+ ioat_chan->pending = 0;419419+ ioatdma_chan_write8(ioat_chan,420420+ IOAT_CHANCMD_OFFSET,421421+ IOAT_CHANCMD_APPEND);422422+ }423423+}424424+425425+static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *chan)426426+{427427+ unsigned long phys_complete;428428+ struct ioat_desc_sw *desc, *_desc;429429+ dma_cookie_t cookie = 0;430430+431431+ prefetch(chan->completion_virt);432432+433433+ if (!spin_trylock(&chan->cleanup_lock))434434+ return;435435+436436+ /* The completion writeback can happen at any time,437437+ so reads by the driver need to be atomic operations438438+ The descriptor physical addresses are limited to 32-bits439439+ when the CPU can only do a 32-bit mov */440440+441441+#if (BITS_PER_LONG == 64)442442+ phys_complete =443443+ chan->completion_virt->full & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;444444+#else445445+ phys_complete = chan->completion_virt->low & 
IOAT_LOW_COMPLETION_MASK;446446+#endif447447+448448+ if ((chan->completion_virt->full & IOAT_CHANSTS_DMA_TRANSFER_STATUS) ==449449+ IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) {450450+ printk("IOAT: Channel halted, chanerr = %x\n",451451+ ioatdma_chan_read32(chan, IOAT_CHANERR_OFFSET));452452+453453+ /* TODO do something to salvage the situation */454454+ }455455+456456+ if (phys_complete == chan->last_completion) {457457+ spin_unlock(&chan->cleanup_lock);458458+ return;459459+ }460460+461461+ spin_lock_bh(&chan->desc_lock);462462+ list_for_each_entry_safe(desc, _desc, &chan->used_desc, node) {463463+464464+ /*465465+ * Incoming DMA requests may use multiple descriptors, due to466466+ * exceeding xfercap, perhaps. If so, only the last one will467467+ * have a cookie, and require unmapping.468468+ */469469+ if (desc->cookie) {470470+ cookie = desc->cookie;471471+472472+ /* yes we are unmapping both _page and _single alloc'd473473+ regions with unmap_page. Is this *really* that bad?474474+ */475475+ pci_unmap_page(chan->device->pdev,476476+ pci_unmap_addr(desc, dst),477477+ pci_unmap_len(desc, dst_len),478478+ PCI_DMA_FROMDEVICE);479479+ pci_unmap_page(chan->device->pdev,480480+ pci_unmap_addr(desc, src),481481+ pci_unmap_len(desc, src_len),482482+ PCI_DMA_TODEVICE);483483+ }484484+485485+ if (desc->phys != phys_complete) {486486+ /* a completed entry, but not the last, so cleanup */487487+ list_del(&desc->node);488488+ list_add_tail(&desc->node, &chan->free_desc);489489+ } else {490490+ /* last used desc. Do not remove, so we can append from491491+ it, but don't look at it next time, either */492492+ desc->cookie = 0;493493+494494+ /* TODO check status bits? 
*/495495+ break;496496+ }497497+ }498498+499499+ spin_unlock_bh(&chan->desc_lock);500500+501501+ chan->last_completion = phys_complete;502502+ if (cookie != 0)503503+ chan->completed_cookie = cookie;504504+505505+ spin_unlock(&chan->cleanup_lock);506506+}507507+508508+/**509509+ * ioat_dma_is_complete - poll the status of a IOAT DMA transaction510510+ * @chan: IOAT DMA channel handle511511+ * @cookie: DMA transaction identifier512512+ */513513+514514+static enum dma_status ioat_dma_is_complete(struct dma_chan *chan,515515+ dma_cookie_t cookie,516516+ dma_cookie_t *done,517517+ dma_cookie_t *used)518518+{519519+ struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);520520+ dma_cookie_t last_used;521521+ dma_cookie_t last_complete;522522+ enum dma_status ret;523523+524524+ last_used = chan->cookie;525525+ last_complete = ioat_chan->completed_cookie;526526+527527+ if (done)528528+ *done= last_complete;529529+ if (used)530530+ *used = last_used;531531+532532+ ret = dma_async_is_complete(cookie, last_complete, last_used);533533+ if (ret == DMA_SUCCESS)534534+ return ret;535535+536536+ ioat_dma_memcpy_cleanup(ioat_chan);537537+538538+ last_used = chan->cookie;539539+ last_complete = ioat_chan->completed_cookie;540540+541541+ if (done)542542+ *done= last_complete;543543+ if (used)544544+ *used = last_used;545545+546546+ return dma_async_is_complete(cookie, last_complete, last_used);547547+}548548+549549+/* PCI API */550550+551551+static struct pci_device_id ioat_pci_tbl[] = {552552+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT) },553553+ { 0, }554554+};555555+556556+static struct pci_driver ioat_pci_drv = {557557+ .name = "ioatdma",558558+ .id_table = ioat_pci_tbl,559559+ .probe = ioat_probe,560560+ .remove = __devexit_p(ioat_remove),561561+};562562+563563+static irqreturn_t ioat_do_interrupt(int irq, void *data, struct pt_regs *regs)564564+{565565+ struct ioat_device *instance = data;566566+ unsigned long attnstatus;567567+ u8 intrctrl;568568+569569+ 
intrctrl = ioatdma_read8(instance, IOAT_INTRCTRL_OFFSET);570570+571571+ if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))572572+ return IRQ_NONE;573573+574574+ if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {575575+ ioatdma_write8(instance, IOAT_INTRCTRL_OFFSET, intrctrl);576576+ return IRQ_NONE;577577+ }578578+579579+ attnstatus = ioatdma_read32(instance, IOAT_ATTNSTATUS_OFFSET);580580+581581+ printk(KERN_ERR "ioatdma error: interrupt! status %lx\n", attnstatus);582582+583583+ ioatdma_write8(instance, IOAT_INTRCTRL_OFFSET, intrctrl);584584+ return IRQ_HANDLED;585585+}586586+587587+static void ioat_start_null_desc(struct ioat_dma_chan *ioat_chan)588588+{589589+ struct ioat_desc_sw *desc;590590+591591+ spin_lock_bh(&ioat_chan->desc_lock);592592+593593+ if (!list_empty(&ioat_chan->free_desc)) {594594+ desc = to_ioat_desc(ioat_chan->free_desc.next);595595+ list_del(&desc->node);596596+ } else {597597+ /* try to get another desc */598598+ spin_unlock_bh(&ioat_chan->desc_lock);599599+ desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL);600600+ spin_lock_bh(&ioat_chan->desc_lock);601601+ /* will this ever happen? 
*/602602+ BUG_ON(!desc);603603+ }604604+605605+ desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL;606606+ desc->hw->next = 0;607607+608608+ list_add_tail(&desc->node, &ioat_chan->used_desc);609609+ spin_unlock_bh(&ioat_chan->desc_lock);610610+611611+#if (BITS_PER_LONG == 64)612612+ ioatdma_chan_write64(ioat_chan, IOAT_CHAINADDR_OFFSET, desc->phys);613613+#else614614+ ioatdma_chan_write32(ioat_chan,615615+ IOAT_CHAINADDR_OFFSET_LOW,616616+ (u32) desc->phys);617617+ ioatdma_chan_write32(ioat_chan, IOAT_CHAINADDR_OFFSET_HIGH, 0);618618+#endif619619+ ioatdma_chan_write8(ioat_chan, IOAT_CHANCMD_OFFSET, IOAT_CHANCMD_START);620620+}621621+622622+/*623623+ * Perform a IOAT transaction to verify the HW works.624624+ */625625+#define IOAT_TEST_SIZE 2000626626+627627+static int ioat_self_test(struct ioat_device *device)628628+{629629+ int i;630630+ u8 *src;631631+ u8 *dest;632632+ struct dma_chan *dma_chan;633633+ dma_cookie_t cookie;634634+ int err = 0;635635+636636+ src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, SLAB_KERNEL);637637+ if (!src)638638+ return -ENOMEM;639639+ dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, SLAB_KERNEL);640640+ if (!dest) {641641+ kfree(src);642642+ return -ENOMEM;643643+ }644644+645645+ /* Fill in src buffer */646646+ for (i = 0; i < IOAT_TEST_SIZE; i++)647647+ src[i] = (u8)i;648648+649649+ /* Start copy, using first DMA channel */650650+ dma_chan = container_of(device->common.channels.next,651651+ struct dma_chan,652652+ device_node);653653+ if (ioat_dma_alloc_chan_resources(dma_chan) < 1) {654654+ err = -ENODEV;655655+ goto out;656656+ }657657+658658+ cookie = ioat_dma_memcpy_buf_to_buf(dma_chan, dest, src, IOAT_TEST_SIZE);659659+ ioat_dma_memcpy_issue_pending(dma_chan);660660+ msleep(1);661661+662662+ if (ioat_dma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {663663+ printk(KERN_ERR "ioatdma: Self-test copy timed out, disabling\n");664664+ err = -ENODEV;665665+ goto free_resources;666666+ }667667+ if (memcmp(src, dest, IOAT_TEST_SIZE)) {668668+ 
printk(KERN_ERR "ioatdma: Self-test copy failed compare, disabling\n");669669+ err = -ENODEV;670670+ goto free_resources;671671+ }672672+673673+free_resources:674674+ ioat_dma_free_chan_resources(dma_chan);675675+out:676676+ kfree(src);677677+ kfree(dest);678678+ return err;679679+}680680+681681+static int __devinit ioat_probe(struct pci_dev *pdev,682682+ const struct pci_device_id *ent)683683+{684684+ int err;685685+ unsigned long mmio_start, mmio_len;686686+ void *reg_base;687687+ struct ioat_device *device;688688+689689+ err = pci_enable_device(pdev);690690+ if (err)691691+ goto err_enable_device;692692+693693+ err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);694694+ if (err)695695+ err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);696696+ if (err)697697+ goto err_set_dma_mask;698698+699699+ err = pci_request_regions(pdev, ioat_pci_drv.name);700700+ if (err)701701+ goto err_request_regions;702702+703703+ mmio_start = pci_resource_start(pdev, 0);704704+ mmio_len = pci_resource_len(pdev, 0);705705+706706+ reg_base = ioremap(mmio_start, mmio_len);707707+ if (!reg_base) {708708+ err = -ENOMEM;709709+ goto err_ioremap;710710+ }711711+712712+ device = kzalloc(sizeof(*device), GFP_KERNEL);713713+ if (!device) {714714+ err = -ENOMEM;715715+ goto err_kzalloc;716716+ }717717+718718+ /* DMA coherent memory pool for DMA descriptor allocations */719719+ device->dma_pool = pci_pool_create("dma_desc_pool", pdev,720720+ sizeof(struct ioat_dma_descriptor), 64, 0);721721+ if (!device->dma_pool) {722722+ err = -ENOMEM;723723+ goto err_dma_pool;724724+ }725725+726726+ device->completion_pool = pci_pool_create("completion_pool", pdev, sizeof(u64), SMP_CACHE_BYTES, SMP_CACHE_BYTES);727727+ if (!device->completion_pool) {728728+ err = -ENOMEM;729729+ goto err_completion_pool;730730+ }731731+732732+ device->pdev = pdev;733733+ pci_set_drvdata(pdev, device);734734+#ifdef CONFIG_PCI_MSI735735+ if (pci_enable_msi(pdev) == 0) {736736+ device->msi = 1;737737+ } else {738738+ device->msi = 0;739739+ 
}740740+#endif741741+ err = request_irq(pdev->irq, &ioat_do_interrupt, SA_SHIRQ, "ioat",742742+ device);743743+ if (err)744744+ goto err_irq;745745+746746+ device->reg_base = reg_base;747747+748748+ ioatdma_write8(device, IOAT_INTRCTRL_OFFSET, IOAT_INTRCTRL_MASTER_INT_EN);749749+ pci_set_master(pdev);750750+751751+ INIT_LIST_HEAD(&device->common.channels);752752+ enumerate_dma_channels(device);753753+754754+ device->common.device_alloc_chan_resources = ioat_dma_alloc_chan_resources;755755+ device->common.device_free_chan_resources = ioat_dma_free_chan_resources;756756+ device->common.device_memcpy_buf_to_buf = ioat_dma_memcpy_buf_to_buf;757757+ device->common.device_memcpy_buf_to_pg = ioat_dma_memcpy_buf_to_pg;758758+ device->common.device_memcpy_pg_to_pg = ioat_dma_memcpy_pg_to_pg;759759+ device->common.device_memcpy_complete = ioat_dma_is_complete;760760+ device->common.device_memcpy_issue_pending = ioat_dma_memcpy_issue_pending;761761+ printk(KERN_INFO "Intel(R) I/OAT DMA Engine found, %d channels\n",762762+ device->common.chancnt);763763+764764+ err = ioat_self_test(device);765765+ if (err)766766+ goto err_self_test;767767+768768+ dma_async_device_register(&device->common);769769+770770+ return 0;771771+772772+err_self_test:773773+err_irq:774774+ pci_pool_destroy(device->completion_pool);775775+err_completion_pool:776776+ pci_pool_destroy(device->dma_pool);777777+err_dma_pool:778778+ kfree(device);779779+err_kzalloc:780780+ iounmap(reg_base);781781+err_ioremap:782782+ pci_release_regions(pdev);783783+err_request_regions:784784+err_set_dma_mask:785785+ pci_disable_device(pdev);786786+err_enable_device:787787+ return err;788788+}789789+790790+static void __devexit ioat_remove(struct pci_dev *pdev)791791+{792792+ struct ioat_device *device;793793+ struct dma_chan *chan, *_chan;794794+ struct ioat_dma_chan *ioat_chan;795795+796796+ device = pci_get_drvdata(pdev);797797+ dma_async_device_unregister(&device->common);798798+799799+ free_irq(device->pdev->irq, 
device);800800+#ifdef CONFIG_PCI_MSI801801+ if (device->msi)802802+ pci_disable_msi(device->pdev);803803+#endif804804+ pci_pool_destroy(device->dma_pool);805805+ pci_pool_destroy(device->completion_pool);806806+ iounmap(device->reg_base);807807+ pci_release_regions(pdev);808808+ pci_disable_device(pdev);809809+ list_for_each_entry_safe(chan, _chan, &device->common.channels, device_node) {810810+ ioat_chan = to_ioat_chan(chan);811811+ list_del(&chan->device_node);812812+ kfree(ioat_chan);813813+ }814814+ kfree(device);815815+}816816+817817+/* MODULE API */818818+MODULE_VERSION("1.7");819819+MODULE_LICENSE("GPL");820820+MODULE_AUTHOR("Intel Corporation");821821+822822+static int __init ioat_init_module(void)823823+{824824+ /* it's currently unsafe to unload this module */825825+ /* if forced, worst case is that rmmod hangs */826826+ if (THIS_MODULE != NULL)827827+ THIS_MODULE->unsafe = 1;828828+829829+ return pci_module_init(&ioat_pci_drv);830830+}831831+832832+module_init(ioat_init_module);833833+834834+static void __exit ioat_exit_module(void)835835+{836836+ pci_unregister_driver(&ioat_pci_drv);837837+}838838+839839+module_exit(ioat_exit_module);
+126
drivers/dma/ioatdma.h
···11+/*22+ * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.33+ *44+ * This program is free software; you can redistribute it and/or modify it55+ * under the terms of the GNU General Public License as published by the Free66+ * Software Foundation; either version 2 of the License, or (at your option)77+ * any later version.88+ *99+ * This program is distributed in the hope that it will be useful, but WITHOUT1010+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or1111+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for1212+ * more details.1313+ *1414+ * You should have received a copy of the GNU General Public License along with1515+ * this program; if not, write to the Free Software Foundation, Inc., 591616+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.1717+ *1818+ * The full GNU General Public License is included in this distribution in the1919+ * file called COPYING.2020+ */2121+#ifndef IOATDMA_H2222+#define IOATDMA_H2323+2424+#include <linux/dmaengine.h>2525+#include "ioatdma_hw.h"2626+#include <linux/init.h>2727+#include <linux/dmapool.h>2828+#include <linux/cache.h>2929+3030+#define PCI_DEVICE_ID_INTEL_IOAT 0x1a383131+3232+#define IOAT_LOW_COMPLETION_MASK 0xffffffc03333+3434+extern struct list_head dma_device_list;3535+extern struct list_head dma_client_list;3636+3737+/**3838+ * struct ioat_device - internal representation of a IOAT device3939+ * @pdev: PCI-Express device4040+ * @reg_base: MMIO register space base address4141+ * @dma_pool: for allocating DMA descriptors4242+ * @common: embedded struct dma_device4343+ * @msi: Message Signaled Interrupt number4444+ */4545+4646+struct ioat_device {4747+ struct pci_dev *pdev;4848+ void *reg_base;4949+ struct pci_pool *dma_pool;5050+ struct pci_pool *completion_pool;5151+5252+ struct dma_device common;5353+ u8 msi;5454+};5555+5656+/**5757+ * struct ioat_dma_chan - internal representation of a DMA channel5858+ * @device:5959+ * @reg_base:6060+ 
* @sw_in_use:6161+ * @completion:6262+ * @completion_low:6363+ * @completion_high:6464+ * @completed_cookie: last cookie seen completed on cleanup6565+ * @cookie: value of last cookie given to client6666+ * @last_completion:6767+ * @xfercap:6868+ * @desc_lock:6969+ * @free_desc:7070+ * @used_desc:7171+ * @resource:7272+ * @device_node:7373+ */7474+7575+struct ioat_dma_chan {7676+7777+ void *reg_base;7878+7979+ dma_cookie_t completed_cookie;8080+ unsigned long last_completion;8181+8282+ u32 xfercap; /* XFERCAP register value expanded out */8383+8484+ spinlock_t cleanup_lock;8585+ spinlock_t desc_lock;8686+ struct list_head free_desc;8787+ struct list_head used_desc;8888+8989+ int pending;9090+9191+ struct ioat_device *device;9292+ struct dma_chan common;9393+9494+ dma_addr_t completion_addr;9595+ union {9696+ u64 full; /* HW completion writeback */9797+ struct {9898+ u32 low;9999+ u32 high;100100+ };101101+ } *completion_virt;102102+};103103+104104+/* wrapper around hardware descriptor format + additional software fields */105105+106106+/**107107+ * struct ioat_desc_sw - wrapper around hardware descriptor108108+ * @hw: hardware DMA descriptor109109+ * @node:110110+ * @cookie:111111+ * @phys:112112+ */113113+114114+struct ioat_desc_sw {115115+ struct ioat_dma_descriptor *hw;116116+ struct list_head node;117117+ dma_cookie_t cookie;118118+ dma_addr_t phys;119119+ DECLARE_PCI_UNMAP_ADDR(src)120120+ DECLARE_PCI_UNMAP_LEN(src_len)121121+ DECLARE_PCI_UNMAP_ADDR(dst)122122+ DECLARE_PCI_UNMAP_LEN(dst_len)123123+};124124+125125+#endif /* IOATDMA_H */126126+
+52
drivers/dma/ioatdma_hw.h
/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */
#ifndef _IOAT_HW_H_
#define _IOAT_HW_H_

/* PCI Configuration Space Values */
#define IOAT_PCI_VID	0x8086
#define IOAT_PCI_DID	0x1A38
#define IOAT_PCI_RID	0x00
#define IOAT_PCI_SVID	0x8086
#define IOAT_PCI_SID	0x8086
#define IOAT_VER	0x12	/* Version 1.2 */

/* hardware DMA descriptor; layout is dictated by the device */
struct ioat_dma_descriptor {
	uint32_t size;		/* transfer size in bytes */
	uint64_t src_addr;
	uint64_t dst_addr;
	uint64_t next;		/* bus address of the next descriptor in chain */
	uint64_t rsv1;
	uint64_t rsv2;
	uint64_t user1;
	uint64_t user2;
};

/* bits of struct ioat_dma_descriptor.ctl */
#define IOAT_DMA_DESCRIPTOR_CTL_INT_GN	0x00000001
#define IOAT_DMA_DESCRIPTOR_CTL_SRC_SN	0x00000002
#define IOAT_DMA_DESCRIPTOR_CTL_DST_SN	0x00000004
#define IOAT_DMA_DESCRIPTOR_CTL_CP_STS	0x00000008
#define IOAT_DMA_DESCRIPTOR_CTL_FRAME	0x00000010
#define IOAT_DMA_DESCRIPTOR_NUL		0x00000020
#define IOAT_DMA_DESCRIPTOR_OPCODE	0xFF000000

#endif /* _IOAT_HW_H_ */
+118
drivers/dma/ioatdma_io.h
···11+/*22+ * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.33+ *44+ * This program is free software; you can redistribute it and/or modify it55+ * under the terms of the GNU General Public License as published by the Free66+ * Software Foundation; either version 2 of the License, or (at your option)77+ * any later version.88+ *99+ * This program is distributed in the hope that it will be useful, but WITHOUT1010+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or1111+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for1212+ * more details.1313+ *1414+ * You should have received a copy of the GNU General Public License along with1515+ * this program; if not, write to the Free Software Foundation, Inc., 591616+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.1717+ *1818+ * The full GNU General Public License is included in this distribution in the1919+ * file called COPYING.2020+ */2121+#ifndef IOATDMA_IO_H2222+#define IOATDMA_IO_H2323+2424+#include <asm/io.h>2525+2626+/*2727+ * device and per-channel MMIO register read and write functions2828+ * this is a lot of anoying inline functions, but it's typesafe2929+ */3030+3131+static inline u8 ioatdma_read8(struct ioat_device *device,3232+ unsigned int offset)3333+{3434+ return readb(device->reg_base + offset);3535+}3636+3737+static inline u16 ioatdma_read16(struct ioat_device *device,3838+ unsigned int offset)3939+{4040+ return readw(device->reg_base + offset);4141+}4242+4343+static inline u32 ioatdma_read32(struct ioat_device *device,4444+ unsigned int offset)4545+{4646+ return readl(device->reg_base + offset);4747+}4848+4949+static inline void ioatdma_write8(struct ioat_device *device,5050+ unsigned int offset, u8 value)5151+{5252+ writeb(value, device->reg_base + offset);5353+}5454+5555+static inline void ioatdma_write16(struct ioat_device *device,5656+ unsigned int offset, u16 value)5757+{5858+ writew(value, device->reg_base + 
offset);5959+}6060+6161+static inline void ioatdma_write32(struct ioat_device *device,6262+ unsigned int offset, u32 value)6363+{6464+ writel(value, device->reg_base + offset);6565+}6666+6767+static inline u8 ioatdma_chan_read8(struct ioat_dma_chan *chan,6868+ unsigned int offset)6969+{7070+ return readb(chan->reg_base + offset);7171+}7272+7373+static inline u16 ioatdma_chan_read16(struct ioat_dma_chan *chan,7474+ unsigned int offset)7575+{7676+ return readw(chan->reg_base + offset);7777+}7878+7979+static inline u32 ioatdma_chan_read32(struct ioat_dma_chan *chan,8080+ unsigned int offset)8181+{8282+ return readl(chan->reg_base + offset);8383+}8484+8585+static inline void ioatdma_chan_write8(struct ioat_dma_chan *chan,8686+ unsigned int offset, u8 value)8787+{8888+ writeb(value, chan->reg_base + offset);8989+}9090+9191+static inline void ioatdma_chan_write16(struct ioat_dma_chan *chan,9292+ unsigned int offset, u16 value)9393+{9494+ writew(value, chan->reg_base + offset);9595+}9696+9797+static inline void ioatdma_chan_write32(struct ioat_dma_chan *chan,9898+ unsigned int offset, u32 value)9999+{100100+ writel(value, chan->reg_base + offset);101101+}102102+103103+#if (BITS_PER_LONG == 64)104104+static inline u64 ioatdma_chan_read64(struct ioat_dma_chan *chan,105105+ unsigned int offset)106106+{107107+ return readq(chan->reg_base + offset);108108+}109109+110110+static inline void ioatdma_chan_write64(struct ioat_dma_chan *chan,111111+ unsigned int offset, u64 value)112112+{113113+ writeq(value, chan->reg_base + offset);114114+}115115+#endif116116+117117+#endif /* IOATDMA_IO_H */118118+
+126
drivers/dma/ioatdma_registers.h
/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */
#ifndef _IOAT_REGISTERS_H_
#define _IOAT_REGISTERS_H_


/* MMIO Device Registers */
#define IOAT_CHANCNT_OFFSET			0x00	/*  8-bit */

#define IOAT_XFERCAP_OFFSET			0x01	/*  8-bit */
#define IOAT_XFERCAP_4KB			12
#define IOAT_XFERCAP_8KB			13
#define IOAT_XFERCAP_16KB			14
#define IOAT_XFERCAP_32KB			15
#define IOAT_XFERCAP_32GB			0

#define IOAT_GENCTRL_OFFSET			0x02	/*  8-bit */
#define IOAT_GENCTRL_DEBUG_EN			0x01

#define IOAT_INTRCTRL_OFFSET			0x03	/*  8-bit */
#define IOAT_INTRCTRL_MASTER_INT_EN		0x01	/* Master Interrupt Enable */
#define IOAT_INTRCTRL_INT_STATUS		0x02	/* ATTNSTATUS -or- Channel Int */
#define IOAT_INTRCTRL_INT			0x04	/* INT_STATUS -and- MASTER_INT_EN */

#define IOAT_ATTNSTATUS_OFFSET			0x04	/* Each bit is a channel */

#define IOAT_VER_OFFSET				0x08	/*  8-bit */
#define IOAT_VER_MAJOR_MASK			0xF0
#define IOAT_VER_MINOR_MASK			0x0F
#define GET_IOAT_VER_MAJOR(x)			((x) & IOAT_VER_MAJOR_MASK)
#define GET_IOAT_VER_MINOR(x)			((x) & IOAT_VER_MINOR_MASK)

#define IOAT_PERPORTOFFSET_OFFSET		0x0A	/* 16-bit */

#define IOAT_INTRDELAY_OFFSET			0x0C	/* 16-bit */
#define IOAT_INTRDELAY_INT_DELAY_MASK		0x3FFF	/* Interrupt Delay Time */
/* NOTE(review): "COALESE" typo is in the identifier itself; renaming would break users */
#define IOAT_INTRDELAY_COALESE_SUPPORT		0x8000	/* Interrupt Coalescing Supported */

#define IOAT_DEVICE_STATUS_OFFSET		0x0E	/* 16-bit */
#define IOAT_DEVICE_STATUS_DEGRADED_MODE	0x0001


#define IOAT_CHANNEL_MMIO_SIZE			0x80	/* Each Channel MMIO space is this size */

/* DMA Channel Registers (offsets relative to the channel's MMIO window) */
#define IOAT_CHANCTRL_OFFSET			0x00	/* 16-bit Channel Control Register */
#define IOAT_CHANCTRL_CHANNEL_PRIORITY_MASK	0xF000
#define IOAT_CHANCTRL_CHANNEL_IN_USE		0x0100
#define IOAT_CHANCTRL_DESCRIPTOR_ADDR_SNOOP_CONTROL	0x0020
#define IOAT_CHANCTRL_ERR_INT_EN		0x0010
#define IOAT_CHANCTRL_ANY_ERR_ABORT_EN		0x0008
#define IOAT_CHANCTRL_ERR_COMPLETION_EN		0x0004
#define IOAT_CHANCTRL_INT_DISABLE		0x0001

#define IOAT_DMA_COMP_OFFSET			0x02	/* 16-bit DMA channel compatibility */
#define IOAT_DMA_COMP_V1			0x0001	/* Compatibility with DMA version 1 */

/*
 * NOTE(review): the CHANSTS masks below are wider than 32 bits; unsuffixed
 * hex constants get type (unsigned) long long automatically in C99, but an
 * explicit ULL suffix would make the intent clearer — consider adding one.
 */
#define IOAT_CHANSTS_OFFSET			0x04	/* 64-bit Channel Status Register */
#define IOAT_CHANSTS_OFFSET_LOW			0x04
#define IOAT_CHANSTS_OFFSET_HIGH		0x08
#define IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR	0xFFFFFFFFFFFFFFC0
#define IOAT_CHANSTS_SOFT_ERR			0x0000000000000010
#define IOAT_CHANSTS_DMA_TRANSFER_STATUS	0x0000000000000007
#define IOAT_CHANSTS_DMA_TRANSFER_STATUS_ACTIVE	0x0
#define IOAT_CHANSTS_DMA_TRANSFER_STATUS_DONE	0x1
#define IOAT_CHANSTS_DMA_TRANSFER_STATUS_SUSPENDED	0x2
#define IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED	0x3

#define IOAT_CHAINADDR_OFFSET			0x0C	/* 64-bit Descriptor Chain Address Register */
#define IOAT_CHAINADDR_OFFSET_LOW		0x0C
#define IOAT_CHAINADDR_OFFSET_HIGH		0x10

#define IOAT_CHANCMD_OFFSET			0x14	/* 8-bit DMA Channel Command Register */
#define IOAT_CHANCMD_RESET			0x20
#define IOAT_CHANCMD_RESUME			0x10
#define IOAT_CHANCMD_ABORT			0x08
#define IOAT_CHANCMD_SUSPEND			0x04
#define IOAT_CHANCMD_APPEND			0x02
#define IOAT_CHANCMD_START			0x01

#define IOAT_CHANCMP_OFFSET			0x18	/* 64-bit Channel Completion Address Register */
#define IOAT_CHANCMP_OFFSET_LOW			0x18
#define IOAT_CHANCMP_OFFSET_HIGH		0x1C

#define IOAT_CDAR_OFFSET			0x20	/* 64-bit Current Descriptor Address Register */
#define IOAT_CDAR_OFFSET_LOW			0x20
#define IOAT_CDAR_OFFSET_HIGH			0x24

#define IOAT_CHANERR_OFFSET			0x28	/* 32-bit Channel Error Register */
#define IOAT_CHANERR_DMA_TRANSFER_SRC_ADDR_ERR	0x0001
#define IOAT_CHANERR_DMA_TRANSFER_DEST_ADDR_ERR	0x0002
#define IOAT_CHANERR_NEXT_DESCRIPTOR_ADDR_ERR	0x0004
#define IOAT_CHANERR_NEXT_DESCRIPTOR_ALIGNMENT_ERR	0x0008
#define IOAT_CHANERR_CHAIN_ADDR_VALUE_ERR	0x0010
#define IOAT_CHANERR_CHANCMD_ERR		0x0020
#define IOAT_CHANERR_CHIPSET_UNCORRECTABLE_DATA_INTEGRITY_ERR	0x0040
#define IOAT_CHANERR_DMA_UNCORRECTABLE_DATA_INTEGRITY_ERR	0x0080
#define IOAT_CHANERR_READ_DATA_ERR		0x0100
#define IOAT_CHANERR_WRITE_DATA_ERR		0x0200
#define IOAT_CHANERR_DESCRIPTOR_CONTROL_ERR	0x0400
#define IOAT_CHANERR_DESCRIPTOR_LENGTH_ERR	0x0800
#define IOAT_CHANERR_COMPLETION_ADDR_ERR	0x1000
#define IOAT_CHANERR_INT_CONFIGURATION_ERR	0x2000
#define IOAT_CHANERR_SOFT_ERR			0x4000

#define IOAT_CHANERR_MASK_OFFSET		0x2C	/* 32-bit Channel Error Mask Register */

#endif /* _IOAT_REGISTERS_H_ */