#
# DMA engine configuration
#

menu "DMA Engine support"

# Core DMA-engine framework; drivers and clients select/depend on this.
config DMA_ENGINE
	bool "Support for DMA engines"
	---help---
	  DMA engines offload copy operations from the CPU to dedicated
	  hardware, allowing the copies to happen asynchronously.

endmenu
···11+/*22+ * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.33+ *44+ * This program is free software; you can redistribute it and/or modify it55+ * under the terms of the GNU General Public License as published by the Free66+ * Software Foundation; either version 2 of the License, or (at your option)77+ * any later version.88+ *99+ * This program is distributed in the hope that it will be useful, but WITHOUT1010+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or1111+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for1212+ * more details.1313+ *1414+ * You should have received a copy of the GNU General Public License along with1515+ * this program; if not, write to the Free Software Foundation, Inc., 591616+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.1717+ *1818+ * The full GNU General Public License is included in this distribution in the1919+ * file called COPYING.2020+ */2121+2222+/*2323+ * This code implements the DMA subsystem. 
 * It provides a HW-neutral interface
 * for other kernel code to use asynchronous memory copy capabilities,
 * if present, and allows different HW DMA drivers to register as providing
 * this capability.
 *
 * Due to the fact we are accelerating what is already a relatively fast
 * operation, the code goes to great lengths to avoid additional overhead,
 * such as locking.
 *
 * LOCKING:
 *
 * The subsystem keeps two global lists, dma_device_list and dma_client_list.
 * Both of these are protected by a mutex, dma_list_mutex.
 *
 * Each device has a channels list, which runs unlocked but is never modified
 * once the device is registered, it's just setup by the driver.
 *
 * Each client has a channels list, it's only modified under the client->lock
 * and in an RCU callback, so it's safe to read under rcu_read_lock().
 *
 * Each device has a kref, which is initialized to 1 when the device is
 * registered. A kref_put is done for each class_device registered. When the
 * class_device is released, the corresponding kref_put is done in the release
 * method. Every time one of the device's channels is allocated to a client,
 * a kref_get occurs. When the channel is freed, the corresponding kref_put
 * happens. The device's release function does a completion, so
 * unregister_device does a remove event, class_device_unregister, a kref_put
 * for the first reference, then waits on the completion for all other
 * references to finish.
 *
 * Each channel has an open-coded implementation of Rusty Russell's "bigref,"
 * with a kref and a per_cpu local_t. A single reference is set when on an
 * ADDED event, and removed with a REMOVE event. Net DMA client takes an
 * extra reference per outstanding transaction. The release function does a
 * kref_put on the device.
-ChrisL5858+ */5959+6060+#include <linux/init.h>6161+#include <linux/module.h>6262+#include <linux/device.h>6363+#include <linux/dmaengine.h>6464+#include <linux/hardirq.h>6565+#include <linux/spinlock.h>6666+#include <linux/percpu.h>6767+#include <linux/rcupdate.h>6868+#include <linux/mutex.h>6969+7070+static DEFINE_MUTEX(dma_list_mutex);7171+static LIST_HEAD(dma_device_list);7272+static LIST_HEAD(dma_client_list);7373+7474+/* --- sysfs implementation --- */7575+7676+static ssize_t show_memcpy_count(struct class_device *cd, char *buf)7777+{7878+ struct dma_chan *chan = container_of(cd, struct dma_chan, class_dev);7979+ unsigned long count = 0;8080+ int i;8181+8282+ for_each_cpu(i)8383+ count += per_cpu_ptr(chan->local, i)->memcpy_count;8484+8585+ return sprintf(buf, "%lu\n", count);8686+}8787+8888+static ssize_t show_bytes_transferred(struct class_device *cd, char *buf)8989+{9090+ struct dma_chan *chan = container_of(cd, struct dma_chan, class_dev);9191+ unsigned long count = 0;9292+ int i;9393+9494+ for_each_cpu(i)9595+ count += per_cpu_ptr(chan->local, i)->bytes_transferred;9696+9797+ return sprintf(buf, "%lu\n", count);9898+}9999+100100+static ssize_t show_in_use(struct class_device *cd, char *buf)101101+{102102+ struct dma_chan *chan = container_of(cd, struct dma_chan, class_dev);103103+104104+ return sprintf(buf, "%d\n", (chan->client ? 
1 : 0));105105+}106106+107107+static struct class_device_attribute dma_class_attrs[] = {108108+ __ATTR(memcpy_count, S_IRUGO, show_memcpy_count, NULL),109109+ __ATTR(bytes_transferred, S_IRUGO, show_bytes_transferred, NULL),110110+ __ATTR(in_use, S_IRUGO, show_in_use, NULL),111111+ __ATTR_NULL112112+};113113+114114+static void dma_async_device_cleanup(struct kref *kref);115115+116116+static void dma_class_dev_release(struct class_device *cd)117117+{118118+ struct dma_chan *chan = container_of(cd, struct dma_chan, class_dev);119119+ kref_put(&chan->device->refcount, dma_async_device_cleanup);120120+}121121+122122+static struct class dma_devclass = {123123+ .name = "dma",124124+ .class_dev_attrs = dma_class_attrs,125125+ .release = dma_class_dev_release,126126+};127127+128128+/* --- client and device registration --- */129129+130130+/**131131+ * dma_client_chan_alloc - try to allocate a channel to a client132132+ * @client: &dma_client133133+ *134134+ * Called with dma_list_mutex held.135135+ */136136+static struct dma_chan *dma_client_chan_alloc(struct dma_client *client)137137+{138138+ struct dma_device *device;139139+ struct dma_chan *chan;140140+ unsigned long flags;141141+ int desc; /* allocated descriptor count */142142+143143+ /* Find a channel, any DMA engine will do */144144+ list_for_each_entry(device, &dma_device_list, global_node) {145145+ list_for_each_entry(chan, &device->channels, device_node) {146146+ if (chan->client)147147+ continue;148148+149149+ desc = chan->device->device_alloc_chan_resources(chan);150150+ if (desc >= 0) {151151+ kref_get(&device->refcount);152152+ kref_init(&chan->refcount);153153+ chan->slow_ref = 0;154154+ INIT_RCU_HEAD(&chan->rcu);155155+ chan->client = client;156156+ spin_lock_irqsave(&client->lock, flags);157157+ list_add_tail_rcu(&chan->client_node,158158+ &client->channels);159159+ spin_unlock_irqrestore(&client->lock, flags);160160+ return chan;161161+ }162162+ }163163+ }164164+165165+ return 
NULL;166166+}167167+168168+/**169169+ * dma_client_chan_free - release a DMA channel170170+ * @chan: &dma_chan171171+ */172172+void dma_chan_cleanup(struct kref *kref)173173+{174174+ struct dma_chan *chan = container_of(kref, struct dma_chan, refcount);175175+ chan->device->device_free_chan_resources(chan);176176+ chan->client = NULL;177177+ kref_put(&chan->device->refcount, dma_async_device_cleanup);178178+}179179+180180+static void dma_chan_free_rcu(struct rcu_head *rcu)181181+{182182+ struct dma_chan *chan = container_of(rcu, struct dma_chan, rcu);183183+ int bias = 0x7FFFFFFF;184184+ int i;185185+ for_each_cpu(i)186186+ bias -= local_read(&per_cpu_ptr(chan->local, i)->refcount);187187+ atomic_sub(bias, &chan->refcount.refcount);188188+ kref_put(&chan->refcount, dma_chan_cleanup);189189+}190190+191191+static void dma_client_chan_free(struct dma_chan *chan)192192+{193193+ atomic_add(0x7FFFFFFF, &chan->refcount.refcount);194194+ chan->slow_ref = 1;195195+ call_rcu(&chan->rcu, dma_chan_free_rcu);196196+}197197+198198+/**199199+ * dma_chans_rebalance - reallocate channels to clients200200+ *201201+ * When the number of DMA channel in the system changes,202202+ * channels need to be rebalanced among clients203203+ */204204+static void dma_chans_rebalance(void)205205+{206206+ struct dma_client *client;207207+ struct dma_chan *chan;208208+ unsigned long flags;209209+210210+ mutex_lock(&dma_list_mutex);211211+212212+ list_for_each_entry(client, &dma_client_list, global_node) {213213+ while (client->chans_desired > client->chan_count) {214214+ chan = dma_client_chan_alloc(client);215215+ if (!chan)216216+ break;217217+ client->chan_count++;218218+ client->event_callback(client,219219+ chan,220220+ DMA_RESOURCE_ADDED);221221+ }222222+ while (client->chans_desired < client->chan_count) {223223+ spin_lock_irqsave(&client->lock, flags);224224+ chan = list_entry(client->channels.next,225225+ struct dma_chan,226226+ client_node);227227+ list_del_rcu(&chan->client_node);228228+ 
spin_unlock_irqrestore(&client->lock, flags);229229+ client->chan_count--;230230+ client->event_callback(client,231231+ chan,232232+ DMA_RESOURCE_REMOVED);233233+ dma_client_chan_free(chan);234234+ }235235+ }236236+237237+ mutex_unlock(&dma_list_mutex);238238+}239239+240240+/**241241+ * dma_async_client_register - allocate and register a &dma_client242242+ * @event_callback: callback for notification of channel addition/removal243243+ */244244+struct dma_client *dma_async_client_register(dma_event_callback event_callback)245245+{246246+ struct dma_client *client;247247+248248+ client = kzalloc(sizeof(*client), GFP_KERNEL);249249+ if (!client)250250+ return NULL;251251+252252+ INIT_LIST_HEAD(&client->channels);253253+ spin_lock_init(&client->lock);254254+ client->chans_desired = 0;255255+ client->chan_count = 0;256256+ client->event_callback = event_callback;257257+258258+ mutex_lock(&dma_list_mutex);259259+ list_add_tail(&client->global_node, &dma_client_list);260260+ mutex_unlock(&dma_list_mutex);261261+262262+ return client;263263+}264264+265265+/**266266+ * dma_async_client_unregister - unregister a client and free the &dma_client267267+ * @client:268268+ *269269+ * Force frees any allocated DMA channels, frees the &dma_client memory270270+ */271271+void dma_async_client_unregister(struct dma_client *client)272272+{273273+ struct dma_chan *chan;274274+275275+ if (!client)276276+ return;277277+278278+ rcu_read_lock();279279+ list_for_each_entry_rcu(chan, &client->channels, client_node)280280+ dma_client_chan_free(chan);281281+ rcu_read_unlock();282282+283283+ mutex_lock(&dma_list_mutex);284284+ list_del(&client->global_node);285285+ mutex_unlock(&dma_list_mutex);286286+287287+ kfree(client);288288+ dma_chans_rebalance();289289+}290290+291291+/**292292+ * dma_async_client_chan_request - request DMA channels293293+ * @client: &dma_client294294+ * @number: count of DMA channels requested295295+ *296296+ * Clients call dma_async_client_chan_request() to specify how 
many297297+ * DMA channels they need, 0 to free all currently allocated.298298+ * The resulting allocations/frees are indicated to the client via the299299+ * event callback.300300+ */301301+void dma_async_client_chan_request(struct dma_client *client,302302+ unsigned int number)303303+{304304+ client->chans_desired = number;305305+ dma_chans_rebalance();306306+}307307+308308+/**309309+ * dma_async_device_register -310310+ * @device: &dma_device311311+ */312312+int dma_async_device_register(struct dma_device *device)313313+{314314+ static int id;315315+ int chancnt = 0;316316+ struct dma_chan* chan;317317+318318+ if (!device)319319+ return -ENODEV;320320+321321+ init_completion(&device->done);322322+ kref_init(&device->refcount);323323+ device->dev_id = id++;324324+325325+ /* represent channels in sysfs. Probably want devs too */326326+ list_for_each_entry(chan, &device->channels, device_node) {327327+ chan->local = alloc_percpu(typeof(*chan->local));328328+ if (chan->local == NULL)329329+ continue;330330+331331+ chan->chan_id = chancnt++;332332+ chan->class_dev.class = &dma_devclass;333333+ chan->class_dev.dev = NULL;334334+ snprintf(chan->class_dev.class_id, BUS_ID_SIZE, "dma%dchan%d",335335+ device->dev_id, chan->chan_id);336336+337337+ kref_get(&device->refcount);338338+ class_device_register(&chan->class_dev);339339+ }340340+341341+ mutex_lock(&dma_list_mutex);342342+ list_add_tail(&device->global_node, &dma_device_list);343343+ mutex_unlock(&dma_list_mutex);344344+345345+ dma_chans_rebalance();346346+347347+ return 0;348348+}349349+350350+/**351351+ * dma_async_device_unregister -352352+ * @device: &dma_device353353+ */354354+static void dma_async_device_cleanup(struct kref *kref)355355+{356356+ struct dma_device *device;357357+358358+ device = container_of(kref, struct dma_device, refcount);359359+ complete(&device->done);360360+}361361+362362+void dma_async_device_unregister(struct dma_device* device)363363+{364364+ struct dma_chan *chan;365365+ unsigned 
long flags;366366+367367+ mutex_lock(&dma_list_mutex);368368+ list_del(&device->global_node);369369+ mutex_unlock(&dma_list_mutex);370370+371371+ list_for_each_entry(chan, &device->channels, device_node) {372372+ if (chan->client) {373373+ spin_lock_irqsave(&chan->client->lock, flags);374374+ list_del(&chan->client_node);375375+ chan->client->chan_count--;376376+ spin_unlock_irqrestore(&chan->client->lock, flags);377377+ chan->client->event_callback(chan->client,378378+ chan,379379+ DMA_RESOURCE_REMOVED);380380+ dma_client_chan_free(chan);381381+ }382382+ class_device_unregister(&chan->class_dev);383383+ }384384+ dma_chans_rebalance();385385+386386+ kref_put(&device->refcount, dma_async_device_cleanup);387387+ wait_for_completion(&device->done);388388+}389389+390390+static int __init dma_bus_init(void)391391+{392392+ mutex_init(&dma_list_mutex);393393+ return class_register(&dma_devclass);394394+}395395+396396+subsys_initcall(dma_bus_init);397397+398398+EXPORT_SYMBOL(dma_async_client_register);399399+EXPORT_SYMBOL(dma_async_client_unregister);400400+EXPORT_SYMBOL(dma_async_client_chan_request);401401+EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);402402+EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);403403+EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);404404+EXPORT_SYMBOL(dma_async_memcpy_complete);405405+EXPORT_SYMBOL(dma_async_memcpy_issue_pending);406406+EXPORT_SYMBOL(dma_async_device_register);407407+EXPORT_SYMBOL(dma_async_device_unregister);408408+EXPORT_SYMBOL(dma_chan_cleanup);
+337
include/linux/dmaengine.h
···11+/*22+ * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.33+ *44+ * This program is free software; you can redistribute it and/or modify it55+ * under the terms of the GNU General Public License as published by the Free66+ * Software Foundation; either version 2 of the License, or (at your option)77+ * any later version.88+ *99+ * This program is distributed in the hope that it will be useful, but WITHOUT1010+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or1111+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for1212+ * more details.1313+ *1414+ * You should have received a copy of the GNU General Public License along with1515+ * this program; if not, write to the Free Software Foundation, Inc., 591616+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.1717+ *1818+ * The full GNU General Public License is included in this distribution in the1919+ * file called COPYING.2020+ */2121+#ifndef DMAENGINE_H2222+#define DMAENGINE_H2323+#include <linux/config.h>2424+#ifdef CONFIG_DMA_ENGINE2525+2626+#include <linux/device.h>2727+#include <linux/uio.h>2828+#include <linux/kref.h>2929+#include <linux/completion.h>3030+#include <linux/rcupdate.h>3131+3232+/**3333+ * enum dma_event - resource PNP/power managment events3434+ * @DMA_RESOURCE_SUSPEND: DMA device going into low power state3535+ * @DMA_RESOURCE_RESUME: DMA device returning to full power3636+ * @DMA_RESOURCE_ADDED: DMA device added to the system3737+ * @DMA_RESOURCE_REMOVED: DMA device removed from the system3838+ */3939+enum dma_event {4040+ DMA_RESOURCE_SUSPEND,4141+ DMA_RESOURCE_RESUME,4242+ DMA_RESOURCE_ADDED,4343+ DMA_RESOURCE_REMOVED,4444+};4545+4646+/**4747+ * typedef dma_cookie_t4848+ *4949+ * if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code5050+ */5151+typedef s32 dma_cookie_t;5252+5353+#define dma_submit_error(cookie) ((cookie) < 0 ? 
1 : 0)5454+5555+/**5656+ * enum dma_status - DMA transaction status5757+ * @DMA_SUCCESS: transaction completed successfully5858+ * @DMA_IN_PROGRESS: transaction not yet processed5959+ * @DMA_ERROR: transaction failed6060+ */6161+enum dma_status {6262+ DMA_SUCCESS,6363+ DMA_IN_PROGRESS,6464+ DMA_ERROR,6565+};6666+6767+/**6868+ * struct dma_chan_percpu - the per-CPU part of struct dma_chan6969+ * @refcount: local_t used for open-coded "bigref" counting7070+ * @memcpy_count: transaction counter7171+ * @bytes_transferred: byte counter7272+ */7373+7474+struct dma_chan_percpu {7575+ local_t refcount;7676+ /* stats */7777+ unsigned long memcpy_count;7878+ unsigned long bytes_transferred;7979+};8080+8181+/**8282+ * struct dma_chan - devices supply DMA channels, clients use them8383+ * @client: ptr to the client user of this chan, will be NULL when unused8484+ * @device: ptr to the dma device who supplies this channel, always !NULL8585+ * @cookie: last cookie value returned to client8686+ * @chan_id:8787+ * @class_dev:8888+ * @refcount: kref, used in "bigref" slow-mode8989+ * @slow_ref:9090+ * @rcu:9191+ * @client_node: used to add this to the client chan list9292+ * @device_node: used to add this to the device chan list9393+ * @local: per-cpu pointer to a struct dma_chan_percpu9494+ */9595+struct dma_chan {9696+ struct dma_client *client;9797+ struct dma_device *device;9898+ dma_cookie_t cookie;9999+100100+ /* sysfs */101101+ int chan_id;102102+ struct class_device class_dev;103103+104104+ struct kref refcount;105105+ int slow_ref;106106+ struct rcu_head rcu;107107+108108+ struct list_head client_node;109109+ struct list_head device_node;110110+ struct dma_chan_percpu *local;111111+};112112+113113+void dma_chan_cleanup(struct kref *kref);114114+115115+static inline void dma_chan_get(struct dma_chan *chan)116116+{117117+ if (unlikely(chan->slow_ref))118118+ kref_get(&chan->refcount);119119+ else {120120+ local_inc(&(per_cpu_ptr(chan->local, get_cpu())->refcount));121121+ 
put_cpu();122122+ }123123+}124124+125125+static inline void dma_chan_put(struct dma_chan *chan)126126+{127127+ if (unlikely(chan->slow_ref))128128+ kref_put(&chan->refcount, dma_chan_cleanup);129129+ else {130130+ local_dec(&(per_cpu_ptr(chan->local, get_cpu())->refcount));131131+ put_cpu();132132+ }133133+}134134+135135+/*136136+ * typedef dma_event_callback - function pointer to a DMA event callback137137+ */138138+typedef void (*dma_event_callback) (struct dma_client *client,139139+ struct dma_chan *chan, enum dma_event event);140140+141141+/**142142+ * struct dma_client - info on the entity making use of DMA services143143+ * @event_callback: func ptr to call when something happens144144+ * @chan_count: number of chans allocated145145+ * @chans_desired: number of chans requested. Can be +/- chan_count146146+ * @lock: protects access to the channels list147147+ * @channels: the list of DMA channels allocated148148+ * @global_node: list_head for global dma_client_list149149+ */150150+struct dma_client {151151+ dma_event_callback event_callback;152152+ unsigned int chan_count;153153+ unsigned int chans_desired;154154+155155+ spinlock_t lock;156156+ struct list_head channels;157157+ struct list_head global_node;158158+};159159+160160+/**161161+ * struct dma_device - info on the entity supplying DMA services162162+ * @chancnt: how many DMA channels are supported163163+ * @channels: the list of struct dma_chan164164+ * @global_node: list_head for global dma_device_list165165+ * @refcount:166166+ * @done:167167+ * @dev_id:168168+ * Other func ptrs: used to make use of this device's capabilities169169+ */170170+struct dma_device {171171+172172+ unsigned int chancnt;173173+ struct list_head channels;174174+ struct list_head global_node;175175+176176+ struct kref refcount;177177+ struct completion done;178178+179179+ int dev_id;180180+181181+ int (*device_alloc_chan_resources)(struct dma_chan *chan);182182+ void (*device_free_chan_resources)(struct dma_chan 
*chan);183183+ dma_cookie_t (*device_memcpy_buf_to_buf)(struct dma_chan *chan,184184+ void *dest, void *src, size_t len);185185+ dma_cookie_t (*device_memcpy_buf_to_pg)(struct dma_chan *chan,186186+ struct page *page, unsigned int offset, void *kdata,187187+ size_t len);188188+ dma_cookie_t (*device_memcpy_pg_to_pg)(struct dma_chan *chan,189189+ struct page *dest_pg, unsigned int dest_off,190190+ struct page *src_pg, unsigned int src_off, size_t len);191191+ enum dma_status (*device_memcpy_complete)(struct dma_chan *chan,192192+ dma_cookie_t cookie, dma_cookie_t *last,193193+ dma_cookie_t *used);194194+ void (*device_memcpy_issue_pending)(struct dma_chan *chan);195195+};196196+197197+/* --- public DMA engine API --- */198198+199199+struct dma_client *dma_async_client_register(dma_event_callback event_callback);200200+void dma_async_client_unregister(struct dma_client *client);201201+void dma_async_client_chan_request(struct dma_client *client,202202+ unsigned int number);203203+204204+/**205205+ * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses206206+ * @chan: DMA channel to offload copy to207207+ * @dest: destination address (virtual)208208+ * @src: source address (virtual)209209+ * @len: length210210+ *211211+ * Both @dest and @src must be mappable to a bus address according to the212212+ * DMA mapping API rules for streaming mappings.213213+ * Both @dest and @src must stay memory resident (kernel memory or locked214214+ * user space pages)215215+ */216216+static inline dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,217217+ void *dest, void *src, size_t len)218218+{219219+ int cpu = get_cpu();220220+ per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;221221+ per_cpu_ptr(chan->local, cpu)->memcpy_count++;222222+ put_cpu();223223+224224+ return chan->device->device_memcpy_buf_to_buf(chan, dest, src, len);225225+}226226+227227+/**228228+ * dma_async_memcpy_buf_to_pg - offloaded copy229229+ * @chan: DMA channel to offload 
copy to230230+ * @page: destination page231231+ * @offset: offset in page to copy to232232+ * @kdata: source address (virtual)233233+ * @len: length234234+ *235235+ * Both @page/@offset and @kdata must be mappable to a bus address according236236+ * to the DMA mapping API rules for streaming mappings.237237+ * Both @page/@offset and @kdata must stay memory resident (kernel memory or238238+ * locked user space pages)239239+ */240240+static inline dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,241241+ struct page *page, unsigned int offset, void *kdata, size_t len)242242+{243243+ int cpu = get_cpu();244244+ per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;245245+ per_cpu_ptr(chan->local, cpu)->memcpy_count++;246246+ put_cpu();247247+248248+ return chan->device->device_memcpy_buf_to_pg(chan, page, offset,249249+ kdata, len);250250+}251251+252252+/**253253+ * dma_async_memcpy_buf_to_pg - offloaded copy254254+ * @chan: DMA channel to offload copy to255255+ * @dest_page: destination page256256+ * @dest_off: offset in page to copy to257257+ * @src_page: source page258258+ * @src_off: offset in page to copy from259259+ * @len: length260260+ *261261+ * Both @dest_page/@dest_off and @src_page/@src_off must be mappable to a bus262262+ * address according to the DMA mapping API rules for streaming mappings.263263+ * Both @dest_page/@dest_off and @src_page/@src_off must stay memory resident264264+ * (kernel memory or locked user space pages)265265+ */266266+static inline dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan,267267+ struct page *dest_pg, unsigned int dest_off, struct page *src_pg,268268+ unsigned int src_off, size_t len)269269+{270270+ int cpu = get_cpu();271271+ per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;272272+ per_cpu_ptr(chan->local, cpu)->memcpy_count++;273273+ put_cpu();274274+275275+ return chan->device->device_memcpy_pg_to_pg(chan, dest_pg, dest_off,276276+ src_pg, src_off, len);277277+}278278+279279+/**280280+ * 
dma_async_memcpy_issue_pending - flush pending copies to HW281281+ * @chan:282282+ *283283+ * This allows drivers to push copies to HW in batches,284284+ * reducing MMIO writes where possible.285285+ */286286+static inline void dma_async_memcpy_issue_pending(struct dma_chan *chan)287287+{288288+ return chan->device->device_memcpy_issue_pending(chan);289289+}290290+291291+/**292292+ * dma_async_memcpy_complete - poll for transaction completion293293+ * @chan: DMA channel294294+ * @cookie: transaction identifier to check status of295295+ * @last: returns last completed cookie, can be NULL296296+ * @used: returns last issued cookie, can be NULL297297+ *298298+ * If @last and @used are passed in, upon return they reflect the driver299299+ * internal state and can be used with dma_async_is_complete() to check300300+ * the status of multiple cookies without re-checking hardware state.301301+ */302302+static inline enum dma_status dma_async_memcpy_complete(struct dma_chan *chan,303303+ dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)304304+{305305+ return chan->device->device_memcpy_complete(chan, cookie, last, used);306306+}307307+308308+/**309309+ * dma_async_is_complete - test a cookie against chan state310310+ * @cookie: transaction identifier to test status of311311+ * @last_complete: last know completed transaction312312+ * @last_used: last cookie value handed out313313+ *314314+ * dma_async_is_complete() is used in dma_async_memcpy_complete()315315+ * the test logic is seperated for lightweight testing of multiple cookies316316+ */317317+static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,318318+ dma_cookie_t last_complete, dma_cookie_t last_used)319319+{320320+ if (last_complete <= last_used) {321321+ if ((cookie <= last_complete) || (cookie > last_used))322322+ return DMA_SUCCESS;323323+ } else {324324+ if ((cookie <= last_complete) && (cookie > last_used))325325+ return DMA_SUCCESS;326326+ }327327+ return 
DMA_IN_PROGRESS;328328+}329329+330330+331331+/* --- DMA device --- */332332+333333+int dma_async_device_register(struct dma_device *device);334334+void dma_async_device_unregister(struct dma_device *device);335335+336336+#endif /* CONFIG_DMA_ENGINE */337337+#endif /* DMAENGINE_H */