/* dmaengine.h at v2.6.29 */
/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */
#ifndef DMAENGINE_H
#define DMAENGINE_H

#include <linux/device.h>
#include <linux/uio.h>
#include <linux/kref.h>
#include <linux/completion.h>
#include <linux/rcupdate.h>
#include <linux/dma-mapping.h>

/**
 * typedef dma_cookie_t - an opaque DMA cookie
 *
 * if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code
 */
typedef s32 dma_cookie_t;

#define dma_submit_error(cookie) ((cookie) < 0 ? 1 : 0)

/**
 * enum dma_status - DMA transaction status
 * @DMA_SUCCESS: transaction completed successfully
 * @DMA_IN_PROGRESS: transaction not yet processed
 * @DMA_ERROR: transaction failed
 */
enum dma_status {
	DMA_SUCCESS,
	DMA_IN_PROGRESS,
	DMA_ERROR,
};

/**
 * enum dma_transaction_type - DMA transaction types/indexes
 */
enum dma_transaction_type {
	DMA_MEMCPY,
	DMA_XOR,
	DMA_PQ_XOR,
	DMA_DUAL_XOR,
	DMA_PQ_UPDATE,
	DMA_ZERO_SUM,
	DMA_PQ_ZERO_SUM,
	DMA_MEMSET,
	DMA_MEMCPY_CRC32C,
	DMA_INTERRUPT,
	DMA_PRIVATE,
	DMA_SLAVE,
};

/* last transaction type for creation of the capabilities mask */
#define DMA_TX_TYPE_END (DMA_SLAVE + 1)

/**
 * enum dma_ctrl_flags - DMA flags to augment operation preparation,
 *	control completion, and communicate status.
 * @DMA_PREP_INTERRUPT: trigger an interrupt (callback) upon completion of
 *	this transaction
 * @DMA_CTRL_ACK: the descriptor cannot be reused until the client
 *	acknowledges receipt, i.e. has a chance to establish any
 *	dependency chains
 * @DMA_COMPL_SKIP_SRC_UNMAP: set to disable dma-unmapping the source buffer(s)
 * @DMA_COMPL_SKIP_DEST_UNMAP: set to disable dma-unmapping the destination(s)
 */
enum dma_ctrl_flags {
	DMA_PREP_INTERRUPT = (1 << 0),
	DMA_CTRL_ACK = (1 << 1),
	DMA_COMPL_SKIP_SRC_UNMAP = (1 << 2),
	DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3),
};
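/*
 * Illustrative sketch (not part of this header): DMA_CTRL_ACK gates
 * descriptor reuse.  A provider's cleanup path may only recycle a
 * descriptor once the client has acknowledged it; the hypothetical
 * helper below uses async_tx_test_ack(), defined later in this file.
 */
#if 0
static bool my_desc_reusable(struct dma_async_tx_descriptor *tx)
{
	/* true once the client has called async_tx_ack() on this tx */
	return async_tx_test_ack(tx);
}
#endif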
/**
 * dma_cap_mask_t - capabilities bitmap modeled after cpumask_t.
 * See linux/cpumask.h
 */
typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;

/**
 * struct dma_chan_percpu - the per-CPU part of struct dma_chan
 * @memcpy_count: transaction counter
 * @bytes_transferred: byte counter
 */
struct dma_chan_percpu {
	/* stats */
	unsigned long memcpy_count;
	unsigned long bytes_transferred;
};

/**
 * struct dma_chan - devices supply DMA channels, clients use them
 * @device: ptr to the dma device that supplies this channel, always !%NULL
 * @cookie: last cookie value returned to client
 * @chan_id: channel ID for sysfs
 * @dev: class device for sysfs
 * @device_node: used to add this to the device chan list
 * @local: per-cpu pointer to a struct dma_chan_percpu
 * @client_count: how many clients are using this channel
 * @table_count: number of appearances in the mem-to-mem allocation table
 * @private: private data for certain client-channel associations
 */
struct dma_chan {
	struct dma_device *device;
	dma_cookie_t cookie;

	/* sysfs */
	int chan_id;
	struct dma_chan_dev *dev;

	struct list_head device_node;
	struct dma_chan_percpu *local;
	int client_count;
	int table_count;
	void *private;
};

/**
 * struct dma_chan_dev - relate sysfs device node to backing channel device
 * @chan: driver channel device
 * @device: sysfs device
 * @dev_id: parent dma_device dev_id
 * @idr_ref: reference count to gate release of dma_device dev_id
 */
struct dma_chan_dev {
	struct dma_chan *chan;
	struct device device;
	int dev_id;
	atomic_t *idr_ref;
};

static inline const char *dma_chan_name(struct dma_chan *chan)
{
	return dev_name(&chan->dev->device);
}

void dma_chan_cleanup(struct kref *kref);

/**
 * typedef dma_filter_fn - callback filter for dma_request_channel
 * @chan: channel to be reviewed
 * @filter_param: opaque parameter passed through dma_request_channel
 *
 * When this optional parameter is specified in a call to dma_request_channel,
 * each 'suitable' channel, i.e. a non-busy channel that satisfies the given
 * capability mask, is passed to this routine for further disposition before
 * being returned.  The routine returns 'true' to indicate that the channel
 * is suitable.
 */
typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
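/*
 * Illustrative sketch (not part of this header): a client can pass a
 * filter like the hypothetical my_filter() below to dma_request_channel()
 * (declared later in this file) to claim only a channel backed by a
 * specific device.
 */
#if 0
static bool my_filter(struct dma_chan *chan, void *filter_param)
{
	struct device *wanted = filter_param;

	/* accept the channel only if its provider matches @wanted */
	return chan->device->dev == wanted;
}

/*
 * A caller might then do (names hypothetical):
 *	chan = dma_request_channel(mask, my_filter, my_platform_dev);
 */
#endif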
typedef void (*dma_async_tx_callback)(void *dma_async_param);
/**
 * struct dma_async_tx_descriptor - async transaction descriptor
 * ---dma generic offload fields---
 * @cookie: tracking cookie for this transaction, set to -EBUSY if
 *	this tx is sitting on a dependency list
 * @flags: flags to augment operation preparation, control completion, and
 *	communicate status
 * @phys: physical address of the descriptor
 * @tx_list: driver common field for operations that require multiple
 *	descriptors
 * @chan: target channel for this operation
 * @tx_submit: set the prepared descriptor(s) to be executed by the engine
 * @callback: routine to call after this operation is complete
 * @callback_param: general parameter to pass to the callback routine
 * ---async_tx api specific fields---
 * @next: at completion submit this descriptor
 * @parent: pointer to the next level up in the dependency chain
 * @lock: protect the parent and next pointers
 */
struct dma_async_tx_descriptor {
	dma_cookie_t cookie;
	enum dma_ctrl_flags flags; /* not a 'long' to pack with cookie */
	dma_addr_t phys;
	struct list_head tx_list;
	struct dma_chan *chan;
	dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
	dma_async_tx_callback callback;
	void *callback_param;
	struct dma_async_tx_descriptor *next;
	struct dma_async_tx_descriptor *parent;
	spinlock_t lock;
};

/**
 * struct dma_device - info on the entity supplying DMA services
 * @chancnt: how many DMA channels are supported
 * @channels: the list of struct dma_chan
 * @global_node: list_head for global dma_device_list
 * @cap_mask: one or more dma_capability flags
 * @max_xor: maximum number of xor sources, 0 if no capability
 * @dev_id: unique device ID
 * @dev: struct device reference for dma mapping api
 * @device_alloc_chan_resources: allocate resources and return the
 *	number of allocated descriptors
 * @device_free_chan_resources: release DMA channel's resources
 * @device_prep_dma_memcpy: prepares a memcpy operation
 * @device_prep_dma_xor: prepares a xor operation
 * @device_prep_dma_zero_sum: prepares a zero_sum operation
 * @device_prep_dma_memset: prepares a memset operation
 * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
 * @device_prep_slave_sg: prepares a slave dma operation
 * @device_terminate_all: terminate all pending operations
 * @device_is_tx_complete: poll for transaction completion
 * @device_issue_pending: push pending transactions to hardware
 */
struct dma_device {
	unsigned int chancnt;
	struct list_head channels;
	struct list_head global_node;
	dma_cap_mask_t cap_mask;
	int max_xor;

	int dev_id;
	struct device *dev;

	int (*device_alloc_chan_resources)(struct dma_chan *chan);
	void (*device_free_chan_resources)(struct dma_chan *chan);

	struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_xor)(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		unsigned int src_cnt, size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_zero_sum)(
		struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt,
		size_t len, u32 *result, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_memset)(
		struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
		unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
		struct dma_chan *chan, unsigned long flags);

	struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_data_direction direction,
		unsigned long flags);
	void (*device_terminate_all)(struct dma_chan *chan);

	enum dma_status (*device_is_tx_complete)(struct dma_chan *chan,
			dma_cookie_t cookie, dma_cookie_t *last,
			dma_cookie_t *used);
	void (*device_issue_pending)(struct dma_chan *chan);
};
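/*
 * Illustrative sketch (not part of this header): the typical client
 * sequence against these operations is prep -> submit -> issue -> wait.
 * The function and variable names are hypothetical; dma_submit_error(),
 * dma_async_issue_pending() and dma_async_is_tx_complete() are defined
 * elsewhere in this file.
 */
#if 0
static int my_memcpy_offload(struct dma_chan *chan, dma_addr_t dest,
			     dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	tx = chan->device->device_prep_dma_memcpy(chan, dest, src, len,
						  DMA_CTRL_ACK);
	if (!tx)
		return -ENOMEM;

	cookie = tx->tx_submit(tx);		/* queue the descriptor */
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);		/* kick the hardware */

	/* polled wait; real clients may set a completion callback instead */
	while (dma_async_is_tx_complete(chan, cookie, NULL, NULL) ==
			DMA_IN_PROGRESS)
		cpu_relax();

	return 0;
}
#endif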
/* --- public DMA engine API --- */

#ifdef CONFIG_DMA_ENGINE
void dmaengine_get(void);
void dmaengine_put(void);
#else
static inline void dmaengine_get(void)
{
}
static inline void dmaengine_put(void)
{
}
#endif

#ifdef CONFIG_NET_DMA
#define net_dmaengine_get()	dmaengine_get()
#define net_dmaengine_put()	dmaengine_put()
#else
static inline void net_dmaengine_get(void)
{
}
static inline void net_dmaengine_put(void)
{
}
#endif

dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
	void *dest, void *src, size_t len);
dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
	struct page *page, unsigned int offset, void *kdata, size_t len);
dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan,
	struct page *dest_pg, unsigned int dest_off, struct page *src_pg,
	unsigned int src_off, size_t len);
void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
	struct dma_chan *chan);

static inline void async_tx_ack(struct dma_async_tx_descriptor *tx)
{
	tx->flags |= DMA_CTRL_ACK;
}

static inline void async_tx_clear_ack(struct dma_async_tx_descriptor *tx)
{
	tx->flags &= ~DMA_CTRL_ACK;
}

static inline bool async_tx_test_ack(struct dma_async_tx_descriptor *tx)
{
	return (tx->flags & DMA_CTRL_ACK) == DMA_CTRL_ACK;
}

#define first_dma_cap(mask) __first_dma_cap(&(mask))
static inline int __first_dma_cap(const dma_cap_mask_t *srcp)
{
	return min_t(int, DMA_TX_TYPE_END,
		find_first_bit(srcp->bits, DMA_TX_TYPE_END));
}

#define next_dma_cap(n, mask) __next_dma_cap((n), &(mask))
static inline int __next_dma_cap(int n, const dma_cap_mask_t *srcp)
{
	return min_t(int, DMA_TX_TYPE_END,
		find_next_bit(srcp->bits, DMA_TX_TYPE_END, n+1));
}

#define dma_cap_set(tx, mask) __dma_cap_set((tx), &(mask))
static inline void
__dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
{
	set_bit(tx_type, dstp->bits);
}

#define dma_cap_zero(mask) __dma_cap_zero(&(mask))
static inline void __dma_cap_zero(dma_cap_mask_t *dstp)
{
	bitmap_zero(dstp->bits, DMA_TX_TYPE_END);
}

#define dma_has_cap(tx, mask) __dma_has_cap((tx), &(mask))
static inline int
__dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp)
{
	return test_bit(tx_type, srcp->bits);
}

#define for_each_dma_cap_mask(cap, mask) \
	for ((cap) = first_dma_cap(mask);	\
		(cap) < DMA_TX_TYPE_END;	\
		(cap) = next_dma_cap((cap), (mask)))
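/*
 * Illustrative sketch (not part of this header): building a capability
 * mask with the helpers above and walking the set bits.  The function
 * name is hypothetical.
 */
#if 0
static void my_list_caps(void)
{
	dma_cap_mask_t mask;
	int cap;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	dma_cap_set(DMA_XOR, mask);

	for_each_dma_cap_mask(cap, mask)
		printk(KERN_INFO "requested capability %d\n", cap);
}
#endif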
/**
 * dma_async_issue_pending - flush pending transactions to HW
 * @chan: target DMA channel
 *
 * This allows drivers to push copies to HW in batches,
 * reducing MMIO writes where possible.
 */
static inline void dma_async_issue_pending(struct dma_chan *chan)
{
	chan->device->device_issue_pending(chan);
}

#define dma_async_memcpy_issue_pending(chan) dma_async_issue_pending(chan)

/**
 * dma_async_is_tx_complete - poll for transaction completion
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 * @last: returns last completed cookie, can be NULL
 * @used: returns last issued cookie, can be NULL
 *
 * If @last and @used are passed in, upon return they reflect the driver
 * internal state and can be used with dma_async_is_complete() to check
 * the status of multiple cookies without re-checking hardware state.
 */
static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan,
	dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
{
	return chan->device->device_is_tx_complete(chan, cookie, last, used);
}

#define dma_async_memcpy_complete(chan, cookie, last, used)\
	dma_async_is_tx_complete(chan, cookie, last, used)

/**
 * dma_async_is_complete - test a cookie against chan state
 * @cookie: transaction identifier to test status of
 * @last_complete: last known completed transaction
 * @last_used: last cookie value handed out
 *
 * dma_async_is_complete() is used in dma_async_memcpy_complete();
 * the test logic is separated out for lightweight testing of multiple
 * cookies without re-checking hardware state.
 */
static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
			dma_cookie_t last_complete, dma_cookie_t last_used)
{
	if (last_complete <= last_used) {
		if ((cookie <= last_complete) || (cookie > last_used))
			return DMA_SUCCESS;
	} else {
		if ((cookie <= last_complete) && (cookie > last_used))
			return DMA_SUCCESS;
	}
	return DMA_IN_PROGRESS;
}
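/*
 * Worked example (assumed values): if last_complete == 10 and
 * last_used == 13, cookies 11..13 are still in flight, so
 * dma_async_is_complete(12, 10, 13) yields DMA_IN_PROGRESS while
 * dma_async_is_complete(9, 10, 13) yields DMA_SUCCESS.  The else
 * branch above covers the case where cookie values have wrapped,
 * i.e. last_complete > last_used.
 */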
enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
#ifdef CONFIG_DMA_ENGINE
enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
void dma_issue_pending_all(void);
#else
static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	return DMA_SUCCESS;
}
static inline void dma_issue_pending_all(void)
{
	do { } while (0);
}
#endif

/* --- DMA device --- */

int dma_async_device_register(struct dma_device *device);
void dma_async_device_unregister(struct dma_device *device);
void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
#define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param);
void dma_release_channel(struct dma_chan *chan);

/* --- Helper iov-locking functions --- */

struct dma_page_list {
	char __user *base_address;
	int nr_pages;
	struct page **pages;
};

struct dma_pinned_list {
	int nr_iovecs;
	struct dma_page_list page_list[0];
};

struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
void dma_unpin_iovec_pages(struct dma_pinned_list *pinned_list);

dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
	struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
	struct dma_pinned_list *pinned_list, struct page *page,
	unsigned int offset, size_t len);

#endif /* DMAENGINE_H */