/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */
#ifndef LINUX_DMAENGINE_H
#define LINUX_DMAENGINE_H

#include <linux/device.h>
#include <linux/err.h>
#include <linux/uio.h>
#include <linux/bug.h>
#include <linux/scatterlist.h>
#include <linux/bitmap.h>
#include <linux/types.h>
#include <asm/page.h>

/**
 * typedef dma_cookie_t - an opaque DMA cookie
 *
 * if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code
 */
typedef s32 dma_cookie_t;
#define DMA_MIN_COOKIE	1

static inline int dma_submit_error(dma_cookie_t cookie)
{
	return cookie < 0 ? cookie : 0;
}

/**
 * enum dma_status - DMA transaction status
 * @DMA_COMPLETE: transaction completed
 * @DMA_IN_PROGRESS: transaction not yet processed
 * @DMA_PAUSED: transaction is paused
 * @DMA_ERROR: transaction failed
 */
enum dma_status {
	DMA_COMPLETE,
	DMA_IN_PROGRESS,
	DMA_PAUSED,
	DMA_ERROR,
};

/**
 * enum dma_transaction_type - DMA transaction types/indexes
 *
 * Note: The DMA_ASYNC_TX capability is not to be set by drivers.  It is
 * automatically set as dma devices are registered.
 */
enum dma_transaction_type {
	DMA_MEMCPY,
	DMA_XOR,
	DMA_PQ,
	DMA_XOR_VAL,
	DMA_PQ_VAL,
	DMA_MEMSET,
	DMA_INTERRUPT,
	DMA_SG,
	DMA_PRIVATE,
	DMA_ASYNC_TX,
	DMA_SLAVE,
	DMA_CYCLIC,
	DMA_INTERLEAVE,
/* last transaction type for creation of the capabilities mask */
	DMA_TX_TYPE_END,
};

/**
 * enum dma_transfer_direction - dma transfer mode and direction indicator
 * @DMA_MEM_TO_MEM: Async/Memcpy mode
 * @DMA_MEM_TO_DEV: Slave mode & From Memory to Device
 * @DMA_DEV_TO_MEM: Slave mode & From Device to Memory
 * @DMA_DEV_TO_DEV: Slave mode & From Device to Device
 */
enum dma_transfer_direction {
	DMA_MEM_TO_MEM,
	DMA_MEM_TO_DEV,
	DMA_DEV_TO_MEM,
	DMA_DEV_TO_DEV,
	DMA_TRANS_NONE,
};

/**
 * Interleaved Transfer Request
 * ----------------------------
 * A chunk is a collection of contiguous bytes to be transferred.
 * The gap (in bytes) between two chunks is called the inter-chunk-gap (ICG).
 * ICGs may or may not change between chunks.
 * A FRAME is the smallest series of contiguous {chunk,icg} pairs
 *  that, when repeated an integral number of times, specifies the transfer.
 * A transfer template is a specification of a Frame, the number of times
 *  it is to be repeated and other per-transfer attributes.
 *
 * Practically, a client driver would have ready a template for each
 *  type of transfer it is going to need during its lifetime and
 *  set only 'src_start' and 'dst_start' before submitting the requests.
 *
 *
 *  |      Frame-1        |       Frame-2       | ~ |       Frame-'numf'  |
 *  |====....==.===...=...|====....==.===...=...| ~ |====....==.===...=...|
 *
 *    ==  Chunk size
 *    ... ICG
 */

/**
 * struct data_chunk - Element of scatter-gather list that makes a frame.
 * @size: Number of bytes to read from source.
 *	  size_dst := fn(op, size_src), so doesn't mean much for destination.
 * @icg: Number of bytes to jump after last src/dst address of this
 *	 chunk and before first src/dst address for next chunk.
 *	 Ignored for dst (assumed 0), if dst_inc is true and dst_sgl is false.
 *	 Ignored for src (assumed 0), if src_inc is true and src_sgl is false.
 * @dst_icg: Number of bytes to jump after last dst address of this
 *	 chunk and before the first dst address for next chunk.
 *	 Ignored if dst_inc is true and dst_sgl is false.
 * @src_icg: Number of bytes to jump after last src address of this
 *	 chunk and before the first src address for next chunk.
 *	 Ignored if src_inc is true and src_sgl is false.
 */
struct data_chunk {
	size_t size;
	size_t icg;
	size_t dst_icg;
	size_t src_icg;
};

/**
 * struct dma_interleaved_template - Template to convey DMAC the transfer pattern
 *	 and attributes.
 * @src_start: Bus address of source for the first chunk.
 * @dst_start: Bus address of destination for the first chunk.
 * @dir: Specifies the type of Source and Destination.
 * @src_inc: If the source address increments after reading from it.
 * @dst_inc: If the destination address increments after writing to it.
 * @src_sgl: If the 'icg' of sgl[] applies to Source (scattered read).
 *		Otherwise, source is read contiguously (icg ignored).
 *		Ignored if src_inc is false.
 * @dst_sgl: If the 'icg' of sgl[] applies to Destination (scattered write).
 *		Otherwise, destination is filled contiguously (icg ignored).
 *		Ignored if dst_inc is false.
 * @numf: Number of frames in this template.
 * @frame_size: Number of chunks in a frame, i.e. size of sgl[].
 * @sgl: Array of {chunk,icg} pairs that make up a frame.
 */
struct dma_interleaved_template {
	dma_addr_t src_start;
	dma_addr_t dst_start;
	enum dma_transfer_direction dir;
	bool src_inc;
	bool dst_inc;
	bool src_sgl;
	bool dst_sgl;
	size_t numf;
	size_t frame_size;
	struct data_chunk sgl[0];
};
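/*
 * Illustrative sketch (not taken from any particular driver): a client that
 * wants to scatter four 2-byte samples from a packed source buffer into a
 * destination with a 6-byte gap between samples could fill a template like
 * this, assuming "xt" was allocated with room for one struct data_chunk and
 * that src_dma/dst_dma are bus addresses obtained elsewhere:
 *
 *	xt->src_start = src_dma;
 *	xt->dst_start = dst_dma;
 *	xt->dir = DMA_MEM_TO_MEM;
 *	xt->src_inc = true;
 *	xt->dst_inc = true;
 *	xt->src_sgl = false;		// read the source contiguously
 *	xt->dst_sgl = true;		// apply the ICG on the destination
 *	xt->numf = 4;			// four frames
 *	xt->frame_size = 1;		// one {chunk, icg} pair per frame
 *	xt->sgl[0].size = 2;		// two bytes per chunk
 *	xt->sgl[0].icg = 6;		// skip six bytes between chunks
 *
 * This moves 4 * 2 = 8 bytes in total, writing each 2-byte chunk 2 + 6 = 8
 * bytes apart in the destination.  The template would then be handed to
 * dmaengine_prep_interleaved_dma(), defined later in this file.
 */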
/**
 * enum dma_ctrl_flags - DMA flags to augment operation preparation,
 *  control completion, and communicate status.
 * @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of
 *  this transaction
 * @DMA_CTRL_ACK - if clear, the descriptor cannot be reused until the client
 *  acknowledges receipt, i.e. has a chance to establish any dependency
 *  chains
 * @DMA_PREP_PQ_DISABLE_P - prevent generation of P while generating Q
 * @DMA_PREP_PQ_DISABLE_Q - prevent generation of Q while generating P
 * @DMA_PREP_CONTINUE - indicate to a driver that it is reusing buffers as
 *  sources that were the result of a previous operation; in the case of a PQ
 *  operation it continues the calculation with new sources
 * @DMA_PREP_FENCE - tell the driver that subsequent operations depend
 *  on the result of this operation
 */
enum dma_ctrl_flags {
	DMA_PREP_INTERRUPT = (1 << 0),
	DMA_CTRL_ACK = (1 << 1),
	DMA_PREP_PQ_DISABLE_P = (1 << 2),
	DMA_PREP_PQ_DISABLE_Q = (1 << 3),
	DMA_PREP_CONTINUE = (1 << 4),
	DMA_PREP_FENCE = (1 << 5),
};
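/*
 * Illustrative sketch: a client that wants a completion interrupt and does
 * not intend to attach further dependent operations would typically combine
 * the flags like this when calling a prep routine ("chan", "dst", "src" and
 * "len" are assumed to exist and be DMA-mapped already):
 *
 *	unsigned long flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len, flags);
 *
 * Leaving DMA_CTRL_ACK clear instead tells the driver the descriptor may not
 * be recycled until the client later calls async_tx_ack() on it.
 */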
/**
 * enum sum_check_bits - bit positions used in sum_check_flags
 */
enum sum_check_bits {
	SUM_CHECK_P = 0,
	SUM_CHECK_Q = 1,
};

/**
 * enum sum_check_flags - result of async_{xor,pq}_zero_sum operations
 * @SUM_CHECK_P_RESULT - 1 if xor zero sum error, 0 otherwise
 * @SUM_CHECK_Q_RESULT - 1 if reed-solomon zero sum error, 0 otherwise
 */
enum sum_check_flags {
	SUM_CHECK_P_RESULT = (1 << SUM_CHECK_P),
	SUM_CHECK_Q_RESULT = (1 << SUM_CHECK_Q),
};


/**
 * dma_cap_mask_t - capabilities bitmap modeled after cpumask_t.
 * See linux/cpumask.h
 */
typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;

/**
 * struct dma_chan_percpu - the per-CPU part of struct dma_chan
 * @memcpy_count: transaction counter
 * @bytes_transferred: byte counter
 */
struct dma_chan_percpu {
	/* stats */
	unsigned long memcpy_count;
	unsigned long bytes_transferred;
};

/**
 * struct dma_router - DMA router structure
 * @dev: pointer to the DMA router device
 * @route_free: function to be called when the route can be disconnected
 */
struct dma_router {
	struct device *dev;
	void (*route_free)(struct device *dev, void *route_data);
};

/**
 * struct dma_chan - devices supply DMA channels, clients use them
 * @device: ptr to the dma device which supplies this channel, always !%NULL
 * @cookie: last cookie value returned to client
 * @completed_cookie: last completed cookie for this channel
 * @chan_id: channel ID for sysfs
 * @dev: class device for sysfs
 * @device_node: used to add this to the device chan list
 * @local: per-cpu pointer to a struct dma_chan_percpu
 * @client_count: how many clients are using this channel
 * @table_count: number of appearances in the mem-to-mem allocation table
 * @router: pointer to the DMA router structure
 * @route_data: channel specific data for the router
 * @private: private data for certain client-channel associations
 */
struct dma_chan {
	struct dma_device *device;
	dma_cookie_t cookie;
	dma_cookie_t completed_cookie;

	/* sysfs */
	int chan_id;
	struct dma_chan_dev *dev;

	struct list_head device_node;
	struct dma_chan_percpu __percpu *local;
	int client_count;
	int table_count;

	/* DMA router */
	struct dma_router *router;
	void *route_data;

	void *private;
};

/**
 * struct dma_chan_dev - relate sysfs device node to backing channel device
 * @chan: driver channel device
 * @device: sysfs device
 * @dev_id: parent dma_device dev_id
 * @idr_ref: reference count to gate release of dma_device dev_id
 */
struct dma_chan_dev {
	struct dma_chan *chan;
	struct device device;
	int dev_id;
	atomic_t *idr_ref;
};

/**
 * enum dma_slave_buswidth - defines bus width of the DMA slave
 * device, source or target buses
 */
enum dma_slave_buswidth {
	DMA_SLAVE_BUSWIDTH_UNDEFINED = 0,
	DMA_SLAVE_BUSWIDTH_1_BYTE = 1,
	DMA_SLAVE_BUSWIDTH_2_BYTES = 2,
	DMA_SLAVE_BUSWIDTH_3_BYTES = 3,
	DMA_SLAVE_BUSWIDTH_4_BYTES = 4,
	DMA_SLAVE_BUSWIDTH_8_BYTES = 8,
	DMA_SLAVE_BUSWIDTH_16_BYTES = 16,
	DMA_SLAVE_BUSWIDTH_32_BYTES = 32,
	DMA_SLAVE_BUSWIDTH_64_BYTES = 64,
};

/**
 * struct dma_slave_config - dma slave channel runtime config
 * @direction: whether the data shall go in or out on this slave
 * channel, right now. DMA_MEM_TO_DEV and DMA_DEV_TO_MEM are
 * legal values. DEPRECATED, drivers should use the direction argument
 * to the device_prep_slave_sg and device_prep_dma_cyclic functions or
 * the dir field in the dma_interleaved_template structure.
 * @src_addr: this is the physical address where DMA slave data
 * should be read (RX), if the source is memory this argument is
 * ignored.
 * @dst_addr: this is the physical address where DMA slave data
 * should be written (TX), if the destination is memory this argument
 * is ignored.
 * @src_addr_width: this is the width in bytes of the source (RX)
 * register where DMA data shall be read. If the source
 * is memory this may be ignored depending on architecture.
 * Legal values: 1, 2, 4, 8.
 * @dst_addr_width: same as src_addr_width but for destination
 * target (TX) mutatis mutandis.
 * @src_maxburst: the maximum number of words (note: words, as in
 * units of the src_addr_width member, not bytes) that can be sent
 * in one burst to the device. Typically something like half the
 * FIFO depth on I/O peripherals so you don't overflow it. This
 * may or may not be applicable on memory sources.
 * @dst_maxburst: same as src_maxburst but for destination target
 * mutatis mutandis.
 * @device_fc: Flow Controller Settings. Only valid for slave channels. Fill
 * with 'true' if the peripheral should be the flow controller. Direction
 * will be selected at runtime.
 * @slave_id: Slave requester id. Only valid for slave channels. The DMA
 * slave peripheral has a unique id as DMA requester, which needs to be
 * passed in via this slave config.
 *
 * This struct is passed in as configuration data to a DMA engine
 * in order to set up a certain channel for DMA transport at runtime.
 * The DMA device/engine has to provide support for an additional
 * callback in the dma_device structure, device_config, and this struct
 * will then be passed in as an argument to that callback.
 *
 * The rationale for adding configuration information to this struct is as
 * follows: if it is likely that more than one DMA slave controller in
 * the world will support the configuration option, then make it generic.
 * If not, and the value can instead be supplied statically through
 * platform data, then prefer to do that.
 */
struct dma_slave_config {
	enum dma_transfer_direction direction;
	dma_addr_t src_addr;
	dma_addr_t dst_addr;
	enum dma_slave_buswidth src_addr_width;
	enum dma_slave_buswidth dst_addr_width;
	u32 src_maxburst;
	u32 dst_maxburst;
	bool device_fc;
	unsigned int slave_id;
};
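/*
 * Illustrative sketch: configuring a channel for memory-to-peripheral
 * transfers towards a hypothetical UART TX FIFO at bus address 0x4000c000,
 * with a 1-byte data register and bursts of 4 words ("chan" assumed to have
 * been requested already):
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = 0x4000c000,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
 *		.dst_maxburst = 4,
 *	};
 *
 *	if (dmaengine_slave_config(chan, &cfg))
 *		return -EINVAL;		// channel rejected the configuration
 *
 * dmaengine_slave_config() (defined later in this file) forwards the struct
 * to the driver's device_config callback and returns -ENOSYS if the channel
 * does not implement it.
 */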
/**
 * enum dma_residue_granularity - Granularity of the reported transfer residue
 * @DMA_RESIDUE_GRANULARITY_DESCRIPTOR: Residue reporting is not supported. The
 * DMA channel is only able to tell whether a descriptor has been completed or
 * not, which means residue reporting is not supported by this channel. The
 * residue field of the dma_tx_state struct will always be 0.
 * @DMA_RESIDUE_GRANULARITY_SEGMENT: Residue is updated after each successfully
 * completed segment of the transfer (for cyclic transfers this is after each
 * period). This is typically implemented by having the hardware generate an
 * interrupt after each transferred segment and then the driver updates the
 * outstanding residue by the size of the segment. Another possibility is if
 * the hardware supports scatter-gather and the segment descriptor has a field
 * which gets set after the segment has been completed. The driver then counts
 * the number of segments without the flag set to compute the residue.
 * @DMA_RESIDUE_GRANULARITY_BURST: Residue is updated after each transferred
 * burst. This is typically only supported if the hardware has a progress
 * register of some sort (e.g. a register with the current read/write address
 * or a register with the amount of bursts/beats/bytes that have been
 * transferred or still need to be transferred).
 */
enum dma_residue_granularity {
	DMA_RESIDUE_GRANULARITY_DESCRIPTOR = 0,
	DMA_RESIDUE_GRANULARITY_SEGMENT = 1,
	DMA_RESIDUE_GRANULARITY_BURST = 2,
};

/* struct dma_slave_caps - expose capabilities of a slave channel only
 *
 * @src_addr_widths: bit mask of src addr widths the channel supports
 * @dst_addr_widths: bit mask of dst addr widths the channel supports
 * @directions: bit mask of slave directions the channel supports.
 *	Since the enum dma_transfer_direction is not defined as bits for each
 *	type of direction, the dma controller should set (1 << <TYPE>) and the
 *	client should check for the same.
 * @cmd_pause: true, if pause and thereby resume is supported
 * @cmd_terminate: true, if terminate cmd is supported
 * @residue_granularity: granularity of the reported transfer residue
 */
struct dma_slave_caps {
	u32 src_addr_widths;
	u32 dst_addr_widths;
	u32 directions;
	bool cmd_pause;
	bool cmd_terminate;
	enum dma_residue_granularity residue_granularity;
};

static inline const char *dma_chan_name(struct dma_chan *chan)
{
	return dev_name(&chan->dev->device);
}

void dma_chan_cleanup(struct kref *kref);

/**
 * typedef dma_filter_fn - callback filter for dma_request_channel
 * @chan: channel to be reviewed
 * @filter_param: opaque parameter passed through dma_request_channel
 *
 * When this optional parameter is specified in a call to dma_request_channel a
 * suitable channel is passed to this routine for further dispositioning before
 * being returned.  Where 'suitable' indicates a non-busy channel that
 * satisfies the given capability mask.  It returns 'true' to indicate that the
 * channel is suitable.
 */
typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
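/*
 * Illustrative sketch of a filter callback: the filter parameter here is a
 * hypothetical cookie (a pointer to the channel id the client wants), and the
 * filter accepts only the matching channel.
 *
 *	static bool my_dma_filter(struct dma_chan *chan, void *filter_param)
 *	{
 *		int requested_id = *(int *)filter_param;
 *
 *		return chan->chan_id == requested_id;
 *	}
 *
 * Such a filter would be passed to dma_request_channel() together with a
 * capability mask (typically containing DMA_SLAVE); see the helpers near the
 * end of this file.
 */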
typedef void (*dma_async_tx_callback)(void *dma_async_param);

struct dmaengine_unmap_data {
	u8 map_cnt;
	u8 to_cnt;
	u8 from_cnt;
	u8 bidi_cnt;
	struct device *dev;
	struct kref kref;
	size_t len;
	dma_addr_t addr[0];
};

/**
 * struct dma_async_tx_descriptor - async transaction descriptor
 * ---dma generic offload fields---
 * @cookie: tracking cookie for this transaction, set to -EBUSY if
 *	this tx is sitting on a dependency list
 * @flags: flags to augment operation preparation, control completion, and
 *	communicate status
 * @phys: physical address of the descriptor
 * @chan: target channel for this operation
 * @tx_submit: accept the descriptor, assign ordered cookie and mark the
 *	descriptor pending. To be pushed on .issue_pending() call
 * @callback: routine to call after this operation is complete
 * @callback_param: general parameter to pass to the callback routine
 * ---async_tx api specific fields---
 * @next: at completion submit this descriptor
 * @parent: pointer to the next level up in the dependency chain
 * @lock: protect the parent and next pointers
 */
struct dma_async_tx_descriptor {
	dma_cookie_t cookie;
	enum dma_ctrl_flags flags; /* not a 'long' to pack with cookie */
	dma_addr_t phys;
	struct dma_chan *chan;
	dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
	dma_async_tx_callback callback;
	void *callback_param;
	struct dmaengine_unmap_data *unmap;
#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
	struct dma_async_tx_descriptor *next;
	struct dma_async_tx_descriptor *parent;
	spinlock_t lock;
#endif
};

#ifdef CONFIG_DMA_ENGINE
static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
				 struct dmaengine_unmap_data *unmap)
{
	kref_get(&unmap->kref);
	tx->unmap = unmap;
}

struct dmaengine_unmap_data *
dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags);
void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap);
#else
static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
				 struct dmaengine_unmap_data *unmap)
{
}
static inline struct dmaengine_unmap_data *
dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
{
	return NULL;
}
static inline void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
{
}
#endif

static inline void dma_descriptor_unmap(struct dma_async_tx_descriptor *tx)
{
	if (tx->unmap) {
		dmaengine_unmap_put(tx->unmap);
		tx->unmap = NULL;
	}
}

#ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
static inline void txd_lock(struct dma_async_tx_descriptor *txd)
{
}
static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
{
}
static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
{
	BUG();
}
static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
{
}
static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
{
}
static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
{
	return NULL;
}
static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
{
	return NULL;
}

#else
static inline void txd_lock(struct dma_async_tx_descriptor *txd)
{
	spin_lock_bh(&txd->lock);
}
static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
{
	spin_unlock_bh(&txd->lock);
}
static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
{
	txd->next = next;
	next->parent = txd;
}
static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
{
	txd->parent = NULL;
}
static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
{
	txd->next = NULL;
}
static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
{
	return txd->parent;
}
static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
{
	return txd->next;
}
#endif
/**
 * struct dma_tx_state - filled in to report the status of
 * a transfer.
 * @last: last completed DMA cookie
 * @used: last issued DMA cookie (i.e. the one in progress)
 * @residue: the remaining number of bytes left to transmit
 *	on the selected transfer for states DMA_IN_PROGRESS and
 *	DMA_PAUSED if this is implemented in the driver, else 0
 */
struct dma_tx_state {
	dma_cookie_t last;
	dma_cookie_t used;
	u32 residue;
};
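/*
 * Illustrative sketch: querying how much of a submitted transfer is still
 * outstanding, assuming "cookie" was returned by dmaengine_submit() earlier
 * and "chan" is the channel it was submitted on.  Whether "residue" carries
 * useful information depends on the channel's reported residue_granularity.
 *
 *	struct dma_tx_state state;
 *	enum dma_status status;
 *
 *	status = dmaengine_tx_status(chan, cookie, &state);
 *	if (status == DMA_IN_PROGRESS || status == DMA_PAUSED)
 *		pr_info("%u bytes left\n", state.residue);
 */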
/**
 * struct dma_device - info on the entity supplying DMA services
 * @chancnt: how many DMA channels are supported
 * @privatecnt: how many DMA channels are requested by dma_request_channel
 * @channels: the list of struct dma_chan
 * @global_node: list_head for global dma_device_list
 * @cap_mask: one or more dma_capability flags
 * @max_xor: maximum number of xor sources, 0 if no capability
 * @max_pq: maximum number of PQ sources and PQ-continue capability
 * @copy_align: alignment shift for memcpy operations
 * @xor_align: alignment shift for xor operations
 * @pq_align: alignment shift for pq operations
 * @fill_align: alignment shift for memset operations
 * @dev_id: unique device ID
 * @dev: struct device reference for dma mapping api
 * @src_addr_widths: bit mask of src addr widths the device supports
 * @dst_addr_widths: bit mask of dst addr widths the device supports
 * @directions: bit mask of slave directions the device supports.
 *	Since the enum dma_transfer_direction is not defined as bits for each
 *	type of direction, the dma controller should set (1 << <TYPE>) and the
 *	client should check for the same.
 * @residue_granularity: granularity of the transfer residue reported
 *	by tx_status
 * @device_alloc_chan_resources: allocate resources and return the
 *	number of allocated descriptors
 * @device_free_chan_resources: release DMA channel's resources
 * @device_prep_dma_memcpy: prepares a memcpy operation
 * @device_prep_dma_xor: prepares a xor operation
 * @device_prep_dma_xor_val: prepares a xor validation operation
 * @device_prep_dma_pq: prepares a pq operation
 * @device_prep_dma_pq_val: prepares a pqzero_sum operation
 * @device_prep_dma_memset: prepares a memset operation
 * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
 * @device_prep_slave_sg: prepares a slave dma operation
 * @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio.
 *	The function takes a buffer of size buf_len. The callback function will
 *	be called after period_len bytes have been transferred.
 * @device_prep_interleaved_dma: prepares a transfer described by a generic
 *	interleaved transfer template
 * @device_config: Pushes a new configuration to a channel, return 0 or an
 *	error code
 * @device_pause: Pauses any transfer happening on a channel. Returns
 *	0 or an error code
 * @device_resume: Resumes any transfer on a channel previously
 *	paused. Returns 0 or an error code
 * @device_terminate_all: Aborts all transfers on a channel. Returns 0
 *	or an error code
 * @device_tx_status: poll for transaction completion, the optional
 *	txstate parameter can be supplied with a pointer to get a
 *	struct with auxiliary transfer status information, otherwise the call
 *	will just return a simple status code
 * @device_issue_pending: push pending transactions to hardware
 */
struct dma_device {
	unsigned int chancnt;
	unsigned int privatecnt;
	struct list_head channels;
	struct list_head global_node;
	dma_cap_mask_t cap_mask;
	unsigned short max_xor;
	unsigned short max_pq;
	u8 copy_align;
	u8 xor_align;
	u8 pq_align;
	u8 fill_align;
	#define DMA_HAS_PQ_CONTINUE (1 << 15)

	int dev_id;
	struct device *dev;

	u32 src_addr_widths;
	u32 dst_addr_widths;
	u32 directions;
	enum dma_residue_granularity residue_granularity;

	int (*device_alloc_chan_resources)(struct dma_chan *chan);
	void (*device_free_chan_resources)(struct dma_chan *chan);

	struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
		struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
		size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_xor)(
		struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
		unsigned int src_cnt, size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_xor_val)(
		struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt,
		size_t len, enum sum_check_flags *result, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_pq)(
		struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
		unsigned int src_cnt, const unsigned char *scf,
		size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_pq_val)(
		struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
		unsigned int src_cnt, const unsigned char *scf, size_t len,
		enum sum_check_flags *pqres, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_memset)(
		struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
		unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
		struct dma_chan *chan, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_sg)(
		struct dma_chan *chan,
		struct scatterlist *dst_sg, unsigned int dst_nents,
		struct scatterlist *src_sg, unsigned int src_nents,
		unsigned long flags);

	struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context);
	struct dma_async_tx_descriptor *(*device_prep_dma_cyclic)(
		struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)(
		struct dma_chan *chan, struct dma_interleaved_template *xt,
		unsigned long flags);

	int (*device_config)(struct dma_chan *chan,
			     struct dma_slave_config *config);
	int (*device_pause)(struct dma_chan *chan);
	int (*device_resume)(struct dma_chan *chan);
	int (*device_terminate_all)(struct dma_chan *chan);

	enum dma_status (*device_tx_status)(struct dma_chan *chan,
					    dma_cookie_t cookie,
					    struct dma_tx_state *txstate);
	void (*device_issue_pending)(struct dma_chan *chan);
};
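/*
 * Illustrative provider-side sketch (heavily simplified): a controller driver
 * typically fills in its capability mask and callbacks, adds its channels to
 * the "channels" list, and registers the device.  "my_dd", "my_probe" and all
 * my_* callbacks below are hypothetical.
 *
 *	static struct dma_device my_dd;
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		dma_cap_zero(my_dd.cap_mask);
 *		dma_cap_set(DMA_SLAVE, my_dd.cap_mask);
 *		my_dd.dev = &pdev->dev;
 *		my_dd.device_prep_slave_sg = my_prep_slave_sg;
 *		my_dd.device_config = my_config;
 *		my_dd.device_terminate_all = my_terminate_all;
 *		my_dd.device_tx_status = my_tx_status;
 *		my_dd.device_issue_pending = my_issue_pending;
 *		INIT_LIST_HEAD(&my_dd.channels);
 *		// each struct dma_chan would be added to my_dd.channels here
 *
 *		return dma_async_device_register(&my_dd);
 *	}
 *
 * The dma_cap_* helpers and dma_async_device_register() are declared later
 * in this file.
 */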
static inline int dmaengine_slave_config(struct dma_chan *chan,
					 struct dma_slave_config *config)
{
	if (chan->device->device_config)
		return chan->device->device_config(chan, config);

	return -ENOSYS;
}

static inline bool is_slave_direction(enum dma_transfer_direction direction)
{
	return (direction == DMA_MEM_TO_DEV) || (direction == DMA_DEV_TO_MEM);
}

static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
	struct dma_chan *chan, dma_addr_t buf, size_t len,
	enum dma_transfer_direction dir, unsigned long flags)
{
	struct scatterlist sg;
	sg_init_table(&sg, 1);
	sg_dma_address(&sg) = buf;
	sg_dma_len(&sg) = len;

	return chan->device->device_prep_slave_sg(chan, &sg, 1,
						  dir, flags, NULL);
}

static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction dir, unsigned long flags)
{
	return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
						  dir, flags, NULL);
}

#ifdef CONFIG_RAPIDIO_DMA_ENGINE
struct rio_dma_ext;
static inline struct dma_async_tx_descriptor *dmaengine_prep_rio_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction dir, unsigned long flags,
	struct rio_dma_ext *rio_ext)
{
	return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
						  dir, flags, rio_ext);
}
#endif

static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction dir,
		unsigned long flags)
{
	return chan->device->device_prep_dma_cyclic(chan, buf_addr, buf_len,
						    period_len, dir, flags);
}

static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma(
		struct dma_chan *chan, struct dma_interleaved_template *xt,
		unsigned long flags)
{
	return chan->device->device_prep_interleaved_dma(chan, xt, flags);
}

static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memset(
		struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
		unsigned long flags)
{
	if (!chan || !chan->device)
		return NULL;

	return chan->device->device_prep_dma_memset(chan, dest, value,
						    len, flags);
}

static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_sg(
		struct dma_chan *chan,
		struct scatterlist *dst_sg, unsigned int dst_nents,
		struct scatterlist *src_sg, unsigned int src_nents,
		unsigned long flags)
{
	return chan->device->device_prep_dma_sg(chan, dst_sg, dst_nents,
						src_sg, src_nents, flags);
}

static inline int dmaengine_terminate_all(struct dma_chan *chan)
{
	if (chan->device->device_terminate_all)
		return chan->device->device_terminate_all(chan);

	return -ENOSYS;
}
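/*
 * Illustrative client-side sketch tying the slave API together: request a
 * channel, configure it, prepare and submit a single-buffer transfer, then
 * kick the engine.  Error handling is abbreviated; "dev", "cfg", "buf",
 * "len", "my_done" and "my_data" are placeholders, and "buf" is assumed to
 * be already mapped with the DMA API.
 *
 *	struct dma_async_tx_descriptor *desc;
 *	struct dma_chan *chan;
 *	dma_cookie_t cookie;
 *
 *	chan = dma_request_slave_channel(dev, "tx");
 *	if (!chan)
 *		return -ENODEV;
 *
 *	dmaengine_slave_config(chan, &cfg);
 *
 *	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
 *					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	if (!desc)
 *		goto err_release;	// release the channel on failure
 *
 *	desc->callback = my_done;	// hypothetical completion handler
 *	desc->callback_param = my_data;
 *
 *	cookie = dmaengine_submit(desc);
 *	if (dma_submit_error(cookie))
 *		goto err_release;
 *
 *	dma_async_issue_pending(chan);
 */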
static inline int dmaengine_pause(struct dma_chan *chan)
{
	if (chan->device->device_pause)
		return chan->device->device_pause(chan);

	return -ENOSYS;
}

static inline int dmaengine_resume(struct dma_chan *chan)
{
	if (chan->device->device_resume)
		return chan->device->device_resume(chan);

	return -ENOSYS;
}

static inline enum dma_status dmaengine_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *state)
{
	return chan->device->device_tx_status(chan, cookie, state);
}

static inline dma_cookie_t dmaengine_submit(struct dma_async_tx_descriptor *desc)
{
	return desc->tx_submit(desc);
}

static inline bool dmaengine_check_align(u8 align, size_t off1, size_t off2, size_t len)
{
	size_t mask;

	if (!align)
		return true;
	mask = (1 << align) - 1;
	if (mask & (off1 | off2 | len))
		return false;
	return true;
}

static inline bool is_dma_copy_aligned(struct dma_device *dev, size_t off1,
				       size_t off2, size_t len)
{
	return dmaengine_check_align(dev->copy_align, off1, off2, len);
}

static inline bool is_dma_xor_aligned(struct dma_device *dev, size_t off1,
				      size_t off2, size_t len)
{
	return dmaengine_check_align(dev->xor_align, off1, off2, len);
}

static inline bool is_dma_pq_aligned(struct dma_device *dev, size_t off1,
				     size_t off2, size_t len)
{
	return dmaengine_check_align(dev->pq_align, off1, off2, len);
}

static inline bool is_dma_fill_aligned(struct dma_device *dev, size_t off1,
				       size_t off2, size_t len)
{
	return dmaengine_check_align(dev->fill_align, off1, off2, len);
}

static inline void
dma_set_maxpq(struct dma_device *dma, int maxpq, int has_pq_continue)
{
	dma->max_pq = maxpq;
	if (has_pq_continue)
		dma->max_pq |= DMA_HAS_PQ_CONTINUE;
}

static inline bool dmaf_continue(enum dma_ctrl_flags flags)
{
	return (flags & DMA_PREP_CONTINUE) == DMA_PREP_CONTINUE;
}

static inline bool dmaf_p_disabled_continue(enum dma_ctrl_flags flags)
{
	enum dma_ctrl_flags mask = DMA_PREP_CONTINUE | DMA_PREP_PQ_DISABLE_P;

	return (flags & mask) == mask;
}

static inline bool dma_dev_has_pq_continue(struct dma_device *dma)
{
	return (dma->max_pq & DMA_HAS_PQ_CONTINUE) == DMA_HAS_PQ_CONTINUE;
}

static inline unsigned short dma_dev_to_maxpq(struct dma_device *dma)
{
	return dma->max_pq & ~DMA_HAS_PQ_CONTINUE;
}

/* dma_maxpq - reduce maxpq in the face of continued operations
 * @dma - dma device with PQ capability
 * @flags - to check if DMA_PREP_CONTINUE and DMA_PREP_PQ_DISABLE_P are set
 *
 * When an engine does not support native continuation we need 3 extra
 * source slots to reuse P and Q with the following coefficients:
 * 1/ {00} * P : remove P from Q', but use it as a source for P'
 * 2/ {01} * Q : use Q to continue Q' calculation
 * 3/ {00} * Q : subtract Q from P' to cancel (2)
 *
 * In the case where P is disabled we only need 1 extra source:
 * 1/ {01} * Q : use Q to continue Q' calculation
 */
static inline int dma_maxpq(struct dma_device *dma, enum dma_ctrl_flags flags)
{
	if (dma_dev_has_pq_continue(dma) || !dmaf_continue(flags))
		return dma_dev_to_maxpq(dma);
	else if (dmaf_p_disabled_continue(flags))
		return dma_dev_to_maxpq(dma) - 1;
	else if (dmaf_continue(flags))
		return dma_dev_to_maxpq(dma) - 3;
	BUG();
}
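/*
 * Worked example for dma_maxpq() above, assuming an engine that reports
 * max_pq = 8 and has no native PQ continuation support:
 *
 *   - plain operation (no DMA_PREP_CONTINUE):            8 sources usable
 *   - continuation with P disabled (CONTINUE|DISABLE_P):  8 - 1 = 7 sources
 *   - full continuation (DMA_PREP_CONTINUE only):         8 - 3 = 5 sources
 *
 * Had the driver called dma_set_maxpq(dma, 8, 1), DMA_HAS_PQ_CONTINUE would
 * be set and all three cases would return 8, because the hardware chains
 * P/Q itself.
 */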
static inline size_t dmaengine_get_icg(bool inc, bool sgl, size_t icg,
				       size_t dir_icg)
{
	if (inc) {
		if (dir_icg)
			return dir_icg;
		else if (sgl)
			return icg;
	}

	return 0;
}

static inline size_t dmaengine_get_dst_icg(struct dma_interleaved_template *xt,
					   struct data_chunk *chunk)
{
	return dmaengine_get_icg(xt->dst_inc, xt->dst_sgl,
				 chunk->icg, chunk->dst_icg);
}

static inline size_t dmaengine_get_src_icg(struct dma_interleaved_template *xt,
					   struct data_chunk *chunk)
{
	return dmaengine_get_icg(xt->src_inc, xt->src_sgl,
				 chunk->icg, chunk->src_icg);
}

/* --- public DMA engine API --- */

#ifdef CONFIG_DMA_ENGINE
void dmaengine_get(void);
void dmaengine_put(void);
#else
static inline void dmaengine_get(void)
{
}
static inline void dmaengine_put(void)
{
}
#endif

#ifdef CONFIG_ASYNC_TX_DMA
#define async_dmaengine_get()	dmaengine_get()
#define async_dmaengine_put()	dmaengine_put()
#ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
#define async_dma_find_channel(type) dma_find_channel(DMA_ASYNC_TX)
#else
#define async_dma_find_channel(type) dma_find_channel(type)
#endif /* CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH */
#else
static inline void async_dmaengine_get(void)
{
}
static inline void async_dmaengine_put(void)
{
}
static inline struct dma_chan *
async_dma_find_channel(enum dma_transaction_type type)
{
	return NULL;
}
#endif /* CONFIG_ASYNC_TX_DMA */
void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
	struct dma_chan *chan);

static inline void async_tx_ack(struct dma_async_tx_descriptor *tx)
{
	tx->flags |= DMA_CTRL_ACK;
}

static inline void async_tx_clear_ack(struct dma_async_tx_descriptor *tx)
{
	tx->flags &= ~DMA_CTRL_ACK;
}

static inline bool async_tx_test_ack(struct dma_async_tx_descriptor *tx)
{
	return (tx->flags & DMA_CTRL_ACK) == DMA_CTRL_ACK;
}

#define dma_cap_set(tx, mask) __dma_cap_set((tx), &(mask))
static inline void
__dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
{
	set_bit(tx_type, dstp->bits);
}

#define dma_cap_clear(tx, mask) __dma_cap_clear((tx), &(mask))
static inline void
__dma_cap_clear(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
{
	clear_bit(tx_type, dstp->bits);
}

#define dma_cap_zero(mask) __dma_cap_zero(&(mask))
static inline void __dma_cap_zero(dma_cap_mask_t *dstp)
{
	bitmap_zero(dstp->bits, DMA_TX_TYPE_END);
}

#define dma_has_cap(tx, mask) __dma_has_cap((tx), &(mask))
static inline int
__dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp)
{
	return test_bit(tx_type, srcp->bits);
}

#define for_each_dma_cap_mask(cap, mask) \
	for_each_set_bit(cap, mask.bits, DMA_TX_TYPE_END)
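/*
 * Illustrative sketch: building a capability mask and requesting a
 * memory-to-memory channel with it.  No filter callback is needed here, so
 * NULL is passed for both filter arguments of dma_request_channel() (defined
 * near the end of this file).
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *	if (chan)
 *		dma_release_channel(chan);	// done with the channel
 */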
/**
 * dma_async_issue_pending - flush pending transactions to HW
 * @chan: target DMA channel
 *
 * This allows drivers to push copies to HW in batches,
 * reducing MMIO writes where possible.
 */
static inline void dma_async_issue_pending(struct dma_chan *chan)
{
	chan->device->device_issue_pending(chan);
}

/**
 * dma_async_is_tx_complete - poll for transaction completion
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 * @last: returns last completed cookie, can be NULL
 * @used: returns last issued cookie, can be NULL
 *
 * If @last and @used are passed in, upon return they reflect the driver
 * internal state and can be used with dma_async_is_complete() to check
 * the status of multiple cookies without re-checking hardware state.
 */
static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan,
	dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
{
	struct dma_tx_state state;
	enum dma_status status;

	status = chan->device->device_tx_status(chan, cookie, &state);
	if (last)
		*last = state.last;
	if (used)
		*used = state.used;
	return status;
}

/**
 * dma_async_is_complete - test a cookie against chan state
 * @cookie: transaction identifier to test status of
 * @last_complete: last known completed transaction
 * @last_used: last cookie value handed out
 *
 * dma_async_is_complete() is used in dma_async_is_tx_complete();
 * the test logic is separated for lightweight testing of multiple cookies.
 */
static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
			dma_cookie_t last_complete, dma_cookie_t last_used)
{
	if (last_complete <= last_used) {
		if ((cookie <= last_complete) || (cookie > last_used))
			return DMA_COMPLETE;
	} else {
		if ((cookie <= last_complete) && (cookie > last_used))
			return DMA_COMPLETE;
	}
	return DMA_IN_PROGRESS;
}
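/*
 * Worked example for dma_async_is_complete() above: with last_complete = 90
 * and last_used = 100, cookie 88 reports DMA_COMPLETE while cookie 95 is
 * still DMA_IN_PROGRESS.  After the cookie counter wraps (say
 * last_complete = 2147483640 and last_used = 5), the second branch keeps the
 * answers consistent: cookie 1000000 is DMA_COMPLETE, while cookie 3, issued
 * after the wrap, is DMA_IN_PROGRESS.
 */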
static inline void
dma_set_tx_state(struct dma_tx_state *st, dma_cookie_t last, dma_cookie_t used, u32 residue)
{
	if (st) {
		st->last = last;
		st->used = used;
		st->residue = residue;
	}
}

#ifdef CONFIG_DMA_ENGINE
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
void dma_issue_pending_all(void);
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
					dma_filter_fn fn, void *fn_param);
struct dma_chan *dma_request_slave_channel_reason(struct device *dev,
						  const char *name);
struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name);
void dma_release_channel(struct dma_chan *chan);
int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps);
#else
static inline struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
	return NULL;
}
static inline enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
	return DMA_COMPLETE;
}
static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	return DMA_COMPLETE;
}
static inline void dma_issue_pending_all(void)
{
}
static inline struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
					      dma_filter_fn fn, void *fn_param)
{
	return NULL;
}
static inline struct dma_chan *dma_request_slave_channel_reason(
					struct device *dev, const char *name)
{
	return ERR_PTR(-ENODEV);
}
static inline struct dma_chan *dma_request_slave_channel(struct device *dev,
							  const char *name)
{
	return NULL;
}
static inline void dma_release_channel(struct dma_chan *chan)
{
}
static inline int dma_get_slave_caps(struct dma_chan *chan,
				     struct dma_slave_caps *caps)
{
	return -ENXIO;
}
#endif

/* --- DMA device --- */

int dma_async_device_register(struct dma_device *device);
void dma_async_device_unregister(struct dma_device *device);
void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
struct dma_chan *dma_get_any_slave_channel(struct dma_device *device);
#define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
#define dma_request_slave_channel_compat(mask, x, y, dev, name) \
	__dma_request_slave_channel_compat(&(mask), x, y, dev, name)

static inline struct dma_chan
*__dma_request_slave_channel_compat(const dma_cap_mask_t *mask,
				    dma_filter_fn fn, void *fn_param,
				    struct device *dev, char *name)
{
	struct dma_chan *chan;

	chan = dma_request_slave_channel(dev, name);
	if (chan)
		return chan;

	return __dma_request_channel(mask, fn, fn_param);
}
#endif /* LINUX_DMAENGINE_H */