Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

rapidio: add DMA engine support for RIO data transfers

Adds DMA Engine framework support into RapidIO subsystem.

Uses DMA Engine DMA_SLAVE interface to generate data transfers to/from
remote RapidIO target devices.

Introduces RapidIO-specific wrapper for prep_slave_sg() interface with an
extra parameter to pass target specific information.

Uses scatterlist to describe local data buffer. Address flat data buffer
on a remote side.

Signed-off-by: Alexandre Bounine <alexandre.bounine@idt.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Acked-by: Vinod Koul <vinod.koul@linux.intel.com>
Cc: Li Yang <leoli@freescale.com>
Cc: Matt Porter <mporter@kernel.crashing.org>
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by

Alexandre Bounine and committed by
Linus Torvalds
e42d98eb ce2d52cc

+163
+14
drivers/rapidio/Kconfig
··· 22 22 ports for Input/Output direction to allow other traffic 23 23 than Maintenance transfers. 24 24 25 + config RAPIDIO_DMA_ENGINE 26 + bool "DMA Engine support for RapidIO" 27 + depends on RAPIDIO 28 + select DMADEVICES 29 + select DMA_ENGINE 30 + help 31 + Say Y here if you want to use DMA Engine framework for RapidIO data 32 + transfers to/from target RIO devices. RapidIO uses NREAD and 33 + NWRITE (NWRITE_R, SWRITE) requests to transfer data between local 34 + memory and memory on remote target device. You need a DMA controller 35 + capable to perform data transfers to/from RapidIO. 36 + 37 + If you are unsure about this, say Y here. 38 + 25 39 config RAPIDIO_DEBUG 26 40 bool "RapidIO subsystem debug messages" 27 41 depends on RAPIDIO
+81
drivers/rapidio/rio.c
··· 1121 1121 return 0; 1122 1122 } 1123 1123 1124 + #ifdef CONFIG_RAPIDIO_DMA_ENGINE 1125 + 1126 + static bool rio_chan_filter(struct dma_chan *chan, void *arg) 1127 + { 1128 + struct rio_dev *rdev = arg; 1129 + 1130 + /* Check that DMA device belongs to the right MPORT */ 1131 + return (rdev->net->hport == 1132 + container_of(chan->device, struct rio_mport, dma)); 1133 + } 1134 + 1135 + /** 1136 + * rio_request_dma - request RapidIO capable DMA channel that supports 1137 + * specified target RapidIO device. 1138 + * @rdev: RIO device control structure 1139 + * 1140 + * Returns pointer to allocated DMA channel or NULL if failed. 1141 + */ 1142 + struct dma_chan *rio_request_dma(struct rio_dev *rdev) 1143 + { 1144 + dma_cap_mask_t mask; 1145 + struct dma_chan *dchan; 1146 + 1147 + dma_cap_zero(mask); 1148 + dma_cap_set(DMA_SLAVE, mask); 1149 + dchan = dma_request_channel(mask, rio_chan_filter, rdev); 1150 + 1151 + return dchan; 1152 + } 1153 + EXPORT_SYMBOL_GPL(rio_request_dma); 1154 + 1155 + /** 1156 + * rio_release_dma - release specified DMA channel 1157 + * @dchan: DMA channel to release 1158 + */ 1159 + void rio_release_dma(struct dma_chan *dchan) 1160 + { 1161 + dma_release_channel(dchan); 1162 + } 1163 + EXPORT_SYMBOL_GPL(rio_release_dma); 1164 + 1165 + /** 1166 + * rio_dma_prep_slave_sg - RapidIO specific wrapper 1167 + * for device_prep_slave_sg callback defined by DMAENGINE. 1168 + * @rdev: RIO device control structure 1169 + * @dchan: DMA channel to configure 1170 + * @data: RIO specific data descriptor 1171 + * @direction: DMA data transfer direction (TO or FROM the device) 1172 + * @flags: dmaengine defined flags 1173 + * 1174 + * Initializes RapidIO capable DMA channel for the specified data transfer. 1175 + * Uses DMA channel private extension to pass information related to remote 1176 + * target RIO device. 1177 + * Returns pointer to DMA transaction descriptor or NULL if failed. 
1178 + */ 1179 + struct dma_async_tx_descriptor *rio_dma_prep_slave_sg(struct rio_dev *rdev, 1180 + struct dma_chan *dchan, struct rio_dma_data *data, 1181 + enum dma_transfer_direction direction, unsigned long flags) 1182 + { 1183 + struct dma_async_tx_descriptor *txd = NULL; 1184 + struct rio_dma_ext rio_ext; 1185 + 1186 + if (dchan->device->device_prep_slave_sg == NULL) { 1187 + pr_err("%s: prep_rio_sg == NULL\n", __func__); 1188 + return NULL; 1189 + } 1190 + 1191 + rio_ext.destid = rdev->destid; 1192 + rio_ext.rio_addr_u = data->rio_addr_u; 1193 + rio_ext.rio_addr = data->rio_addr; 1194 + rio_ext.wr_type = data->wr_type; 1195 + 1196 + txd = dmaengine_prep_rio_sg(dchan, data->sg, data->sg_len, 1197 + direction, flags, &rio_ext); 1198 + 1199 + return txd; 1200 + } 1201 + EXPORT_SYMBOL_GPL(rio_dma_prep_slave_sg); 1202 + 1203 + #endif /* CONFIG_RAPIDIO_DMA_ENGINE */ 1204 + 1124 1205 static void rio_fixup_device(struct rio_dev *dev) 1125 1206 { 1126 1207 }
+12
include/linux/dmaengine.h
··· 635 635 dir, flags, NULL); 636 636 } 637 637 638 + #ifdef CONFIG_RAPIDIO_DMA_ENGINE 639 + struct rio_dma_ext; 640 + static inline struct dma_async_tx_descriptor *dmaengine_prep_rio_sg( 641 + struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, 642 + enum dma_transfer_direction dir, unsigned long flags, 643 + struct rio_dma_ext *rio_ext) 644 + { 645 + return chan->device->device_prep_slave_sg(chan, sgl, sg_len, 646 + dir, flags, rio_ext); 647 + } 648 + #endif 649 + 638 650 static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic( 639 651 struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, 640 652 size_t period_len, enum dma_transfer_direction dir)
+47
include/linux/rio.h
··· 20 20 #include <linux/errno.h> 21 21 #include <linux/device.h> 22 22 #include <linux/rio_regs.h> 23 + #ifdef CONFIG_RAPIDIO_DMA_ENGINE 24 + #include <linux/dmaengine.h> 25 + #endif 23 26 24 27 #define RIO_NO_HOPCOUNT -1 25 28 #define RIO_INVALID_DESTID 0xffff ··· 257 254 u32 phys_efptr; 258 255 unsigned char name[40]; 259 256 void *priv; /* Master port private data */ 257 + #ifdef CONFIG_RAPIDIO_DMA_ENGINE 258 + struct dma_device dma; 259 + #endif 260 260 }; 261 261 262 262 /** ··· 400 394 } em; 401 395 u32 raw[RIO_PW_MSG_SIZE/sizeof(u32)]; 402 396 }; 397 + 398 + #ifdef CONFIG_RAPIDIO_DMA_ENGINE 399 + 400 + /** 401 + * enum rio_write_type - RIO write transaction types used in DMA transfers 402 + * 403 + * Note: RapidIO specification defines write (NWRITE) and 404 + * write-with-response (NWRITE_R) data transfer operations. 405 + * Existing DMA controllers that service RapidIO may use one of these operations 406 + * for entire data transfer or their combination with only the last data packet 407 + * requires response. 
408 + */ 409 + enum rio_write_type { 410 + RDW_DEFAULT, /* default method used by DMA driver */ 411 + RDW_ALL_NWRITE, /* all packets use NWRITE */ 412 + RDW_ALL_NWRITE_R, /* all packets use NWRITE_R */ 413 + RDW_LAST_NWRITE_R, /* last packet uses NWRITE_R, others - NWRITE */ 414 + }; 415 + 416 + struct rio_dma_ext { 417 + u16 destid; 418 + u64 rio_addr; /* low 64-bits of 66-bit RapidIO address */ 419 + u8 rio_addr_u; /* upper 2-bits of 66-bit RapidIO address */ 420 + enum rio_write_type wr_type; /* preferred RIO write operation type */ 421 + }; 422 + 423 + struct rio_dma_data { 424 + /* Local data (as scatterlist) */ 425 + struct scatterlist *sg; /* I/O scatter list */ 426 + unsigned int sg_len; /* size of scatter list */ 427 + /* Remote device address (flat buffer) */ 428 + u64 rio_addr; /* low 64-bits of 66-bit RapidIO address */ 429 + u8 rio_addr_u; /* upper 2-bits of 66-bit RapidIO address */ 430 + enum rio_write_type wr_type; /* preferred RIO write operation type */ 431 + }; 432 + 433 + static inline struct rio_mport *dma_to_mport(struct dma_device *ddev) 434 + { 435 + return container_of(ddev, struct rio_mport, dma); 436 + } 437 + #endif /* CONFIG_RAPIDIO_DMA_ENGINE */ 403 438 404 439 /* Architecture and hardware-specific functions */ 405 440 extern int rio_register_mport(struct rio_mport *);
+9
include/linux/rio_drv.h
··· 377 377 struct rio_dev *rio_dev_get(struct rio_dev *); 378 378 void rio_dev_put(struct rio_dev *); 379 379 380 + #ifdef CONFIG_RAPIDIO_DMA_ENGINE 381 + extern struct dma_chan *rio_request_dma(struct rio_dev *rdev); 382 + extern void rio_release_dma(struct dma_chan *dchan); 383 + extern struct dma_async_tx_descriptor *rio_dma_prep_slave_sg( 384 + struct rio_dev *rdev, struct dma_chan *dchan, 385 + struct rio_dma_data *data, 386 + enum dma_transfer_direction direction, unsigned long flags); 387 + #endif 388 + 380 389 /** 381 390 * rio_name - Get the unique RIO device identifier 382 391 * @rdev: RIO device