Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'ib-iio-mfd-ti335x_dma' into togreg

A local branch was created, as Lee requested. These two patches were applied
in a fashion that would later let him merge the same branch into MFD
if needed.

+154 -3
+145 -3
drivers/iio/adc/ti_am335x_adc.c
··· 30 30 #include <linux/iio/buffer.h> 31 31 #include <linux/iio/kfifo_buf.h> 32 32 33 + #include <linux/dmaengine.h> 34 + #include <linux/dma-mapping.h> 35 + 36 + #define DMA_BUFFER_SIZE SZ_2K 37 + 38 + struct tiadc_dma { 39 + struct dma_slave_config conf; 40 + struct dma_chan *chan; 41 + dma_addr_t addr; 42 + dma_cookie_t cookie; 43 + u8 *buf; 44 + int current_period; 45 + int period_size; 46 + u8 fifo_thresh; 47 + }; 48 + 33 49 struct tiadc_device { 34 50 struct ti_tscadc_dev *mfd_tscadc; 51 + struct tiadc_dma dma; 35 52 struct mutex fifo1_lock; /* to protect fifo access */ 36 53 int channels; 54 + int total_ch_enabled; 37 55 u8 channel_line[8]; 38 56 u8 channel_step[8]; 39 57 int buffer_en_ch_steps; ··· 216 198 return IRQ_HANDLED; 217 199 } 218 200 201 + static void tiadc_dma_rx_complete(void *param) 202 + { 203 + struct iio_dev *indio_dev = param; 204 + struct tiadc_device *adc_dev = iio_priv(indio_dev); 205 + struct tiadc_dma *dma = &adc_dev->dma; 206 + u8 *data; 207 + int i; 208 + 209 + data = dma->buf + dma->current_period * dma->period_size; 210 + dma->current_period = 1 - dma->current_period; /* swap the buffer ID */ 211 + 212 + for (i = 0; i < dma->period_size; i += indio_dev->scan_bytes) { 213 + iio_push_to_buffers(indio_dev, data); 214 + data += indio_dev->scan_bytes; 215 + } 216 + } 217 + 218 + static int tiadc_start_dma(struct iio_dev *indio_dev) 219 + { 220 + struct tiadc_device *adc_dev = iio_priv(indio_dev); 221 + struct tiadc_dma *dma = &adc_dev->dma; 222 + struct dma_async_tx_descriptor *desc; 223 + 224 + dma->current_period = 0; /* We start to fill period 0 */ 225 + /* 226 + * Make the fifo thresh as the multiple of total number of 227 + * channels enabled, so make sure that cyclic DMA period 228 + * length is also a multiple of total number of channels 229 + * enabled. This ensures that no invalid data is reported 230 + * to the stack via iio_push_to_buffers(). 
231 + */ 232 + dma->fifo_thresh = rounddown(FIFO1_THRESHOLD + 1, 233 + adc_dev->total_ch_enabled) - 1; 234 + /* Make sure that period length is multiple of fifo thresh level */ 235 + dma->period_size = rounddown(DMA_BUFFER_SIZE / 2, 236 + (dma->fifo_thresh + 1) * sizeof(u16)); 237 + 238 + dma->conf.src_maxburst = dma->fifo_thresh + 1; 239 + dmaengine_slave_config(dma->chan, &dma->conf); 240 + 241 + desc = dmaengine_prep_dma_cyclic(dma->chan, dma->addr, 242 + dma->period_size * 2, 243 + dma->period_size, DMA_DEV_TO_MEM, 244 + DMA_PREP_INTERRUPT); 245 + if (!desc) 246 + return -EBUSY; 247 + 248 + desc->callback = tiadc_dma_rx_complete; 249 + desc->callback_param = indio_dev; 250 + 251 + dma->cookie = dmaengine_submit(desc); 252 + 253 + dma_async_issue_pending(dma->chan); 254 + 255 + tiadc_writel(adc_dev, REG_FIFO1THR, dma->fifo_thresh); 256 + tiadc_writel(adc_dev, REG_DMA1REQ, dma->fifo_thresh); 257 + tiadc_writel(adc_dev, REG_DMAENABLE_SET, DMA_FIFO1); 258 + 259 + return 0; 260 + } 261 + 219 262 static int tiadc_buffer_preenable(struct iio_dev *indio_dev) 220 263 { 221 264 struct tiadc_device *adc_dev = iio_priv(indio_dev); ··· 297 218 static int tiadc_buffer_postenable(struct iio_dev *indio_dev) 298 219 { 299 220 struct tiadc_device *adc_dev = iio_priv(indio_dev); 221 + struct tiadc_dma *dma = &adc_dev->dma; 222 + unsigned int irq_enable; 300 223 unsigned int enb = 0; 301 224 u8 bit; 302 225 303 226 tiadc_step_config(indio_dev); 304 - for_each_set_bit(bit, indio_dev->active_scan_mask, adc_dev->channels) 227 + for_each_set_bit(bit, indio_dev->active_scan_mask, adc_dev->channels) { 305 228 enb |= (get_adc_step_bit(adc_dev, bit) << 1); 229 + adc_dev->total_ch_enabled++; 230 + } 306 231 adc_dev->buffer_en_ch_steps = enb; 232 + 233 + if (dma->chan) 234 + tiadc_start_dma(indio_dev); 307 235 308 236 am335x_tsc_se_set_cache(adc_dev->mfd_tscadc, enb); 309 237 310 238 tiadc_writel(adc_dev, REG_IRQSTATUS, IRQENB_FIFO1THRES 311 239 | IRQENB_FIFO1OVRRUN | IRQENB_FIFO1UNDRFLW); 
312 - tiadc_writel(adc_dev, REG_IRQENABLE, IRQENB_FIFO1THRES 313 - | IRQENB_FIFO1OVRRUN); 240 + 241 + irq_enable = IRQENB_FIFO1OVRRUN; 242 + if (!dma->chan) 243 + irq_enable |= IRQENB_FIFO1THRES; 244 + tiadc_writel(adc_dev, REG_IRQENABLE, irq_enable); 314 245 315 246 return 0; 316 247 } ··· 328 239 static int tiadc_buffer_predisable(struct iio_dev *indio_dev) 329 240 { 330 241 struct tiadc_device *adc_dev = iio_priv(indio_dev); 242 + struct tiadc_dma *dma = &adc_dev->dma; 331 243 int fifo1count, i, read; 332 244 333 245 tiadc_writel(adc_dev, REG_IRQCLR, (IRQENB_FIFO1THRES | 334 246 IRQENB_FIFO1OVRRUN | IRQENB_FIFO1UNDRFLW)); 335 247 am335x_tsc_se_clr(adc_dev->mfd_tscadc, adc_dev->buffer_en_ch_steps); 336 248 adc_dev->buffer_en_ch_steps = 0; 249 + adc_dev->total_ch_enabled = 0; 250 + if (dma->chan) { 251 + tiadc_writel(adc_dev, REG_DMAENABLE_CLEAR, 0x2); 252 + dmaengine_terminate_async(dma->chan); 253 + } 337 254 338 255 /* Flush FIFO of leftover data in the time it takes to disable adc */ 339 256 fifo1count = tiadc_readl(adc_dev, REG_FIFO1CNT); ··· 525 430 .driver_module = THIS_MODULE, 526 431 }; 527 432 433 + static int tiadc_request_dma(struct platform_device *pdev, 434 + struct tiadc_device *adc_dev) 435 + { 436 + struct tiadc_dma *dma = &adc_dev->dma; 437 + dma_cap_mask_t mask; 438 + 439 + /* Default slave configuration parameters */ 440 + dma->conf.direction = DMA_DEV_TO_MEM; 441 + dma->conf.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; 442 + dma->conf.src_addr = adc_dev->mfd_tscadc->tscadc_phys_base + REG_FIFO1; 443 + 444 + dma_cap_zero(mask); 445 + dma_cap_set(DMA_CYCLIC, mask); 446 + 447 + /* Get a channel for RX */ 448 + dma->chan = dma_request_chan(adc_dev->mfd_tscadc->dev, "fifo1"); 449 + if (IS_ERR(dma->chan)) { 450 + int ret = PTR_ERR(dma->chan); 451 + 452 + dma->chan = NULL; 453 + return ret; 454 + } 455 + 456 + /* RX buffer */ 457 + dma->buf = dma_alloc_coherent(dma->chan->device->dev, DMA_BUFFER_SIZE, 458 + &dma->addr, GFP_KERNEL); 459 + if 
(!dma->buf) 460 + goto err; 461 + 462 + return 0; 463 + err: 464 + dma_release_channel(dma->chan); 465 + return -ENOMEM; 466 + } 467 + 528 468 static int tiadc_parse_dt(struct platform_device *pdev, 529 469 struct tiadc_device *adc_dev) 530 470 { ··· 642 512 643 513 platform_set_drvdata(pdev, indio_dev); 644 514 515 + err = tiadc_request_dma(pdev, adc_dev); 516 + if (err && err == -EPROBE_DEFER) 517 + goto err_dma; 518 + 645 519 return 0; 646 520 521 + err_dma: 522 + iio_device_unregister(indio_dev); 647 523 err_buffer_unregister: 648 524 tiadc_iio_buffered_hardware_remove(indio_dev); 649 525 err_free_channels: ··· 661 525 { 662 526 struct iio_dev *indio_dev = platform_get_drvdata(pdev); 663 527 struct tiadc_device *adc_dev = iio_priv(indio_dev); 528 + struct tiadc_dma *dma = &adc_dev->dma; 664 529 u32 step_en; 665 530 531 + if (dma->chan) { 532 + dma_free_coherent(dma->chan->device->dev, DMA_BUFFER_SIZE, 533 + dma->buf, dma->addr); 534 + dma_release_channel(dma->chan); 535 + } 666 536 iio_device_unregister(indio_dev); 667 537 tiadc_iio_buffered_hardware_remove(indio_dev); 668 538 tiadc_channels_remove(indio_dev);
+1
drivers/mfd/ti_am335x_tscadc.c
··· 183 183 tscadc->irq = err; 184 184 185 185 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 186 + tscadc->tscadc_phys_base = res->start; 186 187 tscadc->tscadc_base = devm_ioremap_resource(&pdev->dev, res); 187 188 if (IS_ERR(tscadc->tscadc_base)) 188 189 return PTR_ERR(tscadc->tscadc_base);
+8
include/linux/mfd/ti_am335x_tscadc.h
··· 23 23 #define REG_IRQENABLE 0x02C 24 24 #define REG_IRQCLR 0x030 25 25 #define REG_IRQWAKEUP 0x034 26 + #define REG_DMAENABLE_SET 0x038 27 + #define REG_DMAENABLE_CLEAR 0x03c 26 28 #define REG_CTRL 0x040 27 29 #define REG_ADCFSM 0x044 28 30 #define REG_CLKDIV 0x04C ··· 38 36 #define REG_FIFO0THR 0xE8 39 37 #define REG_FIFO1CNT 0xF0 40 38 #define REG_FIFO1THR 0xF4 39 + #define REG_DMA1REQ 0xF8 41 40 #define REG_FIFO0 0x100 42 41 #define REG_FIFO1 0x200 43 42 ··· 129 126 #define FIFOREAD_DATA_MASK (0xfff << 0) 130 127 #define FIFOREAD_CHNLID_MASK (0xf << 16) 131 128 129 + /* DMA ENABLE/CLEAR Register */ 130 + #define DMA_FIFO0 BIT(0) 131 + #define DMA_FIFO1 BIT(1) 132 + 132 133 /* Sequencer Status */ 133 134 #define SEQ_STATUS BIT(5) 134 135 #define CHARGE_STEP 0x11 ··· 162 155 struct device *dev; 163 156 struct regmap *regmap; 164 157 void __iomem *tscadc_base; 158 + phys_addr_t tscadc_phys_base; 165 159 int irq; 166 160 int used_cells; /* 1-2 */ 167 161 int tsc_wires;