Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

iio: buffer-dma: Enable buffer write support

Adding write support to the buffer-dma code is easy - the write()
function basically needs to do the exact same thing as the read()
function: dequeue a block, read or write the data, enqueue the block
when entirely processed.

Therefore, iio_dma_buffer_read() and the new iio_dma_buffer_write()
now both call a function iio_dma_buffer_io(), which will perform this
task.

Note that we preemptively reset block->bytes_used to the buffer's size
in iio_dma_buffer_request_update(), as in the future the
iio_dma_buffer_enqueue() function won't reset it.

Signed-off-by: Paul Cercueil <paul@crapouillou.net>
Reviewed-by: Alexandru Ardelean <ardeleanalex@gmail.com>
Signed-off-by: Nuno Sa <nuno.sa@analog.com>
Link: https://lore.kernel.org/r/20240419-iio-backend-axi-dac-v4-3-5ca45b4de294@analog.com
Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>

authored by

Paul Cercueil and committed by
Jonathan Cameron
fb09feba 04ae3b1a

+75 -16
+73 -16
drivers/iio/buffer/industrialio-buffer-dma.c
··· 195 195 block->state = IIO_BLOCK_STATE_DONE; 196 196 } 197 197 198 + static void iio_dma_buffer_queue_wake(struct iio_dma_buffer_queue *queue) 199 + { 200 + __poll_t flags; 201 + 202 + if (queue->buffer.direction == IIO_BUFFER_DIRECTION_IN) 203 + flags = EPOLLIN | EPOLLRDNORM; 204 + else 205 + flags = EPOLLOUT | EPOLLWRNORM; 206 + 207 + wake_up_interruptible_poll(&queue->buffer.pollq, flags); 208 + } 209 + 198 210 /** 199 211 * iio_dma_buffer_block_done() - Indicate that a block has been completed 200 212 * @block: The completed block ··· 224 212 spin_unlock_irqrestore(&queue->list_lock, flags); 225 213 226 214 iio_buffer_block_put_atomic(block); 227 - wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM); 215 + iio_dma_buffer_queue_wake(queue); 228 216 } 229 217 EXPORT_SYMBOL_GPL(iio_dma_buffer_block_done); 230 218 ··· 253 241 } 254 242 spin_unlock_irqrestore(&queue->list_lock, flags); 255 243 256 - wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM); 244 + iio_dma_buffer_queue_wake(queue); 257 245 } 258 246 EXPORT_SYMBOL_GPL(iio_dma_buffer_block_list_abort); 259 247 ··· 347 335 queue->fileio.blocks[i] = block; 348 336 } 349 337 350 - block->state = IIO_BLOCK_STATE_QUEUED; 351 - list_add_tail(&block->head, &queue->incoming); 338 + /* 339 + * block->bytes_used may have been modified previously, e.g. by 340 + * iio_dma_buffer_block_list_abort(). Reset it here to the 341 + * block's so that iio_dma_buffer_io() will work. 342 + */ 343 + block->bytes_used = block->size; 344 + 345 + /* 346 + * If it's an input buffer, mark the block as queued, and 347 + * iio_dma_buffer_enable() will submit it. Otherwise mark it as 348 + * done, which means it's ready to be dequeued. 
349 + */ 350 + if (queue->buffer.direction == IIO_BUFFER_DIRECTION_IN) { 351 + block->state = IIO_BLOCK_STATE_QUEUED; 352 + list_add_tail(&block->head, &queue->incoming); 353 + } else { 354 + block->state = IIO_BLOCK_STATE_DONE; 355 + } 352 356 } 353 357 354 358 out_unlock: ··· 516 488 return block; 517 489 } 518 490 519 - /** 520 - * iio_dma_buffer_read() - DMA buffer read callback 521 - * @buffer: Buffer to read form 522 - * @n: Number of bytes to read 523 - * @user_buffer: Userspace buffer to copy the data to 524 - * 525 - * Should be used as the read callback for iio_buffer_access_ops 526 - * struct for DMA buffers. 527 - */ 528 - int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n, 529 - char __user *user_buffer) 491 + static int iio_dma_buffer_io(struct iio_buffer *buffer, size_t n, 492 + char __user *user_buffer, bool is_from_user) 530 493 { 531 494 struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer); 532 495 struct iio_dma_buffer_block *block; 496 + void *addr; 533 497 int ret; 534 498 535 499 if (n < buffer->bytes_per_datum) ··· 544 524 n = rounddown(n, buffer->bytes_per_datum); 545 525 if (n > block->bytes_used - queue->fileio.pos) 546 526 n = block->bytes_used - queue->fileio.pos; 527 + addr = block->vaddr + queue->fileio.pos; 547 528 548 - if (copy_to_user(user_buffer, block->vaddr + queue->fileio.pos, n)) { 529 + if (is_from_user) 530 + ret = copy_from_user(addr, user_buffer, n); 531 + else 532 + ret = copy_to_user(user_buffer, addr, n); 533 + if (ret) { 549 534 ret = -EFAULT; 550 535 goto out_unlock; 551 536 } ··· 569 544 570 545 return ret; 571 546 } 547 + 548 + /** 549 + * iio_dma_buffer_read() - DMA buffer read callback 550 + * @buffer: Buffer to read form 551 + * @n: Number of bytes to read 552 + * @user_buffer: Userspace buffer to copy the data to 553 + * 554 + * Should be used as the read callback for iio_buffer_access_ops 555 + * struct for DMA buffers. 
556 + */ 557 + int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n, 558 + char __user *user_buffer) 559 + { 560 + return iio_dma_buffer_io(buffer, n, user_buffer, false); 561 + } 572 562 EXPORT_SYMBOL_GPL(iio_dma_buffer_read); 563 + 564 + /** 565 + * iio_dma_buffer_write() - DMA buffer write callback 566 + * @buffer: Buffer to read form 567 + * @n: Number of bytes to read 568 + * @user_buffer: Userspace buffer to copy the data from 569 + * 570 + * Should be used as the write callback for iio_buffer_access_ops 571 + * struct for DMA buffers. 572 + */ 573 + int iio_dma_buffer_write(struct iio_buffer *buffer, size_t n, 574 + const char __user *user_buffer) 575 + { 576 + return iio_dma_buffer_io(buffer, n, 577 + (__force __user char *)user_buffer, true); 578 + } 579 + EXPORT_SYMBOL_GPL(iio_dma_buffer_write); 573 580 574 581 /** 575 582 * iio_dma_buffer_usage() - DMA buffer data_available and
+2
include/linux/iio/buffer-dma.h
··· 132 132 struct iio_dev *indio_dev); 133 133 int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n, 134 134 char __user *user_buffer); 135 + int iio_dma_buffer_write(struct iio_buffer *buffer, size_t n, 136 + const char __user *user_buffer); 135 137 size_t iio_dma_buffer_usage(struct iio_buffer *buffer); 136 138 int iio_dma_buffer_set_bytes_per_datum(struct iio_buffer *buffer, size_t bpd); 137 139 int iio_dma_buffer_set_length(struct iio_buffer *buffer, unsigned int length);