// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2013-2015 Analog Devices Inc.
 *  Author: Lars-Peter Clausen <lars@metafoo.de>
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/poll.h>
#include <linux/iio/buffer_impl.h>
#include <linux/iio/buffer-dma.h>
#include <linux/dma-mapping.h>
#include <linux/sizes.h>

/*
 * For DMA buffers the storage is sub-divided into so-called blocks. Each block
 * has its own memory buffer. The size of the block is the granularity at which
 * memory is exchanged between the hardware and the application. Increasing the
 * basic unit of data exchange from one sample to one block decreases the
 * management overhead that is associated with each sample. E.g. if we say the
 * management overhead for one exchange is x and the unit of exchange is one
 * sample, the overhead will be x for each sample. Whereas when using a block
 * which contains n samples, the overhead per sample is reduced to x/n. This
 * allows much higher sample rates to be sustained than the one-sample-at-a-time
 * approach.
 *
 * Blocks are exchanged between the DMA controller and the application by means
 * of two queues: the incoming queue and the outgoing queue. Blocks on the
 * incoming queue are waiting for the DMA controller to pick them up and fill
 * them with data. Blocks on the outgoing queue have been filled with data and
 * are waiting for the application to dequeue them and read the data.
 *
 * A block can be in one of the following states:
 *  * Owned by the application: In this state the application can read data
 *    from the block.
 *  * On the incoming list: Blocks on the incoming list are queued up to be
 *    processed by the DMA controller.
 *  * Owned by the DMA controller: The DMA controller is processing the block
 *    and filling it with data.
 *  * On the outgoing list: Blocks on the outgoing list have been successfully
 *    processed by the DMA controller and contain data. They can be dequeued by
 *    the application.
 *  * Dead: A block that is dead has been marked to be freed. It might still be
 *    owned by either the application or the DMA controller at the moment, but
 *    once the current owner is done processing it, the block will be freed
 *    instead of being put on either the incoming or outgoing queue.
 *
 * In addition, blocks are reference counted and the memory associated with
 * both the block structure as well as the storage memory for the block will be
 * freed when the last reference to the block is dropped. This means a block
 * must not be accessed without holding a reference.
 *
 * The iio_dma_buffer implementation provides a generic infrastructure for
 * managing the blocks.
 *
 * A driver for a specific piece of hardware that has DMA capabilities needs to
 * implement the submit() callback from the iio_dma_buffer_ops structure. This
 * callback is supposed to initiate the DMA transfer copying data from the
 * converter to the memory region of the block. Once the DMA transfer has been
 * completed the driver must call iio_dma_buffer_block_done() for the completed
 * block.
 *
 * Prior to this it must set the bytes_used field of the block to the actual
 * number of bytes in the buffer. Typically this will be equal to the size of
 * the block, but if the DMA hardware has certain alignment requirements for
 * the transfer length it might choose to use less than the full size. In
 * either case it is expected that bytes_used is a multiple of the bytes per
 * datum, i.e. the block must not contain partial samples.
 *
 * The driver must call iio_dma_buffer_block_done() for each block it has
 * received through its submit() callback, even if it does not actually perform
 * a DMA transfer for the block, e.g. because the buffer was disabled before
 * the block transfer was started. In this case it should set bytes_used to 0.
 *
 * In addition it is recommended that a driver implements the abort() callback.
 * It will be called when the buffer is disabled and can be used to cancel
 * pending transfers and stop active ones.
 *
 * The specific driver implementation should use the default callback
 * implementations provided by this module for the iio_buffer_access_funcs
 * struct. It may overload some callbacks with custom variants if the hardware
 * has special requirements that are not handled by the generic functions. If a
 * driver chooses to overload a callback it has to ensure that the generic
 * callback is called from within the custom callback.
 */

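/*
 * As an illustration of the driver contract described above, a hypothetical
 * driver's submit() and abort() callbacks might look roughly like the sketch
 * below. The foo_* names and the foo_hw_* hardware helpers are made up for
 * the example and are not part of this module:
 *
 *	static void foo_dma_transfer_done(void *data, unsigned int bytes)
 *	{
 *		struct iio_dma_buffer_block *block = data;
 *
 *		// Report how much data the hardware actually produced (a
 *		// multiple of the bytes per datum) and hand the block back.
 *		block->bytes_used = bytes;
 *		iio_dma_buffer_block_done(block);
 *	}
 *
 *	static int foo_dma_submit(struct iio_dma_buffer_queue *queue,
 *		struct iio_dma_buffer_block *block)
 *	{
 *		// Start the transfer into the block's DMA memory; the
 *		// hardware completion path ends up in foo_dma_transfer_done().
 *		return foo_hw_start_transfer(queue->dev, block->phys_addr,
 *			block->size, foo_dma_transfer_done, block);
 *	}
 *
 *	static void foo_dma_abort(struct iio_dma_buffer_queue *queue)
 *	{
 *		// Stop the hardware and return any blocks that will never
 *		// complete, e.g. via iio_dma_buffer_block_list_abort() with
 *		// bytes_used set to 0.
 *		foo_hw_stop_transfers(queue->dev);
 *	}
 */
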
static void iio_buffer_block_release(struct kref *kref)
{
	struct iio_dma_buffer_block *block = container_of(kref,
		struct iio_dma_buffer_block, kref);

	WARN_ON(block->state != IIO_BLOCK_STATE_DEAD);

	dma_free_coherent(block->queue->dev, PAGE_ALIGN(block->size),
			  block->vaddr, block->phys_addr);

	iio_buffer_put(&block->queue->buffer);
	kfree(block);
}

static void iio_buffer_block_get(struct iio_dma_buffer_block *block)
{
	kref_get(&block->kref);
}

static void iio_buffer_block_put(struct iio_dma_buffer_block *block)
{
	kref_put(&block->kref, iio_buffer_block_release);
}

/*
 * dma_free_coherent can sleep, hence we need to take some special care to be
 * able to drop a reference from an atomic context.
 */
static LIST_HEAD(iio_dma_buffer_dead_blocks);
static DEFINE_SPINLOCK(iio_dma_buffer_dead_blocks_lock);

static void iio_dma_buffer_cleanup_worker(struct work_struct *work)
{
	struct iio_dma_buffer_block *block, *_block;
	LIST_HEAD(block_list);

	spin_lock_irq(&iio_dma_buffer_dead_blocks_lock);
	list_splice_tail_init(&iio_dma_buffer_dead_blocks, &block_list);
	spin_unlock_irq(&iio_dma_buffer_dead_blocks_lock);

	list_for_each_entry_safe(block, _block, &block_list, head)
		iio_buffer_block_release(&block->kref);
}
static DECLARE_WORK(iio_dma_buffer_cleanup_work, iio_dma_buffer_cleanup_worker);

static void iio_buffer_block_release_atomic(struct kref *kref)
{
	struct iio_dma_buffer_block *block;
	unsigned long flags;

	block = container_of(kref, struct iio_dma_buffer_block, kref);

	spin_lock_irqsave(&iio_dma_buffer_dead_blocks_lock, flags);
	list_add_tail(&block->head, &iio_dma_buffer_dead_blocks);
	spin_unlock_irqrestore(&iio_dma_buffer_dead_blocks_lock, flags);

	schedule_work(&iio_dma_buffer_cleanup_work);
}

/*
 * Version of iio_buffer_block_put() that can be called from atomic context
 */
static void iio_buffer_block_put_atomic(struct iio_dma_buffer_block *block)
{
	kref_put(&block->kref, iio_buffer_block_release_atomic);
}

static struct iio_dma_buffer_queue *iio_buffer_to_queue(struct iio_buffer *buf)
{
	return container_of(buf, struct iio_dma_buffer_queue, buffer);
}

static struct iio_dma_buffer_block *iio_dma_buffer_alloc_block(
	struct iio_dma_buffer_queue *queue, size_t size)
{
	struct iio_dma_buffer_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block)
		return NULL;

	block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size),
		&block->phys_addr, GFP_KERNEL);
	if (!block->vaddr) {
		kfree(block);
		return NULL;
	}

	block->size = size;
	block->state = IIO_BLOCK_STATE_DONE;
	block->queue = queue;
	INIT_LIST_HEAD(&block->head);
	kref_init(&block->kref);

	iio_buffer_get(&queue->buffer);

	return block;
}

static void _iio_dma_buffer_block_done(struct iio_dma_buffer_block *block)
{
	if (block->state != IIO_BLOCK_STATE_DEAD)
		block->state = IIO_BLOCK_STATE_DONE;
}

static void iio_dma_buffer_queue_wake(struct iio_dma_buffer_queue *queue)
{
	__poll_t flags;

	if (queue->buffer.direction == IIO_BUFFER_DIRECTION_IN)
		flags = EPOLLIN | EPOLLRDNORM;
	else
		flags = EPOLLOUT | EPOLLWRNORM;

	wake_up_interruptible_poll(&queue->buffer.pollq, flags);
}

/**
 * iio_dma_buffer_block_done() - Indicate that a block has been completed
 * @block: The completed block
 *
 * Should be called when the DMA controller has finished handling the block to
 * pass back ownership of the block to the queue.
 */
void iio_dma_buffer_block_done(struct iio_dma_buffer_block *block)
{
	struct iio_dma_buffer_queue *queue = block->queue;
	unsigned long flags;

	spin_lock_irqsave(&queue->list_lock, flags);
	_iio_dma_buffer_block_done(block);
	spin_unlock_irqrestore(&queue->list_lock, flags);

	iio_buffer_block_put_atomic(block);
	iio_dma_buffer_queue_wake(queue);
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_block_done);

/**
 * iio_dma_buffer_block_list_abort() - Indicate that a list of blocks has been
 *   aborted
 * @queue: Queue for which to complete blocks.
 * @list: List of aborted blocks. All blocks in this list must be from @queue.
 *
 * Typically called from the abort() callback after the DMA controller has been
 * stopped. This will set bytes_used to 0 for each block in the list and then
 * hand the blocks back to the queue.
 */
void iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue *queue,
	struct list_head *list)
{
	struct iio_dma_buffer_block *block, *_block;
	unsigned long flags;

	spin_lock_irqsave(&queue->list_lock, flags);
	list_for_each_entry_safe(block, _block, list, head) {
		list_del(&block->head);
		block->bytes_used = 0;
		_iio_dma_buffer_block_done(block);
		iio_buffer_block_put_atomic(block);
	}
	spin_unlock_irqrestore(&queue->list_lock, flags);

	iio_dma_buffer_queue_wake(queue);
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_block_list_abort);

static bool iio_dma_block_reusable(struct iio_dma_buffer_block *block)
{
	/*
	 * If the core owns the block it can be re-used. This should be the
	 * default case when enabling the buffer, unless the DMA controller does
	 * not support abort and has not given back the block yet.
	 */
	switch (block->state) {
	case IIO_BLOCK_STATE_QUEUED:
	case IIO_BLOCK_STATE_DONE:
		return true;
	default:
		return false;
	}
}

/**
 * iio_dma_buffer_request_update() - DMA buffer request_update callback
 * @buffer: The buffer which to request an update
 *
 * Should be used as the request_update() callback for the
 * iio_buffer_access_ops struct for DMA buffers.
 */
int iio_dma_buffer_request_update(struct iio_buffer *buffer)
{
	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
	struct iio_dma_buffer_block *block;
	bool try_reuse = false;
	size_t size;
	int ret = 0;
	int i;

	/*
	 * Split the buffer into two even parts. This is used as a double
	 * buffering scheme with usually one block at a time being used by the
	 * DMA and the other one by the application.
	 */
	size = DIV_ROUND_UP(queue->buffer.bytes_per_datum *
		queue->buffer.length, 2);

	mutex_lock(&queue->lock);

	/* Allocations are page aligned */
	if (PAGE_ALIGN(queue->fileio.block_size) == PAGE_ALIGN(size))
		try_reuse = true;

	queue->fileio.block_size = size;
	queue->fileio.active_block = NULL;

	spin_lock_irq(&queue->list_lock);
	for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
		block = queue->fileio.blocks[i];

		/* If we can't re-use it, free it */
		if (block && (!iio_dma_block_reusable(block) || !try_reuse))
			block->state = IIO_BLOCK_STATE_DEAD;
	}

	/*
	 * At this point all blocks are either owned by the core or marked as
	 * dead. This means we can reset the lists without having to fear
	 * corruption.
	 */
	spin_unlock_irq(&queue->list_lock);

	INIT_LIST_HEAD(&queue->incoming);

	for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
		if (queue->fileio.blocks[i]) {
			block = queue->fileio.blocks[i];
			if (block->state == IIO_BLOCK_STATE_DEAD) {
				/* Could not reuse it */
				iio_buffer_block_put(block);
				block = NULL;
			} else {
				block->size = size;
			}
		} else {
			block = NULL;
		}

		if (!block) {
			block = iio_dma_buffer_alloc_block(queue, size);
			if (!block) {
				ret = -ENOMEM;
				goto out_unlock;
			}
			queue->fileio.blocks[i] = block;
		}

		/*
		 * block->bytes_used may have been modified previously, e.g. by
		 * iio_dma_buffer_block_list_abort(). Reset it here to the
		 * block's size so that iio_dma_buffer_io() will work.
		 */
		block->bytes_used = block->size;

		/*
		 * If it's an input buffer, mark the block as queued, and
		 * iio_dma_buffer_enable() will submit it. Otherwise mark it as
		 * done, which means it's ready to be dequeued.
		 */
		if (queue->buffer.direction == IIO_BUFFER_DIRECTION_IN) {
			block->state = IIO_BLOCK_STATE_QUEUED;
			list_add_tail(&block->head, &queue->incoming);
		} else {
			block->state = IIO_BLOCK_STATE_DONE;
		}
	}

out_unlock:
	mutex_unlock(&queue->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_request_update);

static void iio_dma_buffer_fileio_free(struct iio_dma_buffer_queue *queue)
{
	unsigned int i;

	spin_lock_irq(&queue->list_lock);
	for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
		if (!queue->fileio.blocks[i])
			continue;
		queue->fileio.blocks[i]->state = IIO_BLOCK_STATE_DEAD;
	}
	spin_unlock_irq(&queue->list_lock);

	INIT_LIST_HEAD(&queue->incoming);

	for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
		if (!queue->fileio.blocks[i])
			continue;
		iio_buffer_block_put(queue->fileio.blocks[i]);
		queue->fileio.blocks[i] = NULL;
	}
	queue->fileio.active_block = NULL;
}

static void iio_dma_buffer_submit_block(struct iio_dma_buffer_queue *queue,
	struct iio_dma_buffer_block *block)
{
	int ret;

	/*
	 * If the hardware has already been removed we put the block into
	 * limbo. It will neither be on the incoming nor outgoing list, nor will
	 * it ever complete. It will just wait to be freed eventually.
	 */
	if (!queue->ops)
		return;

	block->state = IIO_BLOCK_STATE_ACTIVE;
	iio_buffer_block_get(block);
	ret = queue->ops->submit(queue, block);
	if (ret) {
		/*
		 * This is a bit of a problem and there is not much we can do
		 * other than wait for the buffer to be disabled and re-enabled
		 * and try again. But it should not really happen unless we run
		 * out of memory or something similar.
		 *
		 * TODO: Implement support in the IIO core to allow buffers to
		 * notify consumers that something went wrong and the buffer
		 * should be disabled.
		 */
		iio_buffer_block_put(block);
	}
}

/**
 * iio_dma_buffer_enable() - Enable DMA buffer
 * @buffer: IIO buffer to enable
 * @indio_dev: IIO device the buffer is attached to
 *
 * Needs to be called when the device that the buffer is attached to starts
 * sampling. Typically should be the iio_buffer_access_ops enable callback.
 *
 * This will submit the blocks on the incoming queue to the DMA controller and
 * start the DMA transfers.
 */
int iio_dma_buffer_enable(struct iio_buffer *buffer,
	struct iio_dev *indio_dev)
{
	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
	struct iio_dma_buffer_block *block, *_block;

	mutex_lock(&queue->lock);
	queue->active = true;
	list_for_each_entry_safe(block, _block, &queue->incoming, head) {
		list_del(&block->head);
		iio_dma_buffer_submit_block(queue, block);
	}
	mutex_unlock(&queue->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_enable);

/**
 * iio_dma_buffer_disable() - Disable DMA buffer
 * @buffer: IIO DMA buffer to disable
 * @indio_dev: IIO device the buffer is attached to
 *
 * Needs to be called when the device that the buffer is attached to stops
 * sampling. Typically should be the iio_buffer_access_ops disable callback.
 */
int iio_dma_buffer_disable(struct iio_buffer *buffer,
	struct iio_dev *indio_dev)
{
	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);

	mutex_lock(&queue->lock);
	queue->active = false;

	if (queue->ops && queue->ops->abort)
		queue->ops->abort(queue);
	mutex_unlock(&queue->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_disable);

static void iio_dma_buffer_enqueue(struct iio_dma_buffer_queue *queue,
	struct iio_dma_buffer_block *block)
{
	if (block->state == IIO_BLOCK_STATE_DEAD) {
		iio_buffer_block_put(block);
	} else if (queue->active) {
		iio_dma_buffer_submit_block(queue, block);
	} else {
		block->state = IIO_BLOCK_STATE_QUEUED;
		list_add_tail(&block->head, &queue->incoming);
	}
}

static struct iio_dma_buffer_block *iio_dma_buffer_dequeue(
	struct iio_dma_buffer_queue *queue)
{
	struct iio_dma_buffer_block *block;
	unsigned int idx;

	spin_lock_irq(&queue->list_lock);

	idx = queue->fileio.next_dequeue;
	block = queue->fileio.blocks[idx];

	if (block->state == IIO_BLOCK_STATE_DONE) {
		idx = (idx + 1) % ARRAY_SIZE(queue->fileio.blocks);
		queue->fileio.next_dequeue = idx;
	} else {
		block = NULL;
	}

	spin_unlock_irq(&queue->list_lock);

	return block;
}

static int iio_dma_buffer_io(struct iio_buffer *buffer, size_t n,
	char __user *user_buffer, bool is_from_user)
{
	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
	struct iio_dma_buffer_block *block;
	void *addr;
	int ret;

	if (n < buffer->bytes_per_datum)
		return -EINVAL;

	mutex_lock(&queue->lock);

	if (!queue->fileio.active_block) {
		block = iio_dma_buffer_dequeue(queue);
		if (block == NULL) {
			ret = 0;
			goto out_unlock;
		}
		queue->fileio.pos = 0;
		queue->fileio.active_block = block;
	} else {
		block = queue->fileio.active_block;
	}

	n = rounddown(n, buffer->bytes_per_datum);
	if (n > block->bytes_used - queue->fileio.pos)
		n = block->bytes_used - queue->fileio.pos;
	addr = block->vaddr + queue->fileio.pos;

	if (is_from_user)
		ret = copy_from_user(addr, user_buffer, n);
	else
		ret = copy_to_user(user_buffer, addr, n);
	if (ret) {
		ret = -EFAULT;
		goto out_unlock;
	}

	queue->fileio.pos += n;

	if (queue->fileio.pos == block->bytes_used) {
		queue->fileio.active_block = NULL;
		iio_dma_buffer_enqueue(queue, block);
	}

	ret = n;

out_unlock:
	mutex_unlock(&queue->lock);

	return ret;
}

/**
 * iio_dma_buffer_read() - DMA buffer read callback
 * @buffer: Buffer to read from
 * @n: Number of bytes to read
 * @user_buffer: Userspace buffer to copy the data to
 *
 * Should be used as the read callback for iio_buffer_access_ops
 * struct for DMA buffers.
 */
int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
	char __user *user_buffer)
{
	return iio_dma_buffer_io(buffer, n, user_buffer, false);
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_read);

/**
 * iio_dma_buffer_write() - DMA buffer write callback
 * @buffer: Buffer to write to
 * @n: Number of bytes to write
 * @user_buffer: Userspace buffer to copy the data from
 *
 * Should be used as the write callback for iio_buffer_access_ops
 * struct for DMA buffers.
 */
int iio_dma_buffer_write(struct iio_buffer *buffer, size_t n,
	const char __user *user_buffer)
{
	return iio_dma_buffer_io(buffer, n,
		(__force __user char *)user_buffer, true);
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_write);

/**
 * iio_dma_buffer_usage() - DMA buffer data_available and
 *   space_available callback
 * @buf: Buffer to check for data availability
 *
 * Should be used as the data_available and space_available callbacks for
 * iio_buffer_access_ops struct for DMA buffers.
 */
size_t iio_dma_buffer_usage(struct iio_buffer *buf)
{
	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buf);
	struct iio_dma_buffer_block *block;
	size_t data_available = 0;
	unsigned int i;

	/*
	 * For counting the available bytes we'll use the size of the block not
	 * the number of actual bytes available in the block. Otherwise it is
	 * possible that we end up with a value that is lower than the watermark
	 * but won't increase since all blocks are in use.
	 */

	mutex_lock(&queue->lock);
	if (queue->fileio.active_block)
		data_available += queue->fileio.active_block->size;

	spin_lock_irq(&queue->list_lock);

	for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
		block = queue->fileio.blocks[i];

		if (block != queue->fileio.active_block &&
		    block->state == IIO_BLOCK_STATE_DONE)
			data_available += block->size;
	}

	spin_unlock_irq(&queue->list_lock);
	mutex_unlock(&queue->lock);

	return data_available;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_usage);

/**
 * iio_dma_buffer_set_bytes_per_datum() - DMA buffer set_bytes_per_datum callback
 * @buffer: Buffer to set the bytes-per-datum for
 * @bpd: The new bytes-per-datum value
 *
 * Should be used as the set_bytes_per_datum callback for iio_buffer_access_ops
 * struct for DMA buffers.
 */
int iio_dma_buffer_set_bytes_per_datum(struct iio_buffer *buffer, size_t bpd)
{
	buffer->bytes_per_datum = bpd;

	return 0;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_set_bytes_per_datum);

/**
 * iio_dma_buffer_set_length - DMA buffer set_length callback
 * @buffer: Buffer to set the length for
 * @length: The new buffer length
 *
 * Should be used as the set_length callback for iio_buffer_access_ops
 * struct for DMA buffers.
 */
int iio_dma_buffer_set_length(struct iio_buffer *buffer, unsigned int length)
{
	/* Avoid an invalid state */
	if (length < 2)
		length = 2;
	buffer->length = length;
	buffer->watermark = length / 2;

	return 0;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_set_length);

/**
 * iio_dma_buffer_init() - Initialize DMA buffer queue
 * @queue: Buffer to initialize
 * @dev: DMA device
 * @ops: DMA buffer queue callback operations
 *
 * The DMA device will be used by the queue to do DMA memory allocations. So it
 * should refer to the device that will perform the DMA to ensure that
 * allocations are done from a memory region that can be accessed by the device.
 */
int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue,
	struct device *dev, const struct iio_dma_buffer_ops *ops)
{
	iio_buffer_init(&queue->buffer);
	queue->buffer.length = PAGE_SIZE;
	queue->buffer.watermark = queue->buffer.length / 2;
	queue->dev = dev;
	queue->ops = ops;

	INIT_LIST_HEAD(&queue->incoming);

	mutex_init(&queue->lock);
	spin_lock_init(&queue->list_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_init);

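/*
 * For illustration, a driver built on top of this module would typically
 * provide an iio_dma_buffer_ops instance and initialize its queue with the
 * device that actually performs the DMA, roughly as sketched below (the
 * foo_* names and dma_dev are made up for the example):
 *
 *	static const struct iio_dma_buffer_ops foo_dma_buffer_ops = {
 *		.submit = foo_dma_submit,
 *		.abort = foo_dma_abort,
 *	};
 *
 *	ret = iio_dma_buffer_init(&foo_buffer->queue, dma_dev,
 *		&foo_dma_buffer_ops);
 *	if (ret)
 *		return ret;
 */
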
/**
 * iio_dma_buffer_exit() - Cleanup DMA buffer queue
 * @queue: Buffer to cleanup
 *
 * After this function has completed it is safe to free any resources that are
 * associated with the buffer and are accessed inside the callback operations.
 */
void iio_dma_buffer_exit(struct iio_dma_buffer_queue *queue)
{
	mutex_lock(&queue->lock);

	iio_dma_buffer_fileio_free(queue);
	queue->ops = NULL;

	mutex_unlock(&queue->lock);
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_exit);

/**
 * iio_dma_buffer_release() - Release final buffer resources
 * @queue: Buffer to release
 *
 * Frees resources that can't yet be freed in iio_dma_buffer_exit(). Should be
 * called in the buffer's release callback implementation right before freeing
 * the memory associated with the buffer.
 */
void iio_dma_buffer_release(struct iio_dma_buffer_queue *queue)
{
	mutex_destroy(&queue->lock);
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_release);

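/*
 * Putting it all together: a driver usually wires the exported helpers above
 * into its iio_buffer_access_funcs and calls iio_dma_buffer_release() from
 * its own release() callback. A rough, illustrative sketch (the foo_* names
 * and the to_foo_dma_buffer() container_of helper are not part of this
 * module):
 *
 *	static void foo_dma_buffer_release(struct iio_buffer *buf)
 *	{
 *		struct foo_dma_buffer *foo = to_foo_dma_buffer(buf);
 *
 *		iio_dma_buffer_release(&foo->queue);
 *		kfree(foo);
 *	}
 *
 *	static const struct iio_buffer_access_funcs foo_dma_buffer_access_funcs = {
 *		.read = iio_dma_buffer_read,
 *		.write = iio_dma_buffer_write,
 *		.set_bytes_per_datum = iio_dma_buffer_set_bytes_per_datum,
 *		.set_length = iio_dma_buffer_set_length,
 *		.request_update = iio_dma_buffer_request_update,
 *		.enable = iio_dma_buffer_enable,
 *		.disable = iio_dma_buffer_disable,
 *		.data_available = iio_dma_buffer_usage,
 *		.space_available = iio_dma_buffer_usage,
 *		.release = foo_dma_buffer_release,
 *		.modes = INDIO_BUFFER_HARDWARE,
 *		.flags = INDIO_BUFFER_FLAG_FIXED_WATERMARK,
 *	};
 */
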
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("DMA buffer for the IIO framework");
MODULE_LICENSE("GPL v2");