Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

iio: add watermark logic to iio read and poll

Currently the IIO buffer blocking read only waits until at least one
data element is available.
This patch makes the reader sleep until enough data is collected before
returning to userspace. This should limit the number of read() calls
when trying to get data in batches.

Co-author: Yannick Bedhomme <yannick.bedhomme@mobile-devices.fr>
Signed-off-by: Josselin Costanzi <josselin.costanzi@mobile-devices.fr>
[rebased and remove buffer timeout]
Signed-off-by: Octavian Purdila <octavian.purdila@intel.com>
Reviewed-by: Lars-Peter Clausen <lars@metafoo.de>
Signed-off-by: Jonathan Cameron <jic23@kernel.org>

authored by

Josselin Costanzi and committed by
Jonathan Cameron
37d34556 9444a300

+132 -28
+15
Documentation/ABI/testing/sysfs-bus-iio
··· 1280 1280 Description: 1281 1281 Specifies number of seconds in which we compute the steps 1282 1282 that occur in order to decide if the consumer is making steps. 1283 + 1284 + What: /sys/bus/iio/devices/iio:deviceX/buffer/watermark 1285 + KernelVersion: 4.2 1286 + Contact: linux-iio@vger.kernel.org 1287 + Description: 1288 + A single positive integer specifying the maximum number of scan 1289 + elements to wait for. 1290 + Poll will block until the watermark is reached. 1291 + Blocking read will wait until the minimum of the requested 1292 + read amount and the watermark is available. 1293 + Non-blocking read will retrieve the available samples from the 1294 + buffer even if there are fewer samples than the watermark level. This 1295 + allows the application to block on poll with a timeout and read 1296 + the available samples after the timeout expires and thus have a 1297 + maximum delay guarantee.
+106 -16
drivers/iio/industrialio-buffer.c
··· 37 37 return !list_empty(&buf->buffer_list); 38 38 } 39 39 40 - static bool iio_buffer_data_available(struct iio_buffer *buf) 40 + static size_t iio_buffer_data_available(struct iio_buffer *buf) 41 41 { 42 42 return buf->access->data_available(buf); 43 + } 44 + 45 + static bool iio_buffer_ready(struct iio_dev *indio_dev, struct iio_buffer *buf, 46 + size_t to_wait) 47 + { 48 + /* wakeup if the device was unregistered */ 49 + if (!indio_dev->info) 50 + return true; 51 + 52 + /* drain the buffer if it was disabled */ 53 + if (!iio_buffer_is_active(buf)) 54 + to_wait = min_t(size_t, to_wait, 1); 55 + 56 + if (iio_buffer_data_available(buf) >= to_wait) 57 + return true; 58 + 59 + return false; 43 60 } 44 61 45 62 /** ··· 70 53 { 71 54 struct iio_dev *indio_dev = filp->private_data; 72 55 struct iio_buffer *rb = indio_dev->buffer; 56 + size_t datum_size; 57 + size_t to_wait = 0; 73 58 int ret; 74 59 75 60 if (!indio_dev->info) ··· 80 61 if (!rb || !rb->access->read_first_n) 81 62 return -EINVAL; 82 63 83 - do { 84 - if (!iio_buffer_data_available(rb)) { 85 - if (filp->f_flags & O_NONBLOCK) 86 - return -EAGAIN; 64 + datum_size = rb->bytes_per_datum; 87 65 88 - ret = wait_event_interruptible(rb->pollq, 89 - iio_buffer_data_available(rb) || 90 - indio_dev->info == NULL); 91 - if (ret) 92 - return ret; 93 - if (indio_dev->info == NULL) 94 - return -ENODEV; 95 - } 66 + /* 67 + * If datum_size is 0 there will never be anything to read from the 68 + * buffer, so signal end of file now. 
69 + */ 70 + if (!datum_size) 71 + return 0; 72 + 73 + if (!(filp->f_flags & O_NONBLOCK)) 74 + to_wait = min_t(size_t, n / datum_size, rb->watermark); 75 + 76 + do { 77 + ret = wait_event_interruptible(rb->pollq, 78 + iio_buffer_ready(indio_dev, rb, to_wait)); 79 + if (ret) 80 + return ret; 81 + 82 + if (!indio_dev->info) 83 + return -ENODEV; 96 84 97 85 ret = rb->access->read_first_n(rb, n, buf); 98 86 if (ret == 0 && (filp->f_flags & O_NONBLOCK)) ··· 122 96 return -ENODEV; 123 97 124 98 poll_wait(filp, &rb->pollq, wait); 125 - if (iio_buffer_data_available(rb)) 99 + if (iio_buffer_ready(indio_dev, rb, rb->watermark)) 126 100 return POLLIN | POLLRDNORM; 127 - /* need a way of knowing if there may be enough data... */ 128 101 return 0; 129 102 } 130 103 ··· 148 123 INIT_LIST_HEAD(&buffer->buffer_list); 149 124 init_waitqueue_head(&buffer->pollq); 150 125 kref_init(&buffer->ref); 126 + buffer->watermark = 1; 151 127 } 152 128 EXPORT_SYMBOL(iio_buffer_init); 153 129 ··· 442 416 buffer->access->set_length(buffer, val); 443 417 ret = 0; 444 418 } 419 + if (ret) 420 + goto out; 421 + if (buffer->length && buffer->length < buffer->watermark) 422 + buffer->watermark = buffer->length; 423 + out: 445 424 mutex_unlock(&indio_dev->mlock); 446 425 447 426 return ret ? 
ret : len; ··· 503 472 static void iio_buffer_deactivate(struct iio_buffer *buffer) 504 473 { 505 474 list_del_init(&buffer->buffer_list); 475 + wake_up_interruptible(&buffer->pollq); 506 476 iio_buffer_put(buffer); 507 477 } 508 478 ··· 786 754 787 755 static const char * const iio_scan_elements_group_name = "scan_elements"; 788 756 757 + static ssize_t iio_buffer_show_watermark(struct device *dev, 758 + struct device_attribute *attr, 759 + char *buf) 760 + { 761 + struct iio_dev *indio_dev = dev_to_iio_dev(dev); 762 + struct iio_buffer *buffer = indio_dev->buffer; 763 + 764 + return sprintf(buf, "%u\n", buffer->watermark); 765 + } 766 + 767 + static ssize_t iio_buffer_store_watermark(struct device *dev, 768 + struct device_attribute *attr, 769 + const char *buf, 770 + size_t len) 771 + { 772 + struct iio_dev *indio_dev = dev_to_iio_dev(dev); 773 + struct iio_buffer *buffer = indio_dev->buffer; 774 + unsigned int val; 775 + int ret; 776 + 777 + ret = kstrtouint(buf, 10, &val); 778 + if (ret) 779 + return ret; 780 + if (!val) 781 + return -EINVAL; 782 + 783 + mutex_lock(&indio_dev->mlock); 784 + 785 + if (val > buffer->length) { 786 + ret = -EINVAL; 787 + goto out; 788 + } 789 + 790 + if (iio_buffer_is_active(indio_dev->buffer)) { 791 + ret = -EBUSY; 792 + goto out; 793 + } 794 + 795 + buffer->watermark = val; 796 + out: 797 + mutex_unlock(&indio_dev->mlock); 798 + 799 + return ret ? 
ret : len; 800 + } 801 + 789 802 static DEVICE_ATTR(length, S_IRUGO | S_IWUSR, iio_buffer_read_length, 790 803 iio_buffer_write_length); 791 804 static struct device_attribute dev_attr_length_ro = __ATTR(length, 792 805 S_IRUGO, iio_buffer_read_length, NULL); 793 806 static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR, 794 807 iio_buffer_show_enable, iio_buffer_store_enable); 808 + static DEVICE_ATTR(watermark, S_IRUGO | S_IWUSR, 809 + iio_buffer_show_watermark, iio_buffer_store_watermark); 795 810 796 811 static struct attribute *iio_buffer_attrs[] = { 797 812 &dev_attr_length.attr, 798 813 &dev_attr_enable.attr, 814 + &dev_attr_watermark.attr, 799 815 }; 800 816 801 817 int iio_buffer_alloc_sysfs_and_mask(struct iio_dev *indio_dev) ··· 1024 944 static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data) 1025 945 { 1026 946 const void *dataout = iio_demux(buffer, data); 947 + int ret; 1027 948 1028 - return buffer->access->store_to(buffer, dataout); 949 + ret = buffer->access->store_to(buffer, dataout); 950 + if (ret) 951 + return ret; 952 + 953 + /* 954 + * We can't just test for watermark to decide if we wake the poll queue 955 + * because read may request less samples than the watermark. 956 + */ 957 + wake_up_interruptible_poll(&buffer->pollq, POLLIN | POLLRDNORM); 958 + return 0; 1029 959 } 1030 960 1031 961 static void iio_buffer_demux_free(struct iio_buffer *buffer)
+4 -7
drivers/iio/kfifo_buf.c
··· 83 83 ret = kfifo_in(&kf->kf, data, 1); 84 84 if (ret != 1) 85 85 return -EBUSY; 86 - 87 - wake_up_interruptible_poll(&r->pollq, POLLIN | POLLRDNORM); 88 - 89 86 return 0; 90 87 } 91 88 ··· 106 109 return copied; 107 110 } 108 111 109 - static bool iio_kfifo_buf_data_available(struct iio_buffer *r) 112 + static size_t iio_kfifo_buf_data_available(struct iio_buffer *r) 110 113 { 111 114 struct iio_kfifo *kf = iio_to_kfifo(r); 112 - bool empty; 115 + size_t samples; 113 116 114 117 mutex_lock(&kf->user_lock); 115 - empty = kfifo_is_empty(&kf->kf); 118 + samples = kfifo_len(&kf->kf); 116 119 mutex_unlock(&kf->user_lock); 117 120 118 - return !empty; 121 + return samples; 119 122 } 120 123 121 124 static void iio_kfifo_buffer_release(struct iio_buffer *buffer)
+2 -2
drivers/staging/iio/accel/sca3000_ring.c
··· 129 129 return ret ? ret : num_read; 130 130 } 131 131 132 - static bool sca3000_ring_buf_data_available(struct iio_buffer *r) 132 + static size_t sca3000_ring_buf_data_available(struct iio_buffer *r) 133 133 { 134 - return r->stufftoread; 134 + return r->stufftoread ? r->watermark : 0; 135 135 } 136 136 137 137 /**
+5 -3
include/linux/iio/buffer.h
··· 21 21 * struct iio_buffer_access_funcs - access functions for buffers. 22 22 * @store_to: actually store stuff to the buffer 23 23 * @read_first_n: try to get a specified number of bytes (must exist) 24 - * @data_available: indicates whether data for reading from the buffer is 25 - * available. 24 + * @data_available: indicates how much data is available for reading from 25 + * the buffer. 26 26 * @request_update: if a parameter change has been marked, update underlying 27 27 * storage. 28 28 * @set_bytes_per_datum:set number of bytes per datum ··· 43 43 int (*read_first_n)(struct iio_buffer *buffer, 44 44 size_t n, 45 45 char __user *buf); 46 - bool (*data_available)(struct iio_buffer *buffer); 46 + size_t (*data_available)(struct iio_buffer *buffer); 47 47 48 48 int (*request_update)(struct iio_buffer *buffer); 49 49 ··· 72 72 * @demux_bounce: [INTERN] buffer for doing gather from incoming scan. 73 73 * @buffer_list: [INTERN] entry in the devices list of current buffers. 74 74 * @ref: [INTERN] reference count of the buffer. 75 + * @watermark: [INTERN] number of datums to wait for poll/read. 75 76 */ 76 77 struct iio_buffer { 77 78 int length; ··· 91 90 void *demux_bounce; 92 91 struct list_head buffer_list; 93 92 struct kref ref; 93 + unsigned int watermark; 94 94 }; 95 95 96 96 /**