Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

staging:iio: Drop {mark,unmark}_in_use callbacks

These callbacks are currently used by the individual buffer implementations to
ensure that the request_update callback is not issued while the buffer is in use.
But the core already provides sufficient measures to prevent this from happening
in the first place. So it is safe to remove them.

There is one functional change due to this patch. Since the buffer is no longer
marked as in use when the chrdev is opened, it is now possible to enable the
buffer while it is open. This did not work before, because mark_param_change
would fail if the buffer was marked as in use.

Acked-by: Jonathan Cameron <jic23@kernel.org>
Signed-off-by: Lars-Peter Clausen <lars@metafoo.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>

authored by

Lars-Peter Clausen and committed by
Greg Kroah-Hartman
79335140 869871b5

+1 -122
-4
drivers/staging/iio/Documentation/ring.txt
··· 23 23 as much buffer functionality as possible. Note almost all of these 24 24 are optional. 25 25 26 - mark_in_use, unmark_in_use 27 - Basically indicate that not changes should be made to the buffer state that 28 - will effect the form of the data being captures (e.g. scan elements or length) 29 - 30 26 store_to 31 27 If possible, push data to the buffer. 32 28
-5
drivers/staging/iio/buffer.h
··· 18 18 19 19 /** 20 20 * struct iio_buffer_access_funcs - access functions for buffers. 21 - * @mark_in_use: reference counting, typically to prevent module removal 22 - * @unmark_in_use: reduce reference count when no longer using buffer 23 21 * @store_to: actually store stuff to the buffer 24 22 * @read_first_n: try to get a specified number of bytes (must exist) 25 23 * @request_update: if a parameter change has been marked, update underlying ··· 36 38 * any of them not existing. 37 39 **/ 38 40 struct iio_buffer_access_funcs { 39 - void (*mark_in_use)(struct iio_buffer *buffer); 40 - void (*unmark_in_use)(struct iio_buffer *buffer); 41 - 42 41 int (*store_to)(struct iio_buffer *buffer, u8 *data, s64 timestamp); 43 42 int (*read_first_n)(struct iio_buffer *buffer, 44 43 size_t n,
-11
drivers/staging/iio/iio_core.h
··· 33 33 #ifdef CONFIG_IIO_BUFFER 34 34 struct poll_table_struct; 35 35 36 - int iio_chrdev_buffer_open(struct iio_dev *indio_dev); 37 - void iio_chrdev_buffer_release(struct iio_dev *indio_dev); 38 - 39 36 unsigned int iio_buffer_poll(struct file *filp, 40 37 struct poll_table_struct *wait); 41 38 ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf, ··· 43 46 #define iio_buffer_read_first_n_outer_addr (&iio_buffer_read_first_n_outer) 44 47 45 48 #else 46 - 47 - static inline int iio_chrdev_buffer_open(struct iio_dev *indio_dev) 48 - { 49 - return 0; 50 - } 51 - 52 - static inline void iio_chrdev_buffer_release(struct iio_dev *indio_dev) 53 - {} 54 49 55 50 #define iio_buffer_poll_addr NULL 56 51 #define iio_buffer_read_first_n_outer_addr NULL
-28
drivers/staging/iio/industrialio-buffer.c
··· 64 64 return 0; 65 65 } 66 66 67 - int iio_chrdev_buffer_open(struct iio_dev *indio_dev) 68 - { 69 - struct iio_buffer *rb = indio_dev->buffer; 70 - if (!rb) 71 - return 0; 72 - if (rb->access->mark_in_use) 73 - rb->access->mark_in_use(rb); 74 - return 0; 75 - } 76 - 77 - void iio_chrdev_buffer_release(struct iio_dev *indio_dev) 78 - { 79 - struct iio_buffer *rb = indio_dev->buffer; 80 - 81 - if (!rb) 82 - return; 83 - if (rb->access->unmark_in_use) 84 - rb->access->unmark_in_use(rb); 85 - } 86 - 87 67 void iio_buffer_init(struct iio_buffer *buffer) 88 68 { 89 69 INIT_LIST_HEAD(&buffer->demux_list); ··· 427 447 goto error_ret; 428 448 } 429 449 } 430 - if (buffer->access->mark_in_use) 431 - buffer->access->mark_in_use(buffer); 432 450 /* Definitely possible for devices to support both of these.*/ 433 451 if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) { 434 452 if (!indio_dev->trig) { 435 453 printk(KERN_INFO 436 454 "Buffer not started: no trigger\n"); 437 455 ret = -EINVAL; 438 - if (buffer->access->unmark_in_use) 439 - buffer->access->unmark_in_use(buffer); 440 456 goto error_ret; 441 457 } 442 458 indio_dev->currentmode = INDIO_BUFFER_TRIGGERED; ··· 449 473 printk(KERN_INFO 450 474 "Buffer not started:" 451 475 "postenable failed\n"); 452 - if (buffer->access->unmark_in_use) 453 - buffer->access->unmark_in_use(buffer); 454 476 indio_dev->currentmode = previous_mode; 455 477 if (indio_dev->setup_ops->postdisable) 456 478 indio_dev->setup_ops-> ··· 462 488 if (ret) 463 489 goto error_ret; 464 490 } 465 - if (buffer->access->unmark_in_use) 466 - buffer->access->unmark_in_use(buffer); 467 491 indio_dev->currentmode = INDIO_DIRECT_MODE; 468 492 if (indio_dev->setup_ops->postdisable) { 469 493 ret = indio_dev->setup_ops->postdisable(indio_dev);
+1 -7
drivers/staging/iio/industrialio-core.c
··· 1083 1083 { 1084 1084 struct iio_dev *indio_dev = container_of(inode->i_cdev, 1085 1085 struct iio_dev, chrdev); 1086 - unsigned int ret; 1087 1086 1088 1087 if (test_and_set_bit(IIO_BUSY_BIT_POS, &indio_dev->flags)) 1089 1088 return -EBUSY; 1090 1089 1091 1090 filp->private_data = indio_dev; 1092 1091 1093 - ret = iio_chrdev_buffer_open(indio_dev); 1094 - if (ret < 0) 1095 - clear_bit(IIO_BUSY_BIT_POS, &indio_dev->flags); 1096 - 1097 - return ret; 1092 + return 0; 1098 1093 } 1099 1094 1100 1095 /** ··· 1099 1104 { 1100 1105 struct iio_dev *indio_dev = container_of(inode->i_cdev, 1101 1106 struct iio_dev, chrdev); 1102 - iio_chrdev_buffer_release(indio_dev); 1103 1107 clear_bit(IIO_BUSY_BIT_POS, &indio_dev->flags); 1104 1108 return 0; 1105 1109 }
-32
drivers/staging/iio/kfifo_buf.c
··· 11 11 struct iio_kfifo { 12 12 struct iio_buffer buffer; 13 13 struct kfifo kf; 14 - int use_count; 15 14 int update_needed; 16 - struct mutex use_lock; 17 15 }; 18 16 19 17 #define iio_to_kfifo(r) container_of(r, struct iio_kfifo, buffer) ··· 31 33 int ret = 0; 32 34 struct iio_kfifo *buf = iio_to_kfifo(r); 33 35 34 - mutex_lock(&buf->use_lock); 35 36 if (!buf->update_needed) 36 37 goto error_ret; 37 - if (buf->use_count) { 38 - ret = -EAGAIN; 39 - goto error_ret; 40 - } 41 38 kfifo_free(&buf->kf); 42 39 ret = __iio_allocate_kfifo(buf, buf->buffer.bytes_per_datum, 43 40 buf->buffer.length); 44 41 error_ret: 45 - mutex_unlock(&buf->use_lock); 46 42 return ret; 47 - } 48 - 49 - static void iio_mark_kfifo_in_use(struct iio_buffer *r) 50 - { 51 - struct iio_kfifo *buf = iio_to_kfifo(r); 52 - mutex_lock(&buf->use_lock); 53 - buf->use_count++; 54 - mutex_unlock(&buf->use_lock); 55 - } 56 - 57 - static void iio_unmark_kfifo_in_use(struct iio_buffer *r) 58 - { 59 - struct iio_kfifo *buf = iio_to_kfifo(r); 60 - mutex_lock(&buf->use_lock); 61 - buf->use_count--; 62 - mutex_unlock(&buf->use_lock); 63 43 } 64 44 65 45 static int iio_get_length_kfifo(struct iio_buffer *r) 66 46 { 67 47 return r->length; 68 - } 69 - 70 - static inline void __iio_init_kfifo(struct iio_kfifo *kf) 71 - { 72 - mutex_init(&kf->use_lock); 73 48 } 74 49 75 50 static IIO_BUFFER_ENABLE_ATTR; ··· 69 98 kf->update_needed = true; 70 99 iio_buffer_init(&kf->buffer); 71 100 kf->buffer.attrs = &iio_kfifo_attribute_group; 72 - __iio_init_kfifo(kf); 73 101 74 102 return &kf->buffer; 75 103 } ··· 138 168 } 139 169 140 170 const struct iio_buffer_access_funcs kfifo_access_funcs = { 141 - .mark_in_use = &iio_mark_kfifo_in_use, 142 - .unmark_in_use = &iio_unmark_kfifo_in_use, 143 171 .store_to = &iio_store_to_kfifo, 144 172 .read_first_n = &iio_read_first_n_kfifo, 145 173 .request_update = &iio_request_update_kfifo,
-35
drivers/staging/iio/ring_sw.c
··· 24 24 * @read_p: read pointer (oldest available) 25 25 * @write_p: write pointer 26 26 * @half_p: half buffer length behind write_p (event generation) 27 - * @use_count: reference count to prevent resizing when in use 28 27 * @update_needed: flag to indicated change in size requested 29 - * @use_lock: lock to prevent change in size when in use 30 28 * 31 29 * Note that the first element of all ring buffers must be a 32 30 * struct iio_buffer. ··· 36 38 unsigned char *write_p; 37 39 /* used to act as a point at which to signal an event */ 38 40 unsigned char *half_p; 39 - int use_count; 40 41 int update_needed; 41 - spinlock_t use_lock; 42 42 }; 43 43 44 44 #define iio_to_sw_ring(r) container_of(r, struct iio_sw_ring_buffer, buf) ··· 54 58 return ring->data ? 0 : -ENOMEM; 55 59 } 56 60 57 - static inline void __iio_init_sw_ring_buffer(struct iio_sw_ring_buffer *ring) 58 - { 59 - spin_lock_init(&ring->use_lock); 60 - } 61 - 62 61 static inline void __iio_free_sw_ring_buffer(struct iio_sw_ring_buffer *ring) 63 62 { 64 63 kfree(ring->data); 65 64 } 66 - 67 - static void iio_mark_sw_rb_in_use(struct iio_buffer *r) 68 - { 69 - struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r); 70 - spin_lock(&ring->use_lock); 71 - ring->use_count++; 72 - spin_unlock(&ring->use_lock); 73 - } 74 - 75 - static void iio_unmark_sw_rb_in_use(struct iio_buffer *r) 76 - { 77 - struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r); 78 - spin_lock(&ring->use_lock); 79 - ring->use_count--; 80 - spin_unlock(&ring->use_lock); 81 - } 82 - 83 65 84 66 /* Ring buffer related functionality */ 85 67 /* Store to ring is typically called in the bh of a data ready interrupt handler ··· 269 295 struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r); 270 296 271 297 r->stufftoread = false; 272 - spin_lock(&ring->use_lock); 273 298 if (!ring->update_needed) 274 299 goto error_ret; 275 - if (ring->use_count) { 276 - ret = -EAGAIN; 277 - goto error_ret; 278 - } 279 300 __iio_free_sw_ring_buffer(ring); 280 301 
ret = __iio_allocate_sw_ring_buffer(ring, ring->buf.bytes_per_datum, 281 302 ring->buf.length); 282 303 error_ret: 283 - spin_unlock(&ring->use_lock); 284 304 return ret; 285 305 } 286 306 ··· 340 372 ring->update_needed = true; 341 373 buf = &ring->buf; 342 374 iio_buffer_init(buf); 343 - __iio_init_sw_ring_buffer(ring); 344 375 buf->attrs = &iio_ring_attribute_group; 345 376 346 377 return buf; ··· 353 386 EXPORT_SYMBOL(iio_sw_rb_free); 354 387 355 388 const struct iio_buffer_access_funcs ring_sw_access_funcs = { 356 - .mark_in_use = &iio_mark_sw_rb_in_use, 357 - .unmark_in_use = &iio_unmark_sw_rb_in_use, 358 389 .store_to = &iio_store_to_sw_rb, 359 390 .read_first_n = &iio_read_first_n_sw_rb, 360 391 .request_update = &iio_request_update_sw_rb,