Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

iio: Move attach/detach of the poll func to the core

All devices using a triggered buffer need to attach and detach the trigger
to the device in order to properly work. Instead of doing this in each and
every driver by hand move this into the core.

At this point in time, all drivers should have been updated to
attach/detach the poll-function in the same order.

This patch removes all explicit calls of iio_triggered_buffer_postenable()
& iio_triggered_buffer_predisable() in all drivers, since the core now
handles the pollfunc attach/detach.

The more peculiar change is for the 'at91-sama5d2_adc' driver, since it's
not immediately obvious that removing the hooks doesn't break anything.
Eugen was able to test on at91-sama5d2-adc driver, sama5d2-xplained board.
All seems to be fine.

Signed-off-by: Lars-Peter Clausen <lars@metafoo.de>
Signed-off-by: Alexandru Ardelean <alexandru.ardelean@analog.com>
Tested-by: Eugen Hristev <eugen.hristev@microchip.com> #for at91-sama5d2-adc
Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>

authored by

Lars-Peter Clausen and committed by
Jonathan Cameron
f11d59d8 044d406a

+93 -429
+5 -15
drivers/iio/accel/adxl372.c
··· 795 795 unsigned int mask; 796 796 int i, ret; 797 797 798 - ret = iio_triggered_buffer_postenable(indio_dev); 799 - if (ret < 0) 800 - return ret; 801 - 802 798 ret = adxl372_set_interrupts(st, ADXL372_INT1_MAP_FIFO_FULL_MSK, 0); 803 799 if (ret < 0) 804 - goto err; 800 + return ret; 805 801 806 802 mask = *indio_dev->active_scan_mask; 807 803 ··· 806 810 break; 807 811 } 808 812 809 - if (i == ARRAY_SIZE(adxl372_axis_lookup_table)) { 810 - ret = -EINVAL; 811 - goto err; 812 - } 813 + if (i == ARRAY_SIZE(adxl372_axis_lookup_table)) 814 + return -EINVAL; 813 815 814 816 st->fifo_format = adxl372_axis_lookup_table[i].fifo_format; 815 817 st->fifo_set_size = bitmap_weight(indio_dev->active_scan_mask, ··· 827 833 if (ret < 0) { 828 834 st->fifo_mode = ADXL372_FIFO_BYPASSED; 829 835 adxl372_set_interrupts(st, 0, 0); 830 - goto err; 836 + return ret; 831 837 } 832 838 833 839 return 0; 834 - 835 - err: 836 - iio_triggered_buffer_predisable(indio_dev); 837 - return ret; 838 840 } 839 841 840 842 static int adxl372_buffer_predisable(struct iio_dev *indio_dev) ··· 841 851 st->fifo_mode = ADXL372_FIFO_BYPASSED; 842 852 adxl372_configure_fifo(st); 843 853 844 - return iio_triggered_buffer_predisable(indio_dev); 854 + return 0; 845 855 } 846 856 847 857 static const struct iio_buffer_setup_ops adxl372_buffer_ops = {
+2 -2
drivers/iio/accel/bmc150-accel-core.c
··· 1411 1411 int ret = 0; 1412 1412 1413 1413 if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) 1414 - return iio_triggered_buffer_postenable(indio_dev); 1414 + return 0; 1415 1415 1416 1416 mutex_lock(&data->mutex); 1417 1417 ··· 1443 1443 struct bmc150_accel_data *data = iio_priv(indio_dev); 1444 1444 1445 1445 if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) 1446 - return iio_triggered_buffer_predisable(indio_dev); 1446 + return 0; 1447 1447 1448 1448 mutex_lock(&data->mutex); 1449 1449
-2
drivers/iio/accel/kxcjk-1013.c
··· 1027 1027 1028 1028 static const struct iio_buffer_setup_ops kxcjk1013_buffer_setup_ops = { 1029 1029 .preenable = kxcjk1013_buffer_preenable, 1030 - .postenable = iio_triggered_buffer_postenable, 1031 1030 .postdisable = kxcjk1013_buffer_postdisable, 1032 - .predisable = iio_triggered_buffer_predisable, 1033 1031 }; 1034 1032 1035 1033 static const struct iio_info kxcjk1013_info = {
-2
drivers/iio/accel/kxsd9.c
··· 252 252 253 253 static const struct iio_buffer_setup_ops kxsd9_buffer_setup_ops = { 254 254 .preenable = kxsd9_buffer_preenable, 255 - .postenable = iio_triggered_buffer_postenable, 256 - .predisable = iio_triggered_buffer_predisable, 257 255 .postdisable = kxsd9_buffer_postdisable, 258 256 }; 259 257
+5 -17
drivers/iio/accel/st_accel_buffer.c
··· 33 33 { 34 34 int err; 35 35 36 - err = iio_triggered_buffer_postenable(indio_dev); 37 - if (err < 0) 38 - return err; 39 - 40 36 err = st_sensors_set_axis_enable(indio_dev, indio_dev->active_scan_mask[0]); 41 37 if (err < 0) 42 - goto st_accel_buffer_predisable; 38 + return err; 43 39 44 40 err = st_sensors_set_enable(indio_dev, true); 45 41 if (err < 0) ··· 45 49 46 50 st_accel_buffer_enable_all_axis: 47 51 st_sensors_set_axis_enable(indio_dev, ST_SENSORS_ENABLE_ALL_AXIS); 48 - st_accel_buffer_predisable: 49 - iio_triggered_buffer_predisable(indio_dev); 50 52 return err; 51 53 } 52 54 53 55 static int st_accel_buffer_predisable(struct iio_dev *indio_dev) 54 56 { 55 - int err, err2; 57 + int err; 56 58 57 59 err = st_sensors_set_enable(indio_dev, false); 58 60 if (err < 0) 59 - goto st_accel_buffer_predisable; 61 + return err; 60 62 61 - err = st_sensors_set_axis_enable(indio_dev, ST_SENSORS_ENABLE_ALL_AXIS); 62 - 63 - st_accel_buffer_predisable: 64 - err2 = iio_triggered_buffer_predisable(indio_dev); 65 - if (!err) 66 - err = err2; 67 - 68 - return err; 63 + return st_sensors_set_axis_enable(indio_dev, 64 + ST_SENSORS_ENABLE_ALL_AXIS); 69 65 } 70 66 71 67 static const struct iio_buffer_setup_ops st_accel_buffer_setup_ops = {
-2
drivers/iio/accel/stk8312.c
··· 492 492 493 493 static const struct iio_buffer_setup_ops stk8312_buffer_setup_ops = { 494 494 .preenable = stk8312_buffer_preenable, 495 - .postenable = iio_triggered_buffer_postenable, 496 - .predisable = iio_triggered_buffer_predisable, 497 495 .postdisable = stk8312_buffer_postdisable, 498 496 }; 499 497
-2
drivers/iio/accel/stk8ba50.c
··· 376 376 377 377 static const struct iio_buffer_setup_ops stk8ba50_buffer_setup_ops = { 378 378 .preenable = stk8ba50_buffer_preenable, 379 - .postenable = iio_triggered_buffer_postenable, 380 - .predisable = iio_triggered_buffer_predisable, 381 379 .postdisable = stk8ba50_buffer_postdisable, 382 380 }; 383 381
-2
drivers/iio/adc/ad7266.c
··· 74 74 75 75 static const struct iio_buffer_setup_ops iio_triggered_buffer_setup_ops = { 76 76 .preenable = &ad7266_preenable, 77 - .postenable = &iio_triggered_buffer_postenable, 78 - .predisable = &iio_triggered_buffer_predisable, 79 77 .postdisable = &ad7266_postdisable, 80 78 }; 81 79
+1 -2
drivers/iio/adc/ad7606.c
··· 499 499 { 500 500 struct ad7606_state *st = iio_priv(indio_dev); 501 501 502 - iio_triggered_buffer_postenable(indio_dev); 503 502 gpiod_set_value(st->gpio_convst, 1); 504 503 505 504 return 0; ··· 510 511 511 512 gpiod_set_value(st->gpio_convst, 0); 512 513 513 - return iio_triggered_buffer_predisable(indio_dev); 514 + return 0; 514 515 } 515 516 516 517 static const struct iio_buffer_setup_ops ad7606_buffer_ops = {
-2
drivers/iio/adc/ad7766.c
··· 178 178 179 179 static const struct iio_buffer_setup_ops ad7766_buffer_setup_ops = { 180 180 .preenable = &ad7766_preenable, 181 - .postenable = &iio_triggered_buffer_postenable, 182 - .predisable = &iio_triggered_buffer_predisable, 183 181 .postdisable = &ad7766_postdisable, 184 182 }; 185 183
+1 -7
drivers/iio/adc/ad7768-1.c
··· 490 490 { 491 491 struct ad7768_state *st = iio_priv(indio_dev); 492 492 493 - iio_triggered_buffer_postenable(indio_dev); 494 493 /* 495 494 * Write a 1 to the LSB of the INTERFACE_FORMAT register to enter 496 495 * continuous read mode. Subsequent data reads do not require an ··· 501 502 static int ad7768_buffer_predisable(struct iio_dev *indio_dev) 502 503 { 503 504 struct ad7768_state *st = iio_priv(indio_dev); 504 - int ret; 505 505 506 506 /* 507 507 * To exit continuous read mode, perform a single read of the ADC_DATA 508 508 * reg (0x2C), which allows further configuration of the device. 509 509 */ 510 - ret = ad7768_spi_reg_read(st, AD7768_REG_ADC_DATA, 3); 511 - if (ret < 0) 512 - return ret; 513 - 514 - return iio_triggered_buffer_predisable(indio_dev); 510 + return ad7768_spi_reg_read(st, AD7768_REG_ADC_DATA, 3); 515 511 } 516 512 517 513 static const struct iio_buffer_setup_ops ad7768_buffer_ops = {
-2
drivers/iio/adc/ad7887.c
··· 136 136 137 137 static const struct iio_buffer_setup_ops ad7887_ring_setup_ops = { 138 138 .preenable = &ad7887_ring_preenable, 139 - .postenable = &iio_triggered_buffer_postenable, 140 - .predisable = &iio_triggered_buffer_predisable, 141 139 .postdisable = &ad7887_ring_postdisable, 142 140 }; 143 141
-5
drivers/iio/adc/ad_sigma_delta.c
··· 345 345 unsigned int channel; 346 346 int ret; 347 347 348 - ret = iio_triggered_buffer_postenable(indio_dev); 349 - if (ret < 0) 350 - return ret; 351 - 352 348 channel = find_first_bit(indio_dev->active_scan_mask, 353 349 indio_dev->masklength); 354 350 ret = ad_sigma_delta_set_channel(sigma_delta, ··· 437 441 438 442 static const struct iio_buffer_setup_ops ad_sd_buffer_setup_ops = { 439 443 .postenable = &ad_sd_buffer_postenable, 440 - .predisable = &iio_triggered_buffer_predisable, 441 444 .postdisable = &ad_sd_buffer_postdisable, 442 445 .validate_scan_mask = &iio_validate_scan_mask_onehot, 443 446 };
-18
drivers/iio/adc/at91-sama5d2_adc.c
··· 937 937 return 0; 938 938 } 939 939 940 - static int at91_adc_buffer_postenable(struct iio_dev *indio_dev) 941 - { 942 - if (at91_adc_current_chan_is_touch(indio_dev)) 943 - return 0; 944 - 945 - return iio_triggered_buffer_postenable(indio_dev); 946 - } 947 - 948 940 static int at91_adc_buffer_postdisable(struct iio_dev *indio_dev) 949 941 { 950 942 struct at91_adc_state *st = iio_priv(indio_dev); ··· 987 995 return 0; 988 996 } 989 997 990 - static int at91_adc_buffer_predisable(struct iio_dev *indio_dev) 991 - { 992 - if (at91_adc_current_chan_is_touch(indio_dev)) 993 - return 0; 994 - 995 - return iio_triggered_buffer_predisable(indio_dev); 996 - } 997 - 998 998 static const struct iio_buffer_setup_ops at91_buffer_setup_ops = { 999 999 .preenable = &at91_adc_buffer_preenable, 1000 1000 .postdisable = &at91_adc_buffer_postdisable, 1001 - .postenable = &at91_adc_buffer_postenable, 1002 - .predisable = &at91_adc_buffer_predisable, 1003 1001 }; 1004 1002 1005 1003 static struct iio_trigger *at91_adc_allocate_trigger(struct iio_dev *indio,
+1 -11
drivers/iio/adc/dln2-adc.c
··· 524 524 u16 conflict; 525 525 unsigned int trigger_chan; 526 526 527 - ret = iio_triggered_buffer_postenable(indio_dev); 528 - if (ret) 529 - return ret; 530 - 531 527 mutex_lock(&dln2->mutex); 532 528 533 529 /* Enable ADC */ ··· 537 541 (int)conflict); 538 542 ret = -EBUSY; 539 543 } 540 - iio_triggered_buffer_predisable(indio_dev); 541 544 return ret; 542 545 } 543 546 ··· 550 555 mutex_unlock(&dln2->mutex); 551 556 if (ret < 0) { 552 557 dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__); 553 - iio_triggered_buffer_predisable(indio_dev); 554 558 return ret; 555 559 } 556 560 } else { ··· 562 568 563 569 static int dln2_adc_triggered_buffer_predisable(struct iio_dev *indio_dev) 564 570 { 565 - int ret, ret2; 571 + int ret; 566 572 struct dln2_adc *dln2 = iio_priv(indio_dev); 567 573 568 574 mutex_lock(&dln2->mutex); ··· 579 585 mutex_unlock(&dln2->mutex); 580 586 if (ret < 0) 581 587 dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__); 582 - 583 - ret2 = iio_triggered_buffer_predisable(indio_dev); 584 - if (ret == 0) 585 - ret = ret2; 586 588 587 589 return ret; 588 590 }
-2
drivers/iio/adc/mxs-lradc-adc.c
··· 568 568 569 569 static const struct iio_buffer_setup_ops mxs_lradc_adc_buffer_ops = { 570 570 .preenable = &mxs_lradc_adc_buffer_preenable, 571 - .postenable = &iio_triggered_buffer_postenable, 572 - .predisable = &iio_triggered_buffer_predisable, 573 571 .postdisable = &mxs_lradc_adc_buffer_postdisable, 574 572 .validate_scan_mask = &mxs_lradc_adc_validate_scan_mask, 575 573 };
+5 -31
drivers/iio/adc/stm32-adc.c
··· 1492 1492 return 0; 1493 1493 } 1494 1494 1495 - static int __stm32_adc_buffer_postenable(struct iio_dev *indio_dev) 1495 + static int stm32_adc_buffer_postenable(struct iio_dev *indio_dev) 1496 1496 { 1497 1497 struct stm32_adc *adc = iio_priv(indio_dev); 1498 1498 struct device *dev = indio_dev->dev.parent; ··· 1537 1537 return ret; 1538 1538 } 1539 1539 1540 - static int stm32_adc_buffer_postenable(struct iio_dev *indio_dev) 1541 - { 1542 - int ret; 1543 - 1544 - ret = iio_triggered_buffer_postenable(indio_dev); 1545 - if (ret < 0) 1546 - return ret; 1547 - 1548 - ret = __stm32_adc_buffer_postenable(indio_dev); 1549 - if (ret < 0) 1550 - iio_triggered_buffer_predisable(indio_dev); 1551 - 1552 - return ret; 1553 - } 1554 - 1555 - static void __stm32_adc_buffer_predisable(struct iio_dev *indio_dev) 1540 + static int stm32_adc_buffer_predisable(struct iio_dev *indio_dev) 1556 1541 { 1557 1542 struct stm32_adc *adc = iio_priv(indio_dev); 1558 1543 struct device *dev = indio_dev->dev.parent; ··· 1556 1571 1557 1572 pm_runtime_mark_last_busy(dev); 1558 1573 pm_runtime_put_autosuspend(dev); 1559 - } 1560 1574 1561 - static int stm32_adc_buffer_predisable(struct iio_dev *indio_dev) 1562 - { 1563 - int ret; 1564 - 1565 - __stm32_adc_buffer_predisable(indio_dev); 1566 - 1567 - ret = iio_triggered_buffer_predisable(indio_dev); 1568 - if (ret < 0) 1569 - dev_err(&indio_dev->dev, "predisable failed\n"); 1570 - 1571 - return ret; 1575 + return 0; 1572 1576 } 1573 1577 1574 1578 static const struct iio_buffer_setup_ops stm32_adc_buffer_setup_ops = { ··· 1998 2024 struct iio_dev *indio_dev = dev_get_drvdata(dev); 1999 2025 2000 2026 if (iio_buffer_enabled(indio_dev)) 2001 - __stm32_adc_buffer_predisable(indio_dev); 2027 + stm32_adc_buffer_predisable(indio_dev); 2002 2028 2003 2029 return pm_runtime_force_suspend(dev); 2004 2030 } ··· 2020 2046 if (ret < 0) 2021 2047 return ret; 2022 2048 2023 - return __stm32_adc_buffer_postenable(indio_dev); 2049 + return stm32_adc_buffer_postenable(indio_dev); 2024 2050 } 2025 2051 #endif 2026 2052
+4 -35
drivers/iio/adc/stm32-dfsdm-adc.c
··· 995 995 return 0; 996 996 } 997 997 998 - static int __stm32_dfsdm_postenable(struct iio_dev *indio_dev) 998 + static int stm32_dfsdm_postenable(struct iio_dev *indio_dev) 999 999 { 1000 1000 struct stm32_dfsdm_adc *adc = iio_priv(indio_dev); 1001 1001 int ret; ··· 1038 1038 return ret; 1039 1039 } 1040 1040 1041 - static int stm32_dfsdm_postenable(struct iio_dev *indio_dev) 1042 - { 1043 - int ret; 1044 - 1045 - if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) { 1046 - ret = iio_triggered_buffer_postenable(indio_dev); 1047 - if (ret < 0) 1048 - return ret; 1049 - } 1050 - 1051 - ret = __stm32_dfsdm_postenable(indio_dev); 1052 - if (ret < 0) 1053 - goto err_predisable; 1054 - 1055 - return 0; 1056 - 1057 - err_predisable: 1058 - if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) 1059 - iio_triggered_buffer_predisable(indio_dev); 1060 - 1061 - return ret; 1062 - } 1063 - 1064 - static void __stm32_dfsdm_predisable(struct iio_dev *indio_dev) 1041 + static int stm32_dfsdm_predisable(struct iio_dev *indio_dev) 1065 1042 { 1066 1043 struct stm32_dfsdm_adc *adc = iio_priv(indio_dev); 1067 1044 ··· 1050 1073 1051 1074 if (adc->hwc) 1052 1075 iio_hw_consumer_disable(adc->hwc); 1053 - } 1054 - 1055 - static int stm32_dfsdm_predisable(struct iio_dev *indio_dev) 1056 - { 1057 - __stm32_dfsdm_predisable(indio_dev); 1058 - 1059 - if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) 1060 - iio_triggered_buffer_predisable(indio_dev); 1061 1076 1062 1077 return 0; 1063 1078 } ··· 1637 1668 struct iio_dev *indio_dev = dev_get_drvdata(dev); 1638 1669 1639 1670 if (iio_buffer_enabled(indio_dev)) 1640 - __stm32_dfsdm_predisable(indio_dev); 1671 + stm32_dfsdm_predisable(indio_dev); 1641 1672 1642 1673 return 0; 1643 1674 } ··· 1660 1691 } 1661 1692 1662 1693 if (iio_buffer_enabled(indio_dev)) 1663 - __stm32_dfsdm_postenable(indio_dev); 1694 + stm32_dfsdm_postenable(indio_dev); 1664 1695 1665 1696 return 0; 1666 1697 }
-2
drivers/iio/adc/ti-adc084s021.c
··· 187 187 188 188 static const struct iio_buffer_setup_ops adc084s021_buffer_setup_ops = { 189 189 .preenable = adc084s021_buffer_preenable, 190 - .postenable = iio_triggered_buffer_postenable, 191 - .predisable = iio_triggered_buffer_predisable, 192 190 .postdisable = adc084s021_buffer_postdisable, 193 191 }; 194 192
-2
drivers/iio/adc/ti-ads1015.c
··· 788 788 789 789 static const struct iio_buffer_setup_ops ads1015_buffer_setup_ops = { 790 790 .preenable = ads1015_buffer_preenable, 791 - .postenable = iio_triggered_buffer_postenable, 792 - .predisable = iio_triggered_buffer_predisable, 793 791 .postdisable = ads1015_buffer_postdisable, 794 792 .validate_scan_mask = &iio_validate_scan_mask_onehot, 795 793 };
+1 -6
drivers/iio/adc/vf610_adc.c
··· 724 724 { 725 725 struct vf610_adc *info = iio_priv(indio_dev); 726 726 unsigned int channel; 727 - int ret; 728 727 int val; 729 - 730 - ret = iio_triggered_buffer_postenable(indio_dev); 731 - if (ret) 732 - return ret; 733 728 734 729 val = readl(info->regs + VF610_REG_ADC_GC); 735 730 val |= VF610_ADC_ADCON; ··· 756 761 757 762 writel(hc_cfg, info->regs + VF610_REG_ADC_HC0); 758 763 759 - return iio_triggered_buffer_predisable(indio_dev); 764 + return 0; 760 765 } 761 766 762 767 static const struct iio_buffer_setup_ops iio_triggered_buffer_setup_ops = {
-2
drivers/iio/adc/xilinx-xadc-core.c
··· 839 839 840 840 static const struct iio_buffer_setup_ops xadc_buffer_ops = { 841 841 .preenable = &xadc_preenable, 842 - .postenable = &iio_triggered_buffer_postenable, 843 - .predisable = &iio_triggered_buffer_predisable, 844 842 .postdisable = &xadc_postdisable, 845 843 }; 846 844
+1 -9
drivers/iio/buffer/industrialio-triggered-buffer.c
··· 13 13 #include <linux/iio/triggered_buffer.h> 14 14 #include <linux/iio/trigger_consumer.h> 15 15 16 - static const struct iio_buffer_setup_ops iio_triggered_buffer_setup_ops = { 17 - .postenable = &iio_triggered_buffer_postenable, 18 - .predisable = &iio_triggered_buffer_predisable, 19 - }; 20 - 21 16 /** 22 17 * iio_triggered_buffer_setup() - Setup triggered buffer and pollfunc 23 18 * @indio_dev: IIO device structure ··· 62 67 } 63 68 64 69 /* Ring buffer functions - here trigger setup related */ 65 - if (setup_ops) 66 - indio_dev->setup_ops = setup_ops; 67 - else 68 - indio_dev->setup_ops = &iio_triggered_buffer_setup_ops; 70 + indio_dev->setup_ops = setup_ops; 69 71 70 72 /* Flag that polled ring buffering is possible */ 71 73 indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
+1 -5
drivers/iio/chemical/atlas-sensor.c
··· 410 410 struct atlas_data *data = iio_priv(indio_dev); 411 411 int ret; 412 412 413 - ret = iio_triggered_buffer_postenable(indio_dev); 414 - if (ret) 415 - return ret; 416 - 417 413 ret = pm_runtime_get_sync(&data->client->dev); 418 414 if (ret < 0) { 419 415 pm_runtime_put_noidle(&data->client->dev); ··· 433 437 if (ret) 434 438 return ret; 435 439 436 - return iio_triggered_buffer_predisable(indio_dev); 440 + return 0; 437 441 } 438 442 439 443 static const struct iio_trigger_ops atlas_interrupt_trigger_ops = {
-14
drivers/iio/dummy/iio_simple_dummy_buffer.c
··· 99 99 } 100 100 101 101 static const struct iio_buffer_setup_ops iio_simple_dummy_buffer_setup_ops = { 102 - /* 103 - * iio_triggered_buffer_postenable: 104 - * Generic function that simply attaches the pollfunc to the trigger. 105 - * Replace this to mess with hardware state before we attach the 106 - * trigger. 107 - */ 108 - .postenable = &iio_triggered_buffer_postenable, 109 - /* 110 - * iio_triggered_buffer_predisable: 111 - * Generic function that simple detaches the pollfunc from the trigger. 112 - * Replace this to put hardware state back again after the trigger is 113 - * detached but before userspace knows we have disabled the ring. 114 - */ 115 - .predisable = &iio_triggered_buffer_predisable, 116 102 }; 117 103 118 104 int iio_simple_dummy_configure_buffer(struct iio_dev *indio_dev)
-2
drivers/iio/gyro/bmg160_core.c
··· 1051 1051 1052 1052 static const struct iio_buffer_setup_ops bmg160_buffer_setup_ops = { 1053 1053 .preenable = bmg160_buffer_preenable, 1054 - .postenable = iio_triggered_buffer_postenable, 1055 - .predisable = iio_triggered_buffer_predisable, 1056 1054 .postdisable = bmg160_buffer_postdisable, 1057 1055 }; 1058 1056
-2
drivers/iio/gyro/mpu3050-core.c
··· 662 662 663 663 static const struct iio_buffer_setup_ops mpu3050_buffer_setup_ops = { 664 664 .preenable = mpu3050_buffer_preenable, 665 - .postenable = iio_triggered_buffer_postenable, 666 - .predisable = iio_triggered_buffer_predisable, 667 665 .postdisable = mpu3050_buffer_postdisable, 668 666 }; 669 667
+4 -17
drivers/iio/gyro/st_gyro_buffer.c
··· 33 33 { 34 34 int err; 35 35 36 - err = iio_triggered_buffer_postenable(indio_dev); 37 - if (err < 0) 38 - return err; 39 - 40 36 err = st_sensors_set_axis_enable(indio_dev, indio_dev->active_scan_mask[0]); 41 37 if (err < 0) 42 - goto st_gyro_buffer_predisable; 38 + return err; 43 39 44 40 err = st_sensors_set_enable(indio_dev, true); 45 41 if (err < 0) ··· 45 49 46 50 st_gyro_buffer_enable_all_axis: 47 51 st_sensors_set_axis_enable(indio_dev, ST_SENSORS_ENABLE_ALL_AXIS); 48 - st_gyro_buffer_predisable: 49 - iio_triggered_buffer_predisable(indio_dev); 50 52 return err; 51 53 } 52 54 53 55 static int st_gyro_buffer_predisable(struct iio_dev *indio_dev) 54 56 { 55 - int err, err2; 57 + int err; 56 58 57 59 err = st_sensors_set_enable(indio_dev, false); 58 60 if (err < 0) 59 - goto st_gyro_buffer_predisable; 61 + return err; 60 62 61 - err = st_sensors_set_axis_enable(indio_dev, ST_SENSORS_ENABLE_ALL_AXIS); 62 - 63 - st_gyro_buffer_predisable: 64 - err2 = iio_triggered_buffer_predisable(indio_dev); 65 - if (!err) 66 - err = err2; 67 - 68 - return err; 63 + return st_sensors_set_axis_enable(indio_dev, ST_SENSORS_ENABLE_ALL_AXIS); 69 64 } 70 65 71 66 static const struct iio_buffer_setup_ops st_gyro_buffer_setup_ops = {
+1 -11
drivers/iio/humidity/hdc100x.c
··· 278 278 struct hdc100x_data *data = iio_priv(indio_dev); 279 279 int ret; 280 280 281 - ret = iio_triggered_buffer_postenable(indio_dev); 282 - if (ret) 283 - return ret; 284 - 285 281 /* Buffer is enabled. First set ACQ Mode, then attach poll func */ 286 282 mutex_lock(&data->lock); 287 283 ret = hdc100x_update_config(data, HDC100X_REG_CONFIG_ACQ_MODE, 288 284 HDC100X_REG_CONFIG_ACQ_MODE); 289 285 mutex_unlock(&data->lock); 290 - if (ret) 291 - iio_triggered_buffer_predisable(indio_dev); 292 286 293 287 return ret; 294 288 } ··· 290 296 static int hdc100x_buffer_predisable(struct iio_dev *indio_dev) 291 297 { 292 298 struct hdc100x_data *data = iio_priv(indio_dev); 293 - int ret, ret2; 299 + int ret; 294 300 295 301 mutex_lock(&data->lock); 296 302 ret = hdc100x_update_config(data, HDC100X_REG_CONFIG_ACQ_MODE, 0); 297 303 mutex_unlock(&data->lock); 298 - 299 - ret2 = iio_triggered_buffer_predisable(indio_dev); 300 - if (ret == 0) 301 - ret = ret2; 302 304 303 305 return ret; 304 306 }
-2
drivers/iio/humidity/hts221_buffer.c
··· 153 153 154 154 static const struct iio_buffer_setup_ops hts221_buffer_ops = { 155 155 .preenable = hts221_buffer_preenable, 156 - .postenable = iio_triggered_buffer_postenable, 157 - .predisable = iio_triggered_buffer_predisable, 158 156 .postdisable = hts221_buffer_postdisable, 159 157 }; 160 158
+17
drivers/iio/iio_core_trigger.h
··· 18 18 **/ 19 19 void iio_device_unregister_trigger_consumer(struct iio_dev *indio_dev); 20 20 21 + 22 + int iio_trigger_attach_poll_func(struct iio_trigger *trig, 23 + struct iio_poll_func *pf); 24 + int iio_trigger_detach_poll_func(struct iio_trigger *trig, 25 + struct iio_poll_func *pf); 26 + 21 27 #else 22 28 23 29 /** ··· 41 35 **/ 42 36 static void iio_device_unregister_trigger_consumer(struct iio_dev *indio_dev) 43 37 { 38 + } 39 + 40 + static inline int iio_trigger_attach_poll_func(struct iio_trigger *trig, 41 + struct iio_poll_func *pf) 42 + { 43 + return 0; 44 + } 45 + static inline int iio_trigger_detach_poll_func(struct iio_trigger *trig, 46 + struct iio_poll_func *pf) 47 + { 48 + return 0; 44 49 } 45 50 46 51 #endif /* CONFIG_TRIGGER_CONSUMER */
+13
drivers/iio/industrialio-buffer.c
··· 20 20 21 21 #include <linux/iio/iio.h> 22 22 #include "iio_core.h" 23 + #include "iio_core_trigger.h" 23 24 #include <linux/iio/sysfs.h> 24 25 #include <linux/iio/buffer.h> 25 26 #include <linux/iio/buffer_impl.h> ··· 973 972 } 974 973 } 975 974 975 + if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) { 976 + ret = iio_trigger_attach_poll_func(indio_dev->trig, 977 + indio_dev->pollfunc); 978 + if (ret) 979 + goto err_disable_buffers; 980 + } 981 + 976 982 return 0; 977 983 978 984 err_disable_buffers: ··· 1005 997 /* Wind down existing buffers - iff there are any */ 1006 998 if (list_empty(&indio_dev->buffer_list)) 1007 999 return 0; 1000 + 1001 + if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) { 1002 + iio_trigger_detach_poll_func(indio_dev->trig, 1003 + indio_dev->pollfunc); 1004 + } 1008 1005 1009 1006 /* 1010 1007 * If things go wrong at some step in disable we still need to continue
+4 -18
drivers/iio/industrialio-trigger.c
··· 239 239 * the relevant function is in there may be the best option. 240 240 */ 241 241 /* Worth protecting against double additions? */ 242 - static int iio_trigger_attach_poll_func(struct iio_trigger *trig, 243 - struct iio_poll_func *pf) 242 + int iio_trigger_attach_poll_func(struct iio_trigger *trig, 243 + struct iio_poll_func *pf) 244 244 { 245 245 int ret = 0; 246 246 bool notinuse ··· 290 290 return ret; 291 291 } 292 292 293 - static int iio_trigger_detach_poll_func(struct iio_trigger *trig, 294 - struct iio_poll_func *pf) 293 + int iio_trigger_detach_poll_func(struct iio_trigger *trig, 294 + struct iio_poll_func *pf) 295 295 { 296 296 int ret = 0; 297 297 bool no_other_users ··· 705 705 if (indio_dev->trig) 706 706 iio_trigger_put(indio_dev->trig); 707 707 } 708 - 709 - int iio_triggered_buffer_postenable(struct iio_dev *indio_dev) 710 - { 711 - return iio_trigger_attach_poll_func(indio_dev->trig, 712 - indio_dev->pollfunc); 713 - } 714 - EXPORT_SYMBOL(iio_triggered_buffer_postenable); 715 - 716 - int iio_triggered_buffer_predisable(struct iio_dev *indio_dev) 717 - { 718 - return iio_trigger_detach_poll_func(indio_dev->trig, 719 - indio_dev->pollfunc); 720 - } 721 - EXPORT_SYMBOL(iio_triggered_buffer_predisable);
-10
drivers/iio/light/gp2ap020a00f.c
··· 1390 1390 1391 1391 mutex_lock(&data->lock); 1392 1392 1393 - err = iio_triggered_buffer_postenable(indio_dev); 1394 - if (err < 0) { 1395 - mutex_unlock(&data->lock); 1396 - return err; 1397 - } 1398 - 1399 1393 /* 1400 1394 * Enable triggers according to the scan_mask. Enabling either 1401 1395 * LIGHT_CLEAR or LIGHT_IR scan mode results in enabling ALS ··· 1424 1430 err = -ENOMEM; 1425 1431 1426 1432 error_unlock: 1427 - if (err < 0) 1428 - iio_triggered_buffer_predisable(indio_dev); 1429 1433 mutex_unlock(&data->lock); 1430 1434 1431 1435 return err; ··· 1456 1464 1457 1465 if (err == 0) 1458 1466 kfree(data->buffer); 1459 - 1460 - iio_triggered_buffer_predisable(indio_dev); 1461 1467 1462 1468 mutex_unlock(&data->lock); 1463 1469
+2 -18
drivers/iio/light/isl29125.c
··· 216 216 static int isl29125_buffer_postenable(struct iio_dev *indio_dev) 217 217 { 218 218 struct isl29125_data *data = iio_priv(indio_dev); 219 - int err; 220 - 221 - err = iio_triggered_buffer_postenable(indio_dev); 222 - if (err) 223 - return err; 224 219 225 220 data->conf1 |= ISL29125_MODE_RGB; 226 - err = i2c_smbus_write_byte_data(data->client, ISL29125_CONF1, 221 + return i2c_smbus_write_byte_data(data->client, ISL29125_CONF1, 227 222 data->conf1); 228 - if (err) { 229 - iio_triggered_buffer_predisable(indio_dev); 230 - return err; 231 - } 232 - 233 - return 0; 234 223 } 235 224 236 225 static int isl29125_buffer_predisable(struct iio_dev *indio_dev) 237 226 { 238 227 struct isl29125_data *data = iio_priv(indio_dev); 239 - int ret; 240 228 241 229 data->conf1 &= ~ISL29125_MODE_MASK; 242 230 data->conf1 |= ISL29125_MODE_PD; 243 - ret = i2c_smbus_write_byte_data(data->client, ISL29125_CONF1, 231 + return i2c_smbus_write_byte_data(data->client, ISL29125_CONF1, 244 232 data->conf1); 245 - 246 - iio_triggered_buffer_predisable(indio_dev); 247 - 248 - return ret; 249 233 } 250 234 251 235 static const struct iio_buffer_setup_ops isl29125_buffer_setup_ops = {
-2
drivers/iio/light/rpr0521.c
··· 570 570 571 571 static const struct iio_buffer_setup_ops rpr0521_buffer_setup_ops = { 572 572 .preenable = rpr0521_buffer_preenable, 573 - .postenable = iio_triggered_buffer_postenable, 574 - .predisable = iio_triggered_buffer_predisable, 575 573 .postdisable = rpr0521_buffer_postdisable, 576 574 }; 577 575
-2
drivers/iio/light/si1145.c
··· 1171 1171 1172 1172 static const struct iio_buffer_setup_ops si1145_buffer_setup_ops = { 1173 1173 .preenable = si1145_buffer_preenable, 1174 - .postenable = iio_triggered_buffer_postenable, 1175 - .predisable = iio_triggered_buffer_predisable, 1176 1174 .validate_scan_mask = si1145_validate_scan_mask, 1177 1175 }; 1178 1176
-2
drivers/iio/light/st_uvis25_core.c
··· 227 227 228 228 static const struct iio_buffer_setup_ops st_uvis25_buffer_ops = { 229 229 .preenable = st_uvis25_buffer_preenable, 230 - .postenable = iio_triggered_buffer_postenable, 231 - .predisable = iio_triggered_buffer_predisable, 232 230 .postdisable = st_uvis25_buffer_postdisable, 233 231 }; 234 232
+2 -18
drivers/iio/light/tcs3414.c
··· 243 243 static int tcs3414_buffer_postenable(struct iio_dev *indio_dev) 244 244 { 245 245 struct tcs3414_data *data = iio_priv(indio_dev); 246 - int ret; 247 - 248 - ret = iio_triggered_buffer_postenable(indio_dev); 249 - if (ret) 250 - return ret; 251 246 252 247 data->control |= TCS3414_CONTROL_ADC_EN; 253 - ret = i2c_smbus_write_byte_data(data->client, TCS3414_CONTROL, 248 + return i2c_smbus_write_byte_data(data->client, TCS3414_CONTROL, 254 249 data->control); 255 - if (ret) 256 - iio_triggered_buffer_predisable(indio_dev); 257 - 258 - return ret; 259 250 } 260 251 261 252 static int tcs3414_buffer_predisable(struct iio_dev *indio_dev) 262 253 { 263 254 struct tcs3414_data *data = iio_priv(indio_dev); 264 - int ret, ret2; 265 255 266 256 data->control &= ~TCS3414_CONTROL_ADC_EN; 267 - ret = i2c_smbus_write_byte_data(data->client, TCS3414_CONTROL, 257 + return i2c_smbus_write_byte_data(data->client, TCS3414_CONTROL, 268 258 data->control); 269 - 270 - ret2 = iio_triggered_buffer_predisable(indio_dev); 271 - if (!ret) 272 - ret = ret2; 273 - 274 - return ret; 275 259 } 276 260 277 261 static const struct iio_buffer_setup_ops tcs3414_buffer_setup_ops = {
+7 -28
drivers/iio/light/vcnl4000.c
··· 957 957 int ret; 958 958 int cmd; 959 959 960 - ret = iio_triggered_buffer_postenable(indio_dev); 961 - if (ret) 962 - return ret; 963 - 964 960 /* Do not enable the buffer if we are already capturing events. */ 965 - if (vcnl4010_is_in_periodic_mode(data)) { 966 - ret = -EBUSY; 967 - goto end; 968 - } 961 + if (vcnl4010_is_in_periodic_mode(data)) 962 + return -EBUSY; 969 963 970 964 ret = i2c_smbus_write_byte_data(data->client, VCNL4010_INT_CTRL, 971 965 VCNL4010_INT_PROX_EN); 972 966 if (ret < 0) 973 - goto end; 967 + return ret; 974 968 975 969 cmd = VCNL4000_SELF_TIMED_EN | VCNL4000_PROX_EN; 976 - ret = i2c_smbus_write_byte_data(data->client, VCNL4000_COMMAND, cmd); 977 - if (ret < 0) 978 - goto end; 979 - 980 - return 0; 981 - end: 982 - iio_triggered_buffer_predisable(indio_dev); 983 - 984 - return ret; 970 + return i2c_smbus_write_byte_data(data->client, VCNL4000_COMMAND, cmd); 985 971 } 986 972 987 973 static int vcnl4010_buffer_predisable(struct iio_dev *indio_dev) 988 974 { 989 975 struct vcnl4000_data *data = iio_priv(indio_dev); 990 - int ret, ret_disable; 976 + int ret; 991 977 992 978 ret = i2c_smbus_write_byte_data(data->client, VCNL4010_INT_CTRL, 0); 993 979 if (ret < 0) 994 - goto end; 980 + return ret; 995 981 996 - ret = i2c_smbus_write_byte_data(data->client, VCNL4000_COMMAND, 0); 997 - 998 - end: 999 - ret_disable = iio_triggered_buffer_predisable(indio_dev); 1000 - if (ret == 0) 1001 - ret = ret_disable; 1002 - 1003 - return ret; 982 + return i2c_smbus_write_byte_data(data->client, VCNL4000_COMMAND, 0); 1004 983 } 1005 984 1006 985 static const struct iio_buffer_setup_ops vcnl4010_buffer_ops = {
-2
drivers/iio/magnetometer/bmc150_magn.c
··· 836 836 837 837 static const struct iio_buffer_setup_ops bmc150_magn_buffer_setup_ops = { 838 838 .preenable = bmc150_magn_buffer_preenable, 839 - .postenable = iio_triggered_buffer_postenable, 840 - .predisable = iio_triggered_buffer_predisable, 841 839 .postdisable = bmc150_magn_buffer_postdisable, 842 840 }; 843 841
-2
drivers/iio/magnetometer/rm3100-core.c
··· 463 463 464 464 static const struct iio_buffer_setup_ops rm3100_buffer_ops = { 465 465 .preenable = rm3100_buffer_preenable, 466 - .postenable = iio_triggered_buffer_postenable, 467 - .predisable = iio_triggered_buffer_predisable, 468 466 .postdisable = rm3100_buffer_postdisable, 469 467 }; 470 468
+2 -24
drivers/iio/magnetometer/st_magn_buffer.c
··· 31 31 32 32 static int st_magn_buffer_postenable(struct iio_dev *indio_dev) 33 33 { 34 - int err; 35 - 36 - err = iio_triggered_buffer_postenable(indio_dev); 37 - if (err < 0) 38 - return err; 39 - 40 - err = st_sensors_set_enable(indio_dev, true); 41 - if (err < 0) 42 - goto st_magn_buffer_predisable; 43 - 44 - return 0; 45 - 46 - st_magn_buffer_predisable: 47 - iio_triggered_buffer_predisable(indio_dev); 48 - return err; 34 + return st_sensors_set_enable(indio_dev, true); 49 35 } 50 36 51 37 static int st_magn_buffer_predisable(struct iio_dev *indio_dev) 52 38 { 53 - int err, err2; 54 - 55 - err = st_sensors_set_enable(indio_dev, false); 56 - 57 - err2 = iio_triggered_buffer_predisable(indio_dev); 58 - if (!err) 59 - err = err2; 60 - 61 - return err; 39 + return st_sensors_set_enable(indio_dev, false); 62 40 } 63 41 64 42 static const struct iio_buffer_setup_ops st_magn_buffer_setup_ops = {
+2 -11
drivers/iio/potentiostat/lmp91000.c
··· 278 278 static int lmp91000_buffer_postenable(struct iio_dev *indio_dev) 279 279 { 280 280 struct lmp91000_data *data = iio_priv(indio_dev); 281 - int err; 282 281 283 - err = iio_triggered_buffer_postenable(indio_dev); 284 - if (err) 285 - return err; 286 - 287 - err = iio_channel_start_all_cb(data->cb_buffer); 288 - if (err) 289 - iio_triggered_buffer_predisable(indio_dev); 290 - 291 - return err; 282 + return iio_channel_start_all_cb(data->cb_buffer); 292 283 } 293 284 294 285 static int lmp91000_buffer_predisable(struct iio_dev *indio_dev) ··· 288 297 289 298 iio_channel_stop_all_cb(data->cb_buffer); 290 299 291 - return iio_triggered_buffer_predisable(indio_dev); 300 + return 0; 292 301 } 293 302 294 303 static const struct iio_buffer_setup_ops lmp91000_buffer_setup_ops = {
+2 -24
drivers/iio/pressure/st_pressure_buffer.c
··· 31 31 32 32 static int st_press_buffer_postenable(struct iio_dev *indio_dev) 33 33 { 34 - int err; 35 - 36 - err = iio_triggered_buffer_postenable(indio_dev); 37 - if (err < 0) 38 - return err; 39 - 40 - err = st_sensors_set_enable(indio_dev, true); 41 - if (err < 0) 42 - goto st_press_buffer_predisable; 43 - 44 - return 0; 45 - 46 - st_press_buffer_predisable: 47 - iio_triggered_buffer_predisable(indio_dev); 48 - return err; 34 + return st_sensors_set_enable(indio_dev, true); 49 35 } 50 36 51 37 static int st_press_buffer_predisable(struct iio_dev *indio_dev) 52 38 { 53 - int err, err2; 54 - 55 - err = st_sensors_set_enable(indio_dev, false); 56 - 57 - err2 = iio_triggered_buffer_predisable(indio_dev); 58 - if (!err) 59 - err = err2; 60 - 61 - return err; 39 + return st_sensors_set_enable(indio_dev, false); 62 40 } 63 41 64 42 static const struct iio_buffer_setup_ops st_press_buffer_setup_ops = {
+10 -17
drivers/iio/pressure/zpa2326.c
··· 1242 1242 const struct zpa2326_private *priv = iio_priv(indio_dev); 1243 1243 int err; 1244 1244 1245 - /* Plug our own trigger event handler. */ 1246 - err = iio_triggered_buffer_postenable(indio_dev); 1247 - if (err) 1248 - goto err; 1249 - 1250 1245 if (!priv->waken) { 1251 1246 /* 1252 1247 * We were already power supplied. Just clear hardware FIFO to 1253 1248 * get rid of samples acquired during previous rounds (if any). 1254 1249 */ 1255 1250 err = zpa2326_clear_fifo(indio_dev, 0); 1256 - if (err) 1257 - goto err_buffer_predisable; 1251 + if (err) { 1252 + zpa2326_err(indio_dev, 1253 + "failed to enable buffering (%d)", err); 1254 + return err; 1255 + } 1258 1256 } 1259 1257 1260 1258 if (!iio_trigger_using_own(indio_dev) && priv->waken) { ··· 1261 1263 * powered up: reconfigure one-shot mode. 1262 1264 */ 1263 1265 err = zpa2326_config_oneshot(indio_dev, priv->irq); 1264 - if (err) 1265 - goto err_buffer_predisable; 1266 + if (err) { 1267 + zpa2326_err(indio_dev, 1268 + "failed to enable buffering (%d)", err); 1269 + return err; 1270 + } 1266 1271 } 1267 1272 1268 1273 return 0; 1269 - 1270 - err_buffer_predisable: 1271 - iio_triggered_buffer_predisable(indio_dev); 1272 - err: 1273 - zpa2326_err(indio_dev, "failed to enable buffering (%d)", err); 1274 - 1275 - return err; 1276 1274 } 1277 1275 1278 1276 static int zpa2326_postdisable_buffer(struct iio_dev *indio_dev) ··· 1281 1287 static const struct iio_buffer_setup_ops zpa2326_buffer_setup_ops = { 1282 1288 .preenable = zpa2326_preenable_buffer, 1283 1289 .postenable = zpa2326_postenable_buffer, 1284 - .predisable = iio_triggered_buffer_predisable, 1285 1290 .postdisable = zpa2326_postdisable_buffer 1286 1291 }; 1287 1292
-2
drivers/iio/proximity/sx9310.c
··· 736 736 737 737 static const struct iio_buffer_setup_ops sx9310_buffer_setup_ops = { 738 738 .preenable = sx9310_buffer_preenable, 739 - .postenable = iio_triggered_buffer_postenable, 740 - .predisable = iio_triggered_buffer_predisable, 741 739 .postdisable = sx9310_buffer_postdisable, 742 740 }; 743 741
-9
drivers/iio/proximity/sx9500.c
··· 680 680 struct sx9500_data *data = iio_priv(indio_dev); 681 681 int ret = 0, i; 682 682 683 - ret = iio_triggered_buffer_postenable(indio_dev); 684 - if (ret) 685 - return ret; 686 - 687 683 mutex_lock(&data->mutex); 688 684 689 685 for (i = 0; i < SX9500_NUM_CHANNELS; i++) ··· 695 699 sx9500_dec_chan_users(data, i); 696 700 697 701 mutex_unlock(&data->mutex); 698 - 699 - if (ret) 700 - iio_triggered_buffer_predisable(indio_dev); 701 702 702 703 return ret; 703 704 } ··· 719 726 sx9500_inc_chan_users(data, i); 720 727 721 728 mutex_unlock(&data->mutex); 722 - 723 - iio_triggered_buffer_predisable(indio_dev); 724 729 725 730 return ret; 726 731 }
-7
include/linux/iio/trigger_consumer.h
··· 50 50 51 51 void iio_trigger_notify_done(struct iio_trigger *trig); 52 52 53 - /* 54 - * Two functions for common case where all that happens is a pollfunc 55 - * is attached and detached from a trigger 56 - */ 57 - int iio_triggered_buffer_postenable(struct iio_dev *indio_dev); 58 - int iio_triggered_buffer_predisable(struct iio_dev *indio_dev); 59 - 60 53 #endif