Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

iio: accel: adxl372: Add support for FIFO peak mode

By default, if all three channels (x, y, z) are enabled, sample sets of
concurrent 3-axis data are stored in the FIFO. This patch adds the option
to configure the FIFO to store peak acceleration (x, y and z) of every
over-threshold event. When pushing to the iio buffer, only the data of
the enabled axes is pushed.

Currently the driver configures the adxl372 to work in loop mode.
The inactivity and activity timings decide how fast the chip
will loop through the awake and waiting states, and the
thresholds on the x, y and z axes decide when activity or
inactivity will be detected.

This patch adds standard events sysfs entries for the inactivity
and activity timings: thresh_falling_period/thresh_rising_period
and for the in_accel_x_thresh_falling/rising_value.

Signed-off-by: Stefan Popa <stefan.popa@analog.com>
Signed-off-by: Alexandru Tachici <alexandru.tachici@analog.com>
Link: https://lore.kernel.org/r/20200810093257.65929-2-alexandru.tachici@analog.com
Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>

authored by

Stefan Popa and committed by
Jonathan Cameron
b0fc6783 50677d28

+299 -12
+299 -12
drivers/iio/accel/adxl372.c
··· 5 5 * Copyright 2018 Analog Devices Inc. 6 6 */ 7 7 8 + #include <linux/bitfield.h> 8 9 #include <linux/bitops.h> 9 10 #include <linux/interrupt.h> 10 11 #include <linux/irq.h> ··· 114 113 #define ADXL372_STATUS_1_AWAKE(x) (((x) >> 6) & 0x1) 115 114 #define ADXL372_STATUS_1_ERR_USR_REGS(x) (((x) >> 7) & 0x1) 116 115 116 + /* ADXL372_STATUS_2 */ 117 + #define ADXL372_STATUS_2_INACT(x) (((x) >> 4) & 0x1) 118 + #define ADXL372_STATUS_2_ACT(x) (((x) >> 5) & 0x1) 119 + #define ADXL372_STATUS_2_AC2(x) (((x) >> 6) & 0x1) 120 + 117 121 /* ADXL372_INT1_MAP */ 118 122 #define ADXL372_INT1_MAP_DATA_RDY_MSK BIT(0) 119 123 #define ADXL372_INT1_MAP_DATA_RDY_MODE(x) (((x) & 0x1) << 0) ··· 137 131 #define ADXL372_INT1_MAP_LOW_MSK BIT(7) 138 132 #define ADXL372_INT1_MAP_LOW_MODE(x) (((x) & 0x1) << 7) 139 133 134 + /* ADX372_THRESH */ 135 + #define ADXL372_THRESH_VAL_H_MSK GENMASK(10, 3) 136 + #define ADXL372_THRESH_VAL_H_SEL(x) FIELD_GET(ADXL372_THRESH_VAL_H_MSK, x) 137 + #define ADXL372_THRESH_VAL_L_MSK GENMASK(2, 0) 138 + #define ADXL372_THRESH_VAL_L_SEL(x) FIELD_GET(ADXL372_THRESH_VAL_L_MSK, x) 139 + 140 140 /* The ADXL372 includes a deep, 512 sample FIFO buffer */ 141 141 #define ADXL372_FIFO_SIZE 512 142 + #define ADXL372_X_AXIS_EN(x) ((x) & BIT(0)) 143 + #define ADXL372_Y_AXIS_EN(x) ((x) & BIT(1)) 144 + #define ADXL372_Z_AXIS_EN(x) ((x) & BIT(2)) 142 145 143 146 /* 144 147 * At +/- 200g with 12-bit resolution, scale is computed as: ··· 237 222 { BIT(0) | BIT(1) | BIT(2), ADXL372_XYZ_FIFO }, 238 223 }; 239 224 225 + static const struct iio_event_spec adxl372_events[] = { 226 + { 227 + .type = IIO_EV_TYPE_THRESH, 228 + .dir = IIO_EV_DIR_RISING, 229 + .mask_separate = BIT(IIO_EV_INFO_VALUE), 230 + .mask_shared_by_all = BIT(IIO_EV_INFO_PERIOD) | BIT(IIO_EV_INFO_ENABLE), 231 + }, { 232 + .type = IIO_EV_TYPE_THRESH, 233 + .dir = IIO_EV_DIR_FALLING, 234 + .mask_separate = BIT(IIO_EV_INFO_VALUE), 235 + .mask_shared_by_all = BIT(IIO_EV_INFO_PERIOD) | BIT(IIO_EV_INFO_ENABLE), 236 + 
}, 237 + }; 238 + 240 239 #define ADXL372_ACCEL_CHANNEL(index, reg, axis) { \ 241 240 .type = IIO_ACCEL, \ 242 241 .address = reg, \ ··· 268 239 .shift = 4, \ 269 240 .endianness = IIO_BE, \ 270 241 }, \ 242 + .event_spec = adxl372_events, \ 243 + .num_event_specs = ARRAY_SIZE(adxl372_events) \ 271 244 } 272 245 273 246 static const struct iio_chan_spec adxl372_channels[] = { ··· 283 252 struct device *dev; 284 253 struct regmap *regmap; 285 254 struct iio_trigger *dready_trig; 255 + struct iio_trigger *peak_datardy_trig; 286 256 enum adxl372_fifo_mode fifo_mode; 287 257 enum adxl372_fifo_format fifo_format; 258 + unsigned int fifo_axis_mask; 288 259 enum adxl372_op_mode op_mode; 289 260 enum adxl372_act_proc_mode act_proc_mode; 290 261 enum adxl372_odr odr; ··· 294 261 u32 act_time_ms; 295 262 u32 inact_time_ms; 296 263 u8 fifo_set_size; 297 - u8 int1_bitmask; 298 - u8 int2_bitmask; 264 + unsigned long int1_bitmask; 265 + unsigned long int2_bitmask; 299 266 u16 watermark; 300 267 __be16 fifo_buf[ADXL372_FIFO_SIZE]; 268 + bool peak_fifo_mode_en; 269 + struct mutex threshold_m; /* lock for threshold */ 301 270 }; 302 271 303 272 static const unsigned long adxl372_channel_masks[] = { ··· 310 275 BIT(0) | BIT(1) | BIT(2), 311 276 0 312 277 }; 278 + 279 + static ssize_t adxl372_read_threshold_value(struct iio_dev *indio_dev, unsigned int addr, 280 + u16 *threshold) 281 + { 282 + struct adxl372_state *st = iio_priv(indio_dev); 283 + __be16 raw_regval; 284 + u16 regval; 285 + int ret; 286 + 287 + ret = regmap_bulk_read(st->regmap, addr, &raw_regval, sizeof(raw_regval)); 288 + if (ret < 0) 289 + return ret; 290 + 291 + regval = be16_to_cpu(raw_regval); 292 + regval >>= 5; 293 + 294 + *threshold = regval; 295 + 296 + return 0; 297 + } 298 + 299 + static ssize_t adxl372_write_threshold_value(struct iio_dev *indio_dev, unsigned int addr, 300 + u16 threshold) 301 + { 302 + struct adxl372_state *st = iio_priv(indio_dev); 303 + int ret; 304 + 305 + mutex_lock(&st->threshold_m); 
306 + ret = regmap_write(st->regmap, addr, ADXL372_THRESH_VAL_H_SEL(threshold)); 307 + if (ret < 0) 308 + goto unlock; 309 + 310 + ret = regmap_update_bits(st->regmap, addr + 1, GENMASK(7, 5), 311 + ADXL372_THRESH_VAL_L_SEL(threshold) << 5); 312 + 313 + unlock: 314 + mutex_unlock(&st->threshold_m); 315 + 316 + return ret; 317 + } 313 318 314 319 static int adxl372_read_axis(struct adxl372_state *st, u8 addr) 315 320 { ··· 528 453 } 529 454 530 455 static int adxl372_set_interrupts(struct adxl372_state *st, 531 - unsigned char int1_bitmask, 532 - unsigned char int2_bitmask) 456 + unsigned long int1_bitmask, 457 + unsigned long int2_bitmask) 533 458 { 534 459 int ret; 535 460 ··· 598 523 return ret; 599 524 } 600 525 526 + static void adxl372_arrange_axis_data(struct adxl372_state *st, __be16 *sample) 527 + { 528 + __be16 axis_sample[3]; 529 + int i = 0; 530 + 531 + memset(axis_sample, 0, 3 * sizeof(__be16)); 532 + if (ADXL372_X_AXIS_EN(st->fifo_axis_mask)) 533 + axis_sample[i++] = sample[0]; 534 + if (ADXL372_Y_AXIS_EN(st->fifo_axis_mask)) 535 + axis_sample[i++] = sample[1]; 536 + if (ADXL372_Z_AXIS_EN(st->fifo_axis_mask)) 537 + axis_sample[i++] = sample[2]; 538 + 539 + memcpy(sample, axis_sample, 3 * sizeof(__be16)); 540 + } 541 + 542 + static void adxl372_push_event(struct iio_dev *indio_dev, s64 timestamp, u8 status2) 543 + { 544 + unsigned int ev_dir = IIO_EV_DIR_NONE; 545 + 546 + if (ADXL372_STATUS_2_ACT(status2)) 547 + ev_dir = IIO_EV_DIR_RISING; 548 + 549 + if (ADXL372_STATUS_2_INACT(status2)) 550 + ev_dir = IIO_EV_DIR_FALLING; 551 + 552 + if (ev_dir != IIO_EV_DIR_NONE) 553 + iio_push_event(indio_dev, 554 + IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_X_OR_Y_OR_Z, 555 + IIO_EV_TYPE_THRESH, ev_dir), 556 + timestamp); 557 + } 558 + 601 559 static irqreturn_t adxl372_trigger_handler(int irq, void *p) 602 560 { 603 561 struct iio_poll_func *pf = p; ··· 643 535 ret = adxl372_get_status(st, &status1, &status2, &fifo_entries); 644 536 if (ret < 0) 645 537 goto err; 538 
+ 539 + adxl372_push_event(indio_dev, iio_get_time_ns(indio_dev), status2); 646 540 647 541 if (st->fifo_mode != ADXL372_FIFO_BYPASSED && 648 542 ADXL372_STATUS_1_FIFO_FULL(status1)) { ··· 664 554 goto err; 665 555 666 556 /* Each sample is 2 bytes */ 667 - for (i = 0; i < fifo_entries; i += st->fifo_set_size) 557 + for (i = 0; i < fifo_entries; i += st->fifo_set_size) { 558 + /* filter peak detection data */ 559 + if (st->peak_fifo_mode_en) 560 + adxl372_arrange_axis_data(st, &st->fifo_buf[i]); 668 561 iio_push_to_buffers(indio_dev, &st->fifo_buf[i]); 562 + } 669 563 } 670 564 err: 671 565 iio_trigger_notify_done(indio_dev->trig); ··· 837 723 } 838 724 } 839 725 726 + static int adxl372_read_event_value(struct iio_dev *indio_dev, const struct iio_chan_spec *chan, 727 + enum iio_event_type type, enum iio_event_direction dir, 728 + enum iio_event_info info, int *val, int *val2) 729 + { 730 + struct adxl372_state *st = iio_priv(indio_dev); 731 + unsigned int addr; 732 + u16 raw_value; 733 + int ret; 734 + 735 + switch (info) { 736 + case IIO_EV_INFO_VALUE: 737 + switch (dir) { 738 + case IIO_EV_DIR_RISING: 739 + addr = ADXL372_X_THRESH_ACT_H + 2 * chan->scan_index; 740 + ret = adxl372_read_threshold_value(indio_dev, addr, &raw_value); 741 + if (ret < 0) 742 + return ret; 743 + *val = raw_value * ADXL372_USCALE; 744 + *val2 = 1000000; 745 + return IIO_VAL_FRACTIONAL; 746 + case IIO_EV_DIR_FALLING: 747 + addr = ADXL372_X_THRESH_INACT_H + 2 * chan->scan_index; 748 + ret = adxl372_read_threshold_value(indio_dev, addr, &raw_value); 749 + if (ret < 0) 750 + return ret; 751 + *val = raw_value * ADXL372_USCALE; 752 + *val2 = 1000000; 753 + return IIO_VAL_FRACTIONAL; 754 + default: 755 + return -EINVAL; 756 + } 757 + case IIO_EV_INFO_PERIOD: 758 + switch (dir) { 759 + case IIO_EV_DIR_RISING: 760 + *val = st->act_time_ms; 761 + *val2 = 1000; 762 + return IIO_VAL_FRACTIONAL; 763 + case IIO_EV_DIR_FALLING: 764 + *val = st->inact_time_ms; 765 + *val2 = 1000; 766 + return 
IIO_VAL_FRACTIONAL; 767 + default: 768 + return -EINVAL; 769 + } 770 + default: 771 + return -EINVAL; 772 + } 773 + } 774 + 775 + static int adxl372_write_event_value(struct iio_dev *indio_dev, const struct iio_chan_spec *chan, 776 + enum iio_event_type type, enum iio_event_direction dir, 777 + enum iio_event_info info, int val, int val2) 778 + { 779 + struct adxl372_state *st = iio_priv(indio_dev); 780 + unsigned int val_ms; 781 + unsigned int addr; 782 + u16 raw_val; 783 + 784 + switch (info) { 785 + case IIO_EV_INFO_VALUE: 786 + raw_val = DIV_ROUND_UP(val * 1000000, ADXL372_USCALE); 787 + switch (dir) { 788 + case IIO_EV_DIR_RISING: 789 + addr = ADXL372_X_THRESH_ACT_H + 2 * chan->scan_index; 790 + return adxl372_write_threshold_value(indio_dev, addr, raw_val); 791 + case IIO_EV_DIR_FALLING: 792 + addr = ADXL372_X_THRESH_INACT_H + 2 * chan->scan_index; 793 + return adxl372_write_threshold_value(indio_dev, addr, raw_val); 794 + default: 795 + return -EINVAL; 796 + } 797 + case IIO_EV_INFO_PERIOD: 798 + val_ms = val * 1000 + DIV_ROUND_UP(val2, 1000); 799 + switch (dir) { 800 + case IIO_EV_DIR_RISING: 801 + return adxl372_set_activity_time_ms(st, val_ms); 802 + case IIO_EV_DIR_FALLING: 803 + return adxl372_set_inactivity_time_ms(st, val_ms); 804 + default: 805 + return -EINVAL; 806 + } 807 + default: 808 + return -EINVAL; 809 + } 810 + } 811 + 812 + static int adxl372_read_event_config(struct iio_dev *indio_dev, const struct iio_chan_spec *chan, 813 + enum iio_event_type type, enum iio_event_direction dir) 814 + { 815 + struct adxl372_state *st = iio_priv(indio_dev); 816 + 817 + switch (dir) { 818 + case IIO_EV_DIR_RISING: 819 + return FIELD_GET(ADXL372_INT1_MAP_ACT_MSK, st->int1_bitmask); 820 + case IIO_EV_DIR_FALLING: 821 + return FIELD_GET(ADXL372_INT1_MAP_INACT_MSK, st->int1_bitmask); 822 + default: 823 + return -EINVAL; 824 + } 825 + } 826 + 827 + static int adxl372_write_event_config(struct iio_dev *indio_dev, const struct iio_chan_spec *chan, 828 + enum 
iio_event_type type, enum iio_event_direction dir, 829 + int state) 830 + { 831 + struct adxl372_state *st = iio_priv(indio_dev); 832 + 833 + switch (dir) { 834 + case IIO_EV_DIR_RISING: 835 + set_mask_bits(&st->int1_bitmask, ADXL372_INT1_MAP_ACT_MSK, 836 + ADXL372_INT1_MAP_ACT_MODE(state)); 837 + break; 838 + case IIO_EV_DIR_FALLING: 839 + set_mask_bits(&st->int1_bitmask, ADXL372_INT1_MAP_INACT_MSK, 840 + ADXL372_INT1_MAP_INACT_MODE(state)); 841 + break; 842 + default: 843 + return -EINVAL; 844 + } 845 + 846 + return adxl372_set_interrupts(st, st->int1_bitmask, 0); 847 + } 848 + 840 849 static ssize_t adxl372_show_filter_freq_avail(struct device *dev, 841 850 struct device_attribute *attr, 842 851 char *buf) ··· 1032 795 unsigned int mask; 1033 796 int i, ret; 1034 797 1035 - ret = adxl372_set_interrupts(st, ADXL372_INT1_MAP_FIFO_FULL_MSK, 0); 798 + st->int1_bitmask |= ADXL372_INT1_MAP_FIFO_FULL_MSK; 799 + ret = adxl372_set_interrupts(st, st->int1_bitmask, 0); 1036 800 if (ret < 0) 1037 801 return ret; 1038 802 ··· 1048 810 return -EINVAL; 1049 811 1050 812 st->fifo_format = adxl372_axis_lookup_table[i].fifo_format; 813 + st->fifo_axis_mask = adxl372_axis_lookup_table[i].bits; 1051 814 st->fifo_set_size = bitmap_weight(indio_dev->active_scan_mask, 1052 815 indio_dev->masklength); 816 + 817 + /* Configure the FIFO to store sets of impact event peak. 
*/ 818 + if (st->peak_fifo_mode_en) { 819 + st->fifo_set_size = 3; 820 + st->fifo_format = ADXL372_XYZ_PEAK_FIFO; 821 + } 822 + 1053 823 /* 1054 824 * The 512 FIFO samples can be allotted in several ways, such as: 1055 825 * 170 sample sets of concurrent 3-axis data 1056 826 * 256 sample sets of concurrent 2-axis data (user selectable) 1057 827 * 512 sample sets of single-axis data 828 + * 170 sets of impact event peak (x, y, z) 1058 829 */ 1059 830 if ((st->watermark * st->fifo_set_size) > ADXL372_FIFO_SIZE) 1060 831 st->watermark = (ADXL372_FIFO_SIZE / st->fifo_set_size); ··· 1073 826 ret = adxl372_configure_fifo(st); 1074 827 if (ret < 0) { 1075 828 st->fifo_mode = ADXL372_FIFO_BYPASSED; 1076 - adxl372_set_interrupts(st, 0, 0); 829 + st->int1_bitmask &= ~ADXL372_INT1_MAP_FIFO_FULL_MSK; 830 + adxl372_set_interrupts(st, st->int1_bitmask, 0); 1077 831 return ret; 1078 832 } 1079 833 ··· 1085 837 { 1086 838 struct adxl372_state *st = iio_priv(indio_dev); 1087 839 1088 - adxl372_set_interrupts(st, 0, 0); 840 + st->int1_bitmask &= ~ADXL372_INT1_MAP_FIFO_FULL_MSK; 841 + adxl372_set_interrupts(st, st->int1_bitmask, 0); 1089 842 st->fifo_mode = ADXL372_FIFO_BYPASSED; 1090 843 adxl372_configure_fifo(st); 1091 844 ··· 1103 854 { 1104 855 struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig); 1105 856 struct adxl372_state *st = iio_priv(indio_dev); 1106 - unsigned long int mask = 0; 1107 857 1108 858 if (state) 1109 - mask = ADXL372_INT1_MAP_FIFO_FULL_MSK; 859 + st->int1_bitmask |= ADXL372_INT1_MAP_FIFO_FULL_MSK; 1110 860 1111 - return adxl372_set_interrupts(st, mask, 0); 861 + return adxl372_set_interrupts(st, st->int1_bitmask, 0); 1112 862 } 1113 863 1114 864 static int adxl372_validate_trigger(struct iio_dev *indio_dev, ··· 1115 867 { 1116 868 struct adxl372_state *st = iio_priv(indio_dev); 1117 869 1118 - if (st->dready_trig != trig) 870 + if (st->dready_trig != trig && st->peak_datardy_trig != trig) 1119 871 return -EINVAL; 1120 872 1121 873 return 0; ··· 1124 876 
static const struct iio_trigger_ops adxl372_trigger_ops = { 1125 877 .validate_device = &iio_trigger_validate_own_device, 1126 878 .set_trigger_state = adxl372_dready_trig_set_state, 879 + }; 880 + 881 + static int adxl372_peak_dready_trig_set_state(struct iio_trigger *trig, 882 + bool state) 883 + { 884 + struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig); 885 + struct adxl372_state *st = iio_priv(indio_dev); 886 + 887 + if (state) 888 + st->int1_bitmask |= ADXL372_INT1_MAP_FIFO_FULL_MSK; 889 + 890 + st->peak_fifo_mode_en = state; 891 + 892 + return adxl372_set_interrupts(st, st->int1_bitmask, 0); 893 + } 894 + 895 + static const struct iio_trigger_ops adxl372_peak_data_trigger_ops = { 896 + .validate_device = &iio_trigger_validate_own_device, 897 + .set_trigger_state = adxl372_peak_dready_trig_set_state, 1127 898 }; 1128 899 1129 900 static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("400 800 1600 3200 6400"); ··· 1164 897 .attrs = &adxl372_attrs_group, 1165 898 .read_raw = adxl372_read_raw, 1166 899 .write_raw = adxl372_write_raw, 900 + .read_event_config = adxl372_read_event_config, 901 + .write_event_config = adxl372_write_event_config, 902 + .read_event_value = adxl372_read_event_value, 903 + .write_event_value = adxl372_write_event_value, 1167 904 .debugfs_reg_access = &adxl372_reg_access, 1168 905 .hwfifo_set_watermark = adxl372_set_watermark, 1169 906 }; ··· 1195 924 st->dev = dev; 1196 925 st->regmap = regmap; 1197 926 st->irq = irq; 927 + 928 + mutex_init(&st->threshold_m); 1198 929 1199 930 indio_dev->channels = adxl372_channels; 1200 931 indio_dev->num_channels = ARRAY_SIZE(adxl372_channels); ··· 1228 955 if (st->dready_trig == NULL) 1229 956 return -ENOMEM; 1230 957 958 + st->peak_datardy_trig = devm_iio_trigger_alloc(dev, 959 + "%s-dev%d-peak", 960 + indio_dev->name, 961 + indio_dev->id); 962 + if (!st->peak_datardy_trig) 963 + return -ENOMEM; 964 + 1231 965 st->dready_trig->ops = &adxl372_trigger_ops; 966 + st->peak_datardy_trig->ops = 
&adxl372_peak_data_trigger_ops; 1232 967 st->dready_trig->dev.parent = dev; 968 + st->peak_datardy_trig->dev.parent = dev; 1233 969 iio_trigger_set_drvdata(st->dready_trig, indio_dev); 970 + iio_trigger_set_drvdata(st->peak_datardy_trig, indio_dev); 1234 971 ret = devm_iio_trigger_register(dev, st->dready_trig); 972 + if (ret < 0) 973 + return ret; 974 + 975 + ret = devm_iio_trigger_register(dev, st->peak_datardy_trig); 1235 976 if (ret < 0) 1236 977 return ret; 1237 978