// SPDX-License-Identifier: GPL-2.0-only
/* The industrial I/O core, trigger handling functions
 *
 * Copyright (c) 2008 Jonathan Cameron
 */

#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <linux/iio/iio.h>
#include <linux/iio/iio-opaque.h>
#include <linux/iio/trigger.h>
#include "iio_core.h"
#include "iio_core_trigger.h"
#include <linux/iio/trigger_consumer.h>

/* RFC - Question of approach
 * Make the common case (single sensor, single trigger)
 * simple by starting trigger capture when the first sensor
 * is added.
 *
 * Complex simultaneous start requires use of the 'hold' functionality
 * of the trigger. (not implemented)
 *
 * Any other suggestions?
 */

static DEFINE_IDA(iio_trigger_ida);

/* Single list of all available triggers */
static LIST_HEAD(iio_trigger_list);
static DEFINE_MUTEX(iio_trigger_list_lock);

/**
 * name_show() - retrieve useful identifying name
 * @dev:	device associated with the iio_trigger
 * @attr:	pointer to the device_attribute structure that is
 *		being processed
 * @buf:	buffer to print the name into
 *
 * Return: a negative number on failure or the number of written
 *	   characters on success.
 */
static ssize_t name_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct iio_trigger *trig = to_iio_trigger(dev);

	return sysfs_emit(buf, "%s\n", trig->name);
}

static DEVICE_ATTR_RO(name);

static struct attribute *iio_trig_dev_attrs[] = {
	&dev_attr_name.attr,
	NULL,
};
ATTRIBUTE_GROUPS(iio_trig_dev);

static struct iio_trigger *__iio_trigger_find_by_name(const char *name);

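/**
 * iio_trigger_register() - register a trigger with the IIO core
 * @trig_info:	trigger to be registered
 *
 * Allocates an ID, names the underlying device "trigger%d", adds it to
 * the device model and, under iio_trigger_list_lock, rejects duplicate
 * names before adding the trigger to the global list.
 *
 * Return: 0 on success, a negative error code on failure.
 */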
int iio_trigger_register(struct iio_trigger *trig_info)
{
	int ret;

	trig_info->id = ida_alloc(&iio_trigger_ida, GFP_KERNEL);
	if (trig_info->id < 0)
		return trig_info->id;

	/* Set the name used for the sysfs directory etc */
	dev_set_name(&trig_info->dev, "trigger%d", trig_info->id);

	ret = device_add(&trig_info->dev);
	if (ret)
		goto error_unregister_id;

	/* Add to list of available triggers held by the IIO core */
	mutex_lock(&iio_trigger_list_lock);
	if (__iio_trigger_find_by_name(trig_info->name)) {
		pr_err("Duplicate trigger name '%s'\n", trig_info->name);
		ret = -EEXIST;
		goto error_device_del;
	}
	list_add_tail(&trig_info->list, &iio_trigger_list);
	mutex_unlock(&iio_trigger_list_lock);

	return 0;

error_device_del:
	mutex_unlock(&iio_trigger_list_lock);
	device_del(&trig_info->dev);
error_unregister_id:
	ida_free(&iio_trigger_ida, trig_info->id);
	return ret;
}
EXPORT_SYMBOL(iio_trigger_register);

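/**
 * iio_trigger_unregister() - remove a trigger from the IIO core
 * @trig_info:	trigger to be removed
 *
 * Reverses iio_trigger_register(): takes the trigger off the global
 * list, frees its ID and deletes the underlying device.
 */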
void iio_trigger_unregister(struct iio_trigger *trig_info)
{
	mutex_lock(&iio_trigger_list_lock);
	list_del(&trig_info->list);
	mutex_unlock(&iio_trigger_list_lock);

	ida_free(&iio_trigger_ida, trig_info->id);
	/* Possible issue in here */
	device_del(&trig_info->dev);
}
EXPORT_SYMBOL(iio_trigger_unregister);

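/**
 * iio_trigger_set_immutable() - set an IIO device's trigger permanently
 * @indio_dev:	IIO device the trigger is assigned to
 * @trig:	trigger to assign
 *
 * Takes a reference to @trig and marks the assignment read-only, so
 * that later writes to the device's current_trigger attribute fail
 * with -EPERM.
 *
 * Return: 0 on success, -EINVAL if either argument is NULL.
 */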
int iio_trigger_set_immutable(struct iio_dev *indio_dev, struct iio_trigger *trig)
{
	struct iio_dev_opaque *iio_dev_opaque;

	if (!indio_dev || !trig)
		return -EINVAL;

	iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	mutex_lock(&indio_dev->mlock);
	WARN_ON(iio_dev_opaque->trig_readonly);

	indio_dev->trig = iio_trigger_get(trig);
	iio_dev_opaque->trig_readonly = true;
	mutex_unlock(&indio_dev->mlock);

	return 0;
}
EXPORT_SYMBOL(iio_trigger_set_immutable);

/* Search for trigger by name, assuming iio_trigger_list_lock held */
static struct iio_trigger *__iio_trigger_find_by_name(const char *name)
{
	struct iio_trigger *iter;

	list_for_each_entry(iter, &iio_trigger_list, list)
		if (!strcmp(iter->name, name))
			return iter;

	return NULL;
}

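/*
 * Look up a trigger by sysfs-style name and take a reference to it.
 * The caller must drop the reference with iio_trigger_put().
 */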
static struct iio_trigger *iio_trigger_acquire_by_name(const char *name)
{
	struct iio_trigger *trig = NULL, *iter;

	mutex_lock(&iio_trigger_list_lock);
	list_for_each_entry(iter, &iio_trigger_list, list)
		if (sysfs_streq(iter->name, name)) {
			trig = iter;
			iio_trigger_get(trig);
			break;
		}
	mutex_unlock(&iio_trigger_list_lock);

	return trig;
}

static void iio_reenable_work_fn(struct work_struct *work)
{
	struct iio_trigger *trig = container_of(work, struct iio_trigger,
						reenable_work);

	/*
	 * This may occur after the trigger state is set to disabled -
	 * in that case the driver should skip reenabling.
	 */
	trig->ops->reenable(trig);
}

/*
 * In general, reenable callbacks may need to sleep and this path is
 * not performance sensitive, so just queue up a work item
 * to reenable the trigger for us.
 *
 * Races that can cause this:
 * 1) A handler occurs entirely in interrupt context, so the final
 *    decrement of the use count happens in that interrupt.
 * 2) The trigger has been removed, but one last interrupt gets through.
 *
 * For (1) we must call reenable, but not in atomic context.
 * For (2) it should be safe to call reenable, if drivers never blindly
 * reenable after state is off.
 */
static void iio_trigger_notify_done_atomic(struct iio_trigger *trig)
{
	if (atomic_dec_and_test(&trig->use_count) && trig->ops &&
	    trig->ops->reenable)
		schedule_work(&trig->reenable_work);
}

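/**
 * iio_trigger_poll() - called on a trigger occurring
 * @trig:	trigger which occurred
 *
 * Typically called from the relevant hardware interrupt handler. Fires
 * the per-consumer sub-irqs of @trig; consumers whose sub-irq is
 * disabled are notified as done immediately so the use count still
 * reaches zero.
 */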
void iio_trigger_poll(struct iio_trigger *trig)
{
	int i;

	if (!atomic_read(&trig->use_count)) {
		atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER);

		for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
			if (trig->subirqs[i].enabled)
				generic_handle_irq(trig->subirq_base + i);
			else
				iio_trigger_notify_done_atomic(trig);
		}
	}
}
EXPORT_SYMBOL(iio_trigger_poll);

irqreturn_t iio_trigger_generic_data_rdy_poll(int irq, void *private)
{
	iio_trigger_poll(private);
	return IRQ_HANDLED;
}
EXPORT_SYMBOL(iio_trigger_generic_data_rdy_poll);

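/**
 * iio_trigger_poll_chained() - called on a trigger occurring
 * @trig:	trigger which occurred
 *
 * Variant of iio_trigger_poll() for threaded (sleepable) context: the
 * sub-irqs are dispatched with handle_nested_irq() rather than as hard
 * interrupts.
 */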
void iio_trigger_poll_chained(struct iio_trigger *trig)
{
	int i;

	if (!atomic_read(&trig->use_count)) {
		atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER);

		for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
			if (trig->subirqs[i].enabled)
				handle_nested_irq(trig->subirq_base + i);
			else
				iio_trigger_notify_done(trig);
		}
	}
}
EXPORT_SYMBOL(iio_trigger_poll_chained);

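/**
 * iio_trigger_notify_done() - signal that one trigger consumer is done
 * @trig:	trigger in use
 *
 * Drops the use count; when the last consumer finishes, the trigger is
 * reenabled via the driver's reenable callback, if one is provided.
 */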
void iio_trigger_notify_done(struct iio_trigger *trig)
{
	if (atomic_dec_and_test(&trig->use_count) && trig->ops &&
	    trig->ops->reenable)
		trig->ops->reenable(trig);
}
EXPORT_SYMBOL(iio_trigger_notify_done);

/* Trigger Consumer related functions */
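
/*
 * Reserve a free slot in the trigger's consumer pool and map it to an
 * irq number; iio_trigger_put_irq() releases the slot again.
 */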
static int iio_trigger_get_irq(struct iio_trigger *trig)
{
	int ret;

	mutex_lock(&trig->pool_lock);
	ret = bitmap_find_free_region(trig->pool,
				      CONFIG_IIO_CONSUMERS_PER_TRIGGER,
				      ilog2(1));
	mutex_unlock(&trig->pool_lock);
	if (ret >= 0)
		ret += trig->subirq_base;

	return ret;
}

static void iio_trigger_put_irq(struct iio_trigger *trig, int irq)
{
	mutex_lock(&trig->pool_lock);
	clear_bit(irq - trig->subirq_base, trig->pool);
	mutex_unlock(&trig->pool_lock);
}

/*
 * Complexity in here. With certain triggers (datardy) an acknowledgement
 * may be needed if the pollfuncs do not include the data read for the
 * triggering device.
 * This is not currently handled. The alternative of not enabling the
 * trigger unless the relevant pollfunc is attached may be the best option.
 */
/* Worth protecting against double additions? */
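/**
 * iio_trigger_attach_poll_func() - add a consumer to a trigger
 * @trig:	trigger to attach to
 * @pf:		poll function (consumer) being attached
 *
 * Pins the consumer's driver module, reserves one of the trigger's
 * sub-irqs, requests a threaded handler for it and, if this is the
 * first consumer, asks the trigger driver to switch the trigger on.
 *
 * Return: 0 on success, a negative error code on failure.
 */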
int iio_trigger_attach_poll_func(struct iio_trigger *trig,
				 struct iio_poll_func *pf)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(pf->indio_dev);
	bool notinuse =
		bitmap_empty(trig->pool, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
	int ret = 0;

	/* Prevent the module from being removed whilst attached to a trigger */
	__module_get(iio_dev_opaque->driver_module);

	/* Get irq number */
	pf->irq = iio_trigger_get_irq(trig);
	if (pf->irq < 0) {
		pr_err("Could not find an available irq for trigger %s, CONFIG_IIO_CONSUMERS_PER_TRIGGER=%d limit might be exceeded\n",
		       trig->name, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
		/* Propagate the error rather than returning 0 */
		ret = pf->irq;
		goto out_put_module;
	}

	/* Request irq */
	ret = request_threaded_irq(pf->irq, pf->h, pf->thread,
				   pf->type, pf->name,
				   pf);
	if (ret < 0)
		goto out_put_irq;

	/* Enable trigger in driver */
	if (trig->ops && trig->ops->set_trigger_state && notinuse) {
		ret = trig->ops->set_trigger_state(trig, true);
		if (ret < 0)
			goto out_free_irq;
	}

	/*
	 * Check if we just registered to our own trigger: we determine that
	 * this is the case if the IIO device and the trigger device share the
	 * same parent device.
	 */
	if (pf->indio_dev->dev.parent == trig->dev.parent)
		trig->attached_own_device = true;

	return ret;

out_free_irq:
	free_irq(pf->irq, pf);
out_put_irq:
	iio_trigger_put_irq(trig, pf->irq);
out_put_module:
	module_put(iio_dev_opaque->driver_module);
	return ret;
}

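/**
 * iio_trigger_detach_poll_func() - remove a consumer from a trigger
 * @trig:	trigger to detach from
 * @pf:		poll function (consumer) being detached
 *
 * Undoes iio_trigger_attach_poll_func(): switches the trigger off if
 * this was the last consumer, then releases the irq, the pool slot and
 * the module reference.
 *
 * Return: 0 on success, a negative error code on failure.
 */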
int iio_trigger_detach_poll_func(struct iio_trigger *trig,
				 struct iio_poll_func *pf)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(pf->indio_dev);
	bool no_other_users =
		bitmap_weight(trig->pool, CONFIG_IIO_CONSUMERS_PER_TRIGGER) == 1;
	int ret = 0;

	if (trig->ops && trig->ops->set_trigger_state && no_other_users) {
		ret = trig->ops->set_trigger_state(trig, false);
		if (ret)
			return ret;
	}
	if (pf->indio_dev->dev.parent == trig->dev.parent)
		trig->attached_own_device = false;
	iio_trigger_put_irq(trig, pf->irq);
	free_irq(pf->irq, pf);
	module_put(iio_dev_opaque->driver_module);

	return ret;
}

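/**
 * iio_pollfunc_store_time() - timestamp half of a poll function
 * @irq:	irq number of the sub-irq that fired
 * @p:		the struct iio_poll_func in question
 *
 * Common top-half handler: records a timestamp as early as possible,
 * then wakes the threaded handler to do the actual data read.
 */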
irqreturn_t iio_pollfunc_store_time(int irq, void *p)
{
	struct iio_poll_func *pf = p;

	pf->timestamp = iio_get_time_ns(pf->indio_dev);
	return IRQ_WAKE_THREAD;
}
EXPORT_SYMBOL(iio_pollfunc_store_time);

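/**
 * iio_alloc_pollfunc() - allocate and populate an iio_poll_func
 * @h:		handler to run in hard interrupt context
 * @thread:	threaded handler, woken when @h returns IRQ_WAKE_THREAD
 * @type:	irq flags passed to request_threaded_irq() on attach
 * @indio_dev:	IIO device the poll function is used with
 * @fmt:	printf-style format string used to build the name
 *
 * Return: pointer to the new poll function, or NULL on allocation
 * failure.
 */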
struct iio_poll_func
*iio_alloc_pollfunc(irqreturn_t (*h)(int irq, void *p),
		    irqreturn_t (*thread)(int irq, void *p),
		    int type,
		    struct iio_dev *indio_dev,
		    const char *fmt,
		    ...)
{
	va_list vargs;
	struct iio_poll_func *pf;

	pf = kmalloc(sizeof(*pf), GFP_KERNEL);
	if (!pf)
		return NULL;
	va_start(vargs, fmt);
	pf->name = kvasprintf(GFP_KERNEL, fmt, vargs);
	va_end(vargs);
	if (!pf->name) {
		kfree(pf);
		return NULL;
	}
	pf->h = h;
	pf->thread = thread;
	pf->type = type;
	pf->indio_dev = indio_dev;

	return pf;
}
EXPORT_SYMBOL_GPL(iio_alloc_pollfunc);

void iio_dealloc_pollfunc(struct iio_poll_func *pf)
{
	kfree(pf->name);
	kfree(pf);
}
EXPORT_SYMBOL_GPL(iio_dealloc_pollfunc);

/**
 * current_trigger_show() - trigger consumer sysfs query current trigger
 * @dev:	device associated with an industrial I/O device
 * @attr:	pointer to the device_attribute structure that
 *		is being processed
 * @buf:	buffer where the current trigger name will be printed into
 *
 * For trigger consumers the current_trigger interface allows the trigger
 * used by the device to be queried.
 *
 * Return: a negative number on failure, the number of characters written
 * on success or 0 if no trigger is available.
 */
static ssize_t current_trigger_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	if (indio_dev->trig)
		return sysfs_emit(buf, "%s\n", indio_dev->trig->name);
	return 0;
}

/**
 * current_trigger_store() - trigger consumer sysfs set current trigger
 * @dev:	device associated with an industrial I/O device
 * @attr:	device attribute that is being processed
 * @buf:	string buffer that holds the name of the trigger
 * @len:	length of the trigger name held by buf
 *
 * For trigger consumers the current_trigger interface allows the trigger
 * used for this device to be specified at run time based on the trigger's
 * name.
 *
 * Return: negative error code on failure or length of the buffer
 * on success.
 */
static ssize_t current_trigger_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_trigger *oldtrig = indio_dev->trig;
	struct iio_trigger *trig;
	int ret;

	mutex_lock(&indio_dev->mlock);
	if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED) {
		mutex_unlock(&indio_dev->mlock);
		return -EBUSY;
	}
	if (iio_dev_opaque->trig_readonly) {
		mutex_unlock(&indio_dev->mlock);
		return -EPERM;
	}
	mutex_unlock(&indio_dev->mlock);

	trig = iio_trigger_acquire_by_name(buf);
	if (oldtrig == trig) {
		ret = len;
		goto out_trigger_put;
	}

	if (trig && indio_dev->info->validate_trigger) {
		ret = indio_dev->info->validate_trigger(indio_dev, trig);
		if (ret)
			goto out_trigger_put;
	}

	if (trig && trig->ops && trig->ops->validate_device) {
		ret = trig->ops->validate_device(trig, indio_dev);
		if (ret)
			goto out_trigger_put;
	}

	indio_dev->trig = trig;

	if (oldtrig) {
		if (indio_dev->modes & INDIO_EVENT_TRIGGERED)
			iio_trigger_detach_poll_func(oldtrig,
						     indio_dev->pollfunc_event);
		iio_trigger_put(oldtrig);
	}
	if (indio_dev->trig) {
		if (indio_dev->modes & INDIO_EVENT_TRIGGERED)
			iio_trigger_attach_poll_func(indio_dev->trig,
						     indio_dev->pollfunc_event);
	}

	return len;

out_trigger_put:
	if (trig)
		iio_trigger_put(trig);
	return ret;
}

static DEVICE_ATTR_RW(current_trigger);

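/*
 * From user space the trigger is selected by name through sysfs (per
 * the documented IIO ABI), e.g.:
 *
 *	echo trigger0 > /sys/bus/iio/devices/iio:device0/trigger/current_trigger
 *
 * Writing a name that matches no registered trigger (such as an empty
 * string) detaches the current trigger.
 */
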
static struct attribute *iio_trigger_consumer_attrs[] = {
	&dev_attr_current_trigger.attr,
	NULL,
};

static const struct attribute_group iio_trigger_consumer_attr_group = {
	.name = "trigger",
	.attrs = iio_trigger_consumer_attrs,
};

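/*
 * Device-model release callback: tears down the sub-irq descriptors,
 * if any were allocated, then frees the trigger's name and memory.
 */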
static void iio_trig_release(struct device *device)
{
	struct iio_trigger *trig = to_iio_trigger(device);
	int i;

	if (trig->subirq_base) {
		for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
			irq_modify_status(trig->subirq_base + i,
					  IRQ_NOAUTOEN,
					  IRQ_NOREQUEST | IRQ_NOPROBE);
			irq_set_chip(trig->subirq_base + i, NULL);
			irq_set_handler(trig->subirq_base + i, NULL);
		}

		irq_free_descs(trig->subirq_base,
			       CONFIG_IIO_CONSUMERS_PER_TRIGGER);
	}
	kfree(trig->name);
	kfree(trig);
}

static const struct device_type iio_trig_type = {
	.release = iio_trig_release,
	.groups = iio_trig_dev_groups,
};

static void iio_trig_subirqmask(struct irq_data *d)
{
	struct irq_chip *chip = irq_data_get_irq_chip(d);
	struct iio_trigger *trig = container_of(chip, struct iio_trigger, subirq_chip);

	trig->subirqs[d->irq - trig->subirq_base].enabled = false;
}

static void iio_trig_subirqunmask(struct irq_data *d)
{
	struct irq_chip *chip = irq_data_get_irq_chip(d);
	struct iio_trigger *trig = container_of(chip, struct iio_trigger, subirq_chip);

	trig->subirqs[d->irq - trig->subirq_base].enabled = true;
}

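/*
 * Allocate and initialize a trigger; registration with the core is a
 * separate step (iio_trigger_register()). Each trigger gets
 * CONFIG_IIO_CONSUMERS_PER_TRIGGER sub-irq descriptors, masked and
 * unmasked through its own irq_chip.
 */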
static __printf(3, 0)
struct iio_trigger *viio_trigger_alloc(struct device *parent,
				       struct module *this_mod,
				       const char *fmt,
				       va_list vargs)
{
	struct iio_trigger *trig;
	int i;

	trig = kzalloc(sizeof(*trig), GFP_KERNEL);
	if (!trig)
		return NULL;

	trig->dev.parent = parent;
	trig->dev.type = &iio_trig_type;
	trig->dev.bus = &iio_bus_type;
	device_initialize(&trig->dev);
	INIT_WORK(&trig->reenable_work, iio_reenable_work_fn);

	mutex_init(&trig->pool_lock);
	trig->subirq_base = irq_alloc_descs(-1, 0,
					    CONFIG_IIO_CONSUMERS_PER_TRIGGER,
					    0);
	if (trig->subirq_base < 0)
		goto free_trig;

	trig->name = kvasprintf(GFP_KERNEL, fmt, vargs);
	if (!trig->name)
		goto free_descs;

	INIT_LIST_HEAD(&trig->list);

	trig->owner = this_mod;

	trig->subirq_chip.name = trig->name;
	trig->subirq_chip.irq_mask = &iio_trig_subirqmask;
	trig->subirq_chip.irq_unmask = &iio_trig_subirqunmask;
	for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
		irq_set_chip(trig->subirq_base + i, &trig->subirq_chip);
		irq_set_handler(trig->subirq_base + i, &handle_simple_irq);
		irq_modify_status(trig->subirq_base + i,
				  IRQ_NOREQUEST | IRQ_NOAUTOEN, IRQ_NOPROBE);
	}

	return trig;

free_descs:
	irq_free_descs(trig->subirq_base, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
free_trig:
	kfree(trig);
	return NULL;
}

/**
 * __iio_trigger_alloc - Allocate a trigger
 * @parent:	device to allocate iio_trigger for
 * @this_mod:	module allocating the trigger
 * @fmt:	trigger name format. If it includes format specifiers,
 *		the additional arguments following format are formatted
 *		and inserted in the resulting string replacing their
 *		respective specifiers.
 *
 * Return: pointer to allocated iio_trigger on success, NULL on failure.
 */
struct iio_trigger *__iio_trigger_alloc(struct device *parent,
					struct module *this_mod,
					const char *fmt, ...)
{
	struct iio_trigger *trig;
	va_list vargs;

	va_start(vargs, fmt);
	trig = viio_trigger_alloc(parent, this_mod, fmt, vargs);
	va_end(vargs);

	return trig;
}
EXPORT_SYMBOL(__iio_trigger_alloc);

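/**
 * iio_trigger_free() - free a trigger
 * @trig:	trigger to be freed, may be NULL
 *
 * Drops the device reference; the memory is released by the device
 * release callback once the last reference is gone.
 */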
void iio_trigger_free(struct iio_trigger *trig)
{
	if (trig)
		put_device(&trig->dev);
}
EXPORT_SYMBOL(iio_trigger_free);

static void devm_iio_trigger_release(struct device *dev, void *res)
{
	iio_trigger_free(*(struct iio_trigger **)res);
}

/**
 * __devm_iio_trigger_alloc - Resource-managed iio_trigger_alloc()
 * @parent:	device to allocate iio_trigger for
 * @this_mod:	module allocating the trigger
 * @fmt:	trigger name format. If it includes format specifiers,
 *		the additional arguments following format are formatted
 *		and inserted in the resulting string replacing their
 *		respective specifiers.
 *
 * Managed iio_trigger_alloc(). An iio_trigger allocated with this
 * function is automatically freed on driver detach.
 *
 * Return: pointer to allocated iio_trigger on success, NULL on failure.
 */
struct iio_trigger *__devm_iio_trigger_alloc(struct device *parent,
					     struct module *this_mod,
					     const char *fmt, ...)
{
	struct iio_trigger **ptr, *trig;
	va_list vargs;

	ptr = devres_alloc(devm_iio_trigger_release, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return NULL;

	va_start(vargs, fmt);
	trig = viio_trigger_alloc(parent, this_mod, fmt, vargs);
	va_end(vargs);
	if (trig) {
		*ptr = trig;
		devres_add(parent, ptr);
	} else {
		devres_free(ptr);
	}

	return trig;
}
EXPORT_SYMBOL_GPL(__devm_iio_trigger_alloc);

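/*
 * Illustrative sketch (not part of this file): a driver probe path
 * would typically pair the managed allocator with
 * devm_iio_trigger_register(). The names "my_trig_ops" and "st" below
 * are hypothetical.
 *
 *	struct iio_trigger *trig;
 *	int ret;
 *
 *	trig = devm_iio_trigger_alloc(dev, "%s-dev%d", name, id);
 *	if (!trig)
 *		return -ENOMEM;
 *	trig->ops = &my_trig_ops;
 *	iio_trigger_set_drvdata(trig, st);
 *	ret = devm_iio_trigger_register(dev, trig);
 *	if (ret)
 *		return ret;
 */
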
static void devm_iio_trigger_unreg(void *trigger_info)
{
	iio_trigger_unregister(trigger_info);
}

/**
 * devm_iio_trigger_register - Resource-managed iio_trigger_register()
 * @dev:	device this trigger was allocated for
 * @trig_info:	trigger to register
 *
 * Managed iio_trigger_register(). The IIO trigger registered with this
 * function is automatically unregistered on driver detach. This function
 * calls iio_trigger_register() internally. Refer to that function for more
 * information.
 *
 * Return: 0 on success, negative error number on failure.
 */
int devm_iio_trigger_register(struct device *dev,
			      struct iio_trigger *trig_info)
{
	int ret;

	ret = iio_trigger_register(trig_info);
	if (ret)
		return ret;

	return devm_add_action_or_reset(dev, devm_iio_trigger_unreg, trig_info);
}
EXPORT_SYMBOL_GPL(devm_iio_trigger_register);

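/**
 * iio_trigger_using_own() - check whether a device uses its own trigger
 * @indio_dev:	device to check
 *
 * Only meaningful while a trigger is attached.
 *
 * Return: true if the attached trigger shares a parent device with
 * @indio_dev, as recorded when the poll function was attached.
 */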
bool iio_trigger_using_own(struct iio_dev *indio_dev)
{
	return indio_dev->trig->attached_own_device;
}
EXPORT_SYMBOL(iio_trigger_using_own);

/**
 * iio_trigger_validate_own_device - Check if a trigger and IIO device belong to
 *				     the same device
 * @trig:	the IIO trigger to check
 * @indio_dev:	the IIO device to check
 *
 * This function can be used as the validate_device callback for triggers that
 * can only be attached to their own device.
 *
 * Return: 0 if both the trigger and the IIO device belong to the same
 * device, -EINVAL otherwise.
 */
int iio_trigger_validate_own_device(struct iio_trigger *trig,
				    struct iio_dev *indio_dev)
{
	if (indio_dev->dev.parent != trig->dev.parent)
		return -EINVAL;
	return 0;
}
EXPORT_SYMBOL(iio_trigger_validate_own_device);

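/*
 * Register the "trigger" sysfs group (current_trigger) for a device
 * that can consume triggers; the unregister path drops any reference
 * held on a trigger that was selected but never attached.
 */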
int iio_device_register_trigger_consumer(struct iio_dev *indio_dev)
{
	return iio_device_register_sysfs_group(indio_dev,
					       &iio_trigger_consumer_attr_group);
}

void iio_device_unregister_trigger_consumer(struct iio_dev *indio_dev)
{
	/* Clean up an associated but not attached trigger reference */
	if (indio_dev->trig)
		iio_trigger_put(indio_dev->trig);
}