// SPDX-License-Identifier: GPL-2.0-only
/* The industrial I/O core, trigger handling functions
 *
 * Copyright (c) 2008 Jonathan Cameron
 */

#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <linux/iio/iio.h>
#include <linux/iio/trigger.h>
#include "iio_core.h"
#include "iio_core_trigger.h"
#include <linux/iio/trigger_consumer.h>

/* RFC - Question of approach
 * Make the common case (single sensor, single trigger) simple by
 * starting trigger capture as soon as the first sensor is added.
 *
 * Complex simultaneous start requires use of the 'hold' functionality
 * of the trigger. (not implemented)
 *
 * Any other suggestions?
 */

static DEFINE_IDA(iio_trigger_ida);

/* Single list of all available triggers */
static LIST_HEAD(iio_trigger_list);
static DEFINE_MUTEX(iio_trigger_list_lock);

/**
 * iio_trigger_read_name() - retrieve useful identifying name
 * @dev:	device associated with the iio_trigger
 * @attr:	pointer to the device_attribute structure that is
 *		being processed
 * @buf:	buffer to print the name into
 *
 * Return: a negative number on failure or the number of written
 *	   characters on success.
 */
static ssize_t iio_trigger_read_name(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct iio_trigger *trig = to_iio_trigger(dev);

	return sprintf(buf, "%s\n", trig->name);
}

static DEVICE_ATTR(name, S_IRUGO, iio_trigger_read_name, NULL);

static struct attribute *iio_trig_dev_attrs[] = {
	&dev_attr_name.attr,
	NULL,
};
ATTRIBUTE_GROUPS(iio_trig_dev);

static struct iio_trigger *__iio_trigger_find_by_name(const char *name);

int __iio_trigger_register(struct iio_trigger *trig_info,
			   struct module *this_mod)
{
	int ret;

	trig_info->owner = this_mod;

	trig_info->id = ida_simple_get(&iio_trigger_ida, 0, 0, GFP_KERNEL);
	if (trig_info->id < 0)
		return trig_info->id;

	/* Set the name used for the sysfs directory etc */
	dev_set_name(&trig_info->dev, "trigger%ld",
		     (unsigned long) trig_info->id);

	ret = device_add(&trig_info->dev);
	if (ret)
		goto error_unregister_id;

	/* Add to list of available triggers held by the IIO core */
	mutex_lock(&iio_trigger_list_lock);
	if (__iio_trigger_find_by_name(trig_info->name)) {
		pr_err("Duplicate trigger name '%s'\n", trig_info->name);
		ret = -EEXIST;
		goto error_device_del;
	}
	list_add_tail(&trig_info->list, &iio_trigger_list);
	mutex_unlock(&iio_trigger_list_lock);

	return 0;

error_device_del:
	mutex_unlock(&iio_trigger_list_lock);
	device_del(&trig_info->dev);
error_unregister_id:
	ida_simple_remove(&iio_trigger_ida, trig_info->id);
	return ret;
}
EXPORT_SYMBOL(__iio_trigger_register);
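
/*
 * Drivers do not normally call __iio_trigger_register() directly: the
 * iio_trigger_register() wrapper in <linux/iio/trigger.h> supplies
 * THIS_MODULE as @this_mod:
 *
 *	#define iio_trigger_register(trig_info) \
 *		__iio_trigger_register((trig_info), THIS_MODULE)
 */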

void iio_trigger_unregister(struct iio_trigger *trig_info)
{
	mutex_lock(&iio_trigger_list_lock);
	list_del(&trig_info->list);
	mutex_unlock(&iio_trigger_list_lock);

	ida_simple_remove(&iio_trigger_ida, trig_info->id);
	/* Possible issue in here */
	device_del(&trig_info->dev);
}
EXPORT_SYMBOL(iio_trigger_unregister);

int iio_trigger_set_immutable(struct iio_dev *indio_dev, struct iio_trigger *trig)
{
	if (!indio_dev || !trig)
		return -EINVAL;

	mutex_lock(&indio_dev->mlock);
	WARN_ON(indio_dev->trig_readonly);

	indio_dev->trig = iio_trigger_get(trig);
	indio_dev->trig_readonly = true;
	mutex_unlock(&indio_dev->mlock);

	return 0;
}
EXPORT_SYMBOL(iio_trigger_set_immutable);
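
/*
 * A minimal, illustrative sketch of the intended use (hypothetical driver
 * state 'st', error handling trimmed): a driver whose device must always be
 * driven from its own hardware trigger pins that trigger right after
 * registering it.
 *
 *	ret = iio_trigger_register(st->trig);
 *	if (ret)
 *		return ret;
 *	ret = iio_trigger_set_immutable(indio_dev, st->trig);
 */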

/* Search for trigger by name, assuming iio_trigger_list_lock held */
static struct iio_trigger *__iio_trigger_find_by_name(const char *name)
{
	struct iio_trigger *iter;

	list_for_each_entry(iter, &iio_trigger_list, list)
		if (!strcmp(iter->name, name))
			return iter;

	return NULL;
}

static struct iio_trigger *iio_trigger_acquire_by_name(const char *name)
{
	struct iio_trigger *trig = NULL, *iter;

	mutex_lock(&iio_trigger_list_lock);
	list_for_each_entry(iter, &iio_trigger_list, list)
		if (sysfs_streq(iter->name, name)) {
			trig = iter;
			iio_trigger_get(trig);
			break;
		}
	mutex_unlock(&iio_trigger_list_lock);

	return trig;
}

void iio_trigger_poll(struct iio_trigger *trig)
{
	int i;

	if (!atomic_read(&trig->use_count)) {
		atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER);

		for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
			if (trig->subirqs[i].enabled)
				generic_handle_irq(trig->subirq_base + i);
			else
				iio_trigger_notify_done(trig);
		}
	}
}
EXPORT_SYMBOL(iio_trigger_poll);

irqreturn_t iio_trigger_generic_data_rdy_poll(int irq, void *private)
{
	iio_trigger_poll(private);
	return IRQ_HANDLED;
}
EXPORT_SYMBOL(iio_trigger_generic_data_rdy_poll);
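
/*
 * A minimal sketch of the usual wiring (hypothetical names): the device's
 * data-ready interrupt line is requested with the trigger itself as dev_id,
 * so that iio_trigger_poll() gets it back via @private.
 *
 *	ret = devm_request_irq(dev, irq, iio_trigger_generic_data_rdy_poll,
 *			       IRQF_TRIGGER_RISING, "my-drdy", st->trig);
 */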

void iio_trigger_poll_chained(struct iio_trigger *trig)
{
	int i;

	if (!atomic_read(&trig->use_count)) {
		atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER);

		for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
			if (trig->subirqs[i].enabled)
				handle_nested_irq(trig->subirq_base + i);
			else
				iio_trigger_notify_done(trig);
		}
	}
}
EXPORT_SYMBOL(iio_trigger_poll_chained);

void iio_trigger_notify_done(struct iio_trigger *trig)
{
	if (atomic_dec_and_test(&trig->use_count) && trig->ops &&
	    trig->ops->try_reenable)
		if (trig->ops->try_reenable(trig))
			/* Missed an interrupt so launch new poll now */
			iio_trigger_poll(trig);
}
EXPORT_SYMBOL(iio_trigger_notify_done);

/* Trigger Consumer related functions */
static int iio_trigger_get_irq(struct iio_trigger *trig)
{
	int ret;

	mutex_lock(&trig->pool_lock);
	ret = bitmap_find_free_region(trig->pool,
				      CONFIG_IIO_CONSUMERS_PER_TRIGGER,
				      ilog2(1));
	mutex_unlock(&trig->pool_lock);
	if (ret >= 0)
		ret += trig->subirq_base;

	return ret;
}

static void iio_trigger_put_irq(struct iio_trigger *trig, int irq)
{
	mutex_lock(&trig->pool_lock);
	clear_bit(irq - trig->subirq_base, trig->pool);
	mutex_unlock(&trig->pool_lock);
}

/* Complexity in here. With certain triggers (datardy) an acknowledgement
 * may be needed if the pollfuncs do not include the data read for the
 * triggering device.
 * This is not currently handled. The alternative of not enabling the
 * trigger unless the relevant function is attached may be the best option.
 */
/* Worth protecting against double additions? */
int iio_trigger_attach_poll_func(struct iio_trigger *trig,
				 struct iio_poll_func *pf)
{
	int ret = 0;
	bool notinuse =
		bitmap_empty(trig->pool, CONFIG_IIO_CONSUMERS_PER_TRIGGER);

	/* Prevent the module from being removed whilst attached to a trigger */
	__module_get(pf->indio_dev->driver_module);

	/* Get irq number */
	pf->irq = iio_trigger_get_irq(trig);
	if (pf->irq < 0) {
		pr_err("Could not find an available irq for trigger %s, CONFIG_IIO_CONSUMERS_PER_TRIGGER=%d limit might be exceeded\n",
		       trig->name, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
		/* Propagate the error rather than silently returning 0 */
		ret = pf->irq;
		goto out_put_module;
	}

	/* Request irq */
	ret = request_threaded_irq(pf->irq, pf->h, pf->thread,
				   pf->type, pf->name,
				   pf);
	if (ret < 0)
		goto out_put_irq;

	/* Enable trigger in driver */
	if (trig->ops && trig->ops->set_trigger_state && notinuse) {
		ret = trig->ops->set_trigger_state(trig, true);
		if (ret < 0)
			goto out_free_irq;
	}

	/*
	 * Check if we just registered to our own trigger: we determine that
	 * this is the case if the IIO device and the trigger device share the
	 * same parent device.
	 */
	if (pf->indio_dev->dev.parent == trig->dev.parent)
		trig->attached_own_device = true;

	return ret;

out_free_irq:
	free_irq(pf->irq, pf);
out_put_irq:
	iio_trigger_put_irq(trig, pf->irq);
out_put_module:
	module_put(pf->indio_dev->driver_module);
	return ret;
}

int iio_trigger_detach_poll_func(struct iio_trigger *trig,
				 struct iio_poll_func *pf)
{
	int ret = 0;
	bool no_other_users =
		(bitmap_weight(trig->pool,
			       CONFIG_IIO_CONSUMERS_PER_TRIGGER) == 1);

	if (trig->ops && trig->ops->set_trigger_state && no_other_users) {
		ret = trig->ops->set_trigger_state(trig, false);
		if (ret)
			return ret;
	}
	if (pf->indio_dev->dev.parent == trig->dev.parent)
		trig->attached_own_device = false;
	iio_trigger_put_irq(trig, pf->irq);
	free_irq(pf->irq, pf);
	module_put(pf->indio_dev->driver_module);

	return ret;
}

irqreturn_t iio_pollfunc_store_time(int irq, void *p)
{
	struct iio_poll_func *pf = p;

	pf->timestamp = iio_get_time_ns(pf->indio_dev);
	return IRQ_WAKE_THREAD;
}
EXPORT_SYMBOL(iio_pollfunc_store_time);
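
/*
 * A minimal sketch of the usual consumer (hypothetical thread handler):
 * passed as the top half of a triggered buffer, this captures the timestamp
 * as close to the event as possible before the threaded bottom half runs.
 *
 *	ret = iio_triggered_buffer_setup(indio_dev, iio_pollfunc_store_time,
 *					 my_trigger_handler, NULL);
 */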

struct iio_poll_func
*iio_alloc_pollfunc(irqreturn_t (*h)(int irq, void *p),
		    irqreturn_t (*thread)(int irq, void *p),
		    int type,
		    struct iio_dev *indio_dev,
		    const char *fmt,
		    ...)
{
	va_list vargs;
	struct iio_poll_func *pf;

	pf = kmalloc(sizeof *pf, GFP_KERNEL);
	if (pf == NULL)
		return NULL;
	va_start(vargs, fmt);
	pf->name = kvasprintf(GFP_KERNEL, fmt, vargs);
	va_end(vargs);
	if (pf->name == NULL) {
		kfree(pf);
		return NULL;
	}
	pf->h = h;
	pf->thread = thread;
	pf->type = type;
	pf->indio_dev = indio_dev;

	return pf;
}
EXPORT_SYMBOL_GPL(iio_alloc_pollfunc);
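
/*
 * A minimal sketch of a direct caller (hypothetical thread handler); most
 * drivers get an equivalent call made for them by
 * iio_triggered_buffer_setup():
 *
 *	indio_dev->pollfunc = iio_alloc_pollfunc(iio_pollfunc_store_time,
 *						 my_trigger_handler,
 *						 IRQF_ONESHOT, indio_dev,
 *						 "%s_consumer%d",
 *						 indio_dev->name,
 *						 indio_dev->id);
 *	if (!indio_dev->pollfunc)
 *		return -ENOMEM;
 */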

void iio_dealloc_pollfunc(struct iio_poll_func *pf)
{
	kfree(pf->name);
	kfree(pf);
}
EXPORT_SYMBOL_GPL(iio_dealloc_pollfunc);

/**
 * iio_trigger_read_current() - trigger consumer sysfs query current trigger
 * @dev:	device associated with an industrial I/O device
 * @attr:	pointer to the device_attribute structure that
 *		is being processed
 * @buf:	buffer where the current trigger name will be printed into
 *
 * For trigger consumers the current_trigger interface allows the trigger
 * used by the device to be queried.
 *
 * Return: a negative number on failure, the number of characters written
 *	   on success or 0 if no trigger is available
 */
static ssize_t iio_trigger_read_current(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	if (indio_dev->trig)
		return sprintf(buf, "%s\n", indio_dev->trig->name);
	return 0;
}

/**
 * iio_trigger_write_current() - trigger consumer sysfs set current trigger
 * @dev:	device associated with an industrial I/O device
 * @attr:	device attribute that is being processed
 * @buf:	string buffer that holds the name of the trigger
 * @len:	length of the trigger name held by buf
 *
 * For trigger consumers the current_trigger interface allows the trigger
 * used for this device to be specified at run time based on the trigger's
 * name.
 *
 * Return: negative error code on failure or length of the buffer
 *	   on success
 */
static ssize_t iio_trigger_write_current(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf,
					 size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_trigger *oldtrig = indio_dev->trig;
	struct iio_trigger *trig;
	int ret;

	mutex_lock(&indio_dev->mlock);
	if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
		mutex_unlock(&indio_dev->mlock);
		return -EBUSY;
	}
	if (indio_dev->trig_readonly) {
		mutex_unlock(&indio_dev->mlock);
		return -EPERM;
	}
	mutex_unlock(&indio_dev->mlock);

	trig = iio_trigger_acquire_by_name(buf);
	if (oldtrig == trig) {
		ret = len;
		goto out_trigger_put;
	}

	if (trig && indio_dev->info->validate_trigger) {
		ret = indio_dev->info->validate_trigger(indio_dev, trig);
		if (ret)
			goto out_trigger_put;
	}

	if (trig && trig->ops && trig->ops->validate_device) {
		ret = trig->ops->validate_device(trig, indio_dev);
		if (ret)
			goto out_trigger_put;
	}

	indio_dev->trig = trig;

	if (oldtrig) {
		if (indio_dev->modes & INDIO_EVENT_TRIGGERED)
			iio_trigger_detach_poll_func(oldtrig,
						     indio_dev->pollfunc_event);
		iio_trigger_put(oldtrig);
	}
	if (indio_dev->trig) {
		if (indio_dev->modes & INDIO_EVENT_TRIGGERED)
			iio_trigger_attach_poll_func(indio_dev->trig,
						     indio_dev->pollfunc_event);
	}

	return len;

out_trigger_put:
	if (trig)
		iio_trigger_put(trig);
	return ret;
}

static DEVICE_ATTR(current_trigger, S_IRUGO | S_IWUSR,
		   iio_trigger_read_current,
		   iio_trigger_write_current);

static struct attribute *iio_trigger_consumer_attrs[] = {
	&dev_attr_current_trigger.attr,
	NULL,
};

static const struct attribute_group iio_trigger_consumer_attr_group = {
	.name = "trigger",
	.attrs = iio_trigger_consumer_attrs,
};
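
/*
 * The group above surfaces as a "trigger" directory under the consumer
 * device in sysfs; userspace selects a trigger by name, e.g.:
 *
 *	cat /sys/bus/iio/devices/iio:device0/trigger/current_trigger
 *	echo sysfstrig0 > /sys/bus/iio/devices/iio:device0/trigger/current_trigger
 */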

static void iio_trig_release(struct device *device)
{
	struct iio_trigger *trig = to_iio_trigger(device);
	int i;

	if (trig->subirq_base) {
		for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
			irq_modify_status(trig->subirq_base + i,
					  IRQ_NOAUTOEN,
					  IRQ_NOREQUEST | IRQ_NOPROBE);
			irq_set_chip(trig->subirq_base + i, NULL);
			irq_set_handler(trig->subirq_base + i, NULL);
		}

		irq_free_descs(trig->subirq_base,
			       CONFIG_IIO_CONSUMERS_PER_TRIGGER);
	}
	kfree(trig->name);
	kfree(trig);
}

static const struct device_type iio_trig_type = {
	.release = iio_trig_release,
	.groups = iio_trig_dev_groups,
};

static void iio_trig_subirqmask(struct irq_data *d)
{
	struct irq_chip *chip = irq_data_get_irq_chip(d);
	struct iio_trigger *trig = container_of(chip,
						struct iio_trigger,
						subirq_chip);

	trig->subirqs[d->irq - trig->subirq_base].enabled = false;
}

static void iio_trig_subirqunmask(struct irq_data *d)
{
	struct irq_chip *chip = irq_data_get_irq_chip(d);
	struct iio_trigger *trig = container_of(chip,
						struct iio_trigger,
						subirq_chip);

	trig->subirqs[d->irq - trig->subirq_base].enabled = true;
}

static __printf(1, 0)
struct iio_trigger *viio_trigger_alloc(const char *fmt, va_list vargs)
{
	struct iio_trigger *trig;
	int i;

	trig = kzalloc(sizeof *trig, GFP_KERNEL);
	if (!trig)
		return NULL;

	trig->dev.type = &iio_trig_type;
	trig->dev.bus = &iio_bus_type;
	device_initialize(&trig->dev);

	mutex_init(&trig->pool_lock);
	trig->subirq_base = irq_alloc_descs(-1, 0,
					    CONFIG_IIO_CONSUMERS_PER_TRIGGER,
					    0);
	if (trig->subirq_base < 0)
		goto free_trig;

	trig->name = kvasprintf(GFP_KERNEL, fmt, vargs);
	if (trig->name == NULL)
		goto free_descs;

	trig->subirq_chip.name = trig->name;
	trig->subirq_chip.irq_mask = &iio_trig_subirqmask;
	trig->subirq_chip.irq_unmask = &iio_trig_subirqunmask;
	trig->subirq_chip.flags = IRQCHIP_PIPELINE_SAFE;

	for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
		irq_set_chip(trig->subirq_base + i, &trig->subirq_chip);
		irq_set_handler(trig->subirq_base + i, &handle_simple_irq);
		irq_modify_status(trig->subirq_base + i,
				  IRQ_NOREQUEST | IRQ_NOAUTOEN, IRQ_NOPROBE);
	}

	return trig;

free_descs:
	irq_free_descs(trig->subirq_base, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
free_trig:
	kfree(trig);
	return NULL;
}

struct iio_trigger *iio_trigger_alloc(const char *fmt, ...)
{
	struct iio_trigger *trig;
	va_list vargs;

	va_start(vargs, fmt);
	trig = viio_trigger_alloc(fmt, vargs);
	va_end(vargs);

	return trig;
}
EXPORT_SYMBOL(iio_trigger_alloc);
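
/*
 * A minimal allocation sketch (hypothetical names; see the resource-managed
 * variant below for the devm form):
 *
 *	st->trig = iio_trigger_alloc("%s-dev%d", indio_dev->name,
 *				     indio_dev->id);
 *	if (!st->trig)
 *		return -ENOMEM;
 *	st->trig->ops = &my_trigger_ops;
 *	iio_trigger_set_drvdata(st->trig, st);
 */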

void iio_trigger_free(struct iio_trigger *trig)
{
	if (trig)
		put_device(&trig->dev);
}
EXPORT_SYMBOL(iio_trigger_free);

static void devm_iio_trigger_release(struct device *dev, void *res)
{
	iio_trigger_free(*(struct iio_trigger **)res);
}

/**
 * devm_iio_trigger_alloc - Resource-managed iio_trigger_alloc()
 * @dev:	Device to allocate iio_trigger for
 * @fmt:	trigger name format. If it includes format
 *		specifiers, the additional arguments following
 *		format are formatted and inserted in the resulting
 *		string replacing their respective specifiers.
 *
 * Managed iio_trigger_alloc. iio_trigger allocated with this function is
 * automatically freed on driver detach.
 *
 * RETURNS:
 * Pointer to allocated iio_trigger on success, NULL on failure.
 */
struct iio_trigger *devm_iio_trigger_alloc(struct device *dev,
					   const char *fmt, ...)
{
	struct iio_trigger **ptr, *trig;
	va_list vargs;

	ptr = devres_alloc(devm_iio_trigger_release, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return NULL;

	/* use raw alloc_dr for kmalloc caller tracing */
	va_start(vargs, fmt);
	trig = viio_trigger_alloc(fmt, vargs);
	va_end(vargs);
	if (trig) {
		*ptr = trig;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return trig;
}
EXPORT_SYMBOL_GPL(devm_iio_trigger_alloc);
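
/*
 * A minimal probe-time sketch (hypothetical names); no matching free is
 * needed since the trigger is released on driver detach:
 *
 *	st->trig = devm_iio_trigger_alloc(dev, "%s-dev%d",
 *					  indio_dev->name, indio_dev->id);
 *	if (!st->trig)
 *		return -ENOMEM;
 */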

static void devm_iio_trigger_unreg(struct device *dev, void *res)
{
	iio_trigger_unregister(*(struct iio_trigger **)res);
}

/**
 * __devm_iio_trigger_register - Resource-managed iio_trigger_register()
 * @dev:	device this trigger was allocated for
 * @trig_info:	trigger to register
 * @this_mod:	module registering the trigger
 *
 * Managed iio_trigger_register(). The IIO trigger registered with this
 * function is automatically unregistered on driver detach. This function
 * calls __iio_trigger_register() internally. Refer to that function for
 * more information.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int __devm_iio_trigger_register(struct device *dev,
				struct iio_trigger *trig_info,
				struct module *this_mod)
{
	struct iio_trigger **ptr;
	int ret;

	ptr = devres_alloc(devm_iio_trigger_unreg, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	*ptr = trig_info;
	ret = __iio_trigger_register(trig_info, this_mod);
	if (!ret)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return ret;
}
EXPORT_SYMBOL_GPL(__devm_iio_trigger_register);
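
/*
 * As with plain registration, drivers use the devm_iio_trigger_register()
 * wrapper, which supplies THIS_MODULE:
 *
 *	ret = devm_iio_trigger_register(dev, st->trig);
 *	if (ret)
 *		return ret;
 */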

bool iio_trigger_using_own(struct iio_dev *indio_dev)
{
	return indio_dev->trig->attached_own_device;
}
EXPORT_SYMBOL(iio_trigger_using_own);

/**
 * iio_trigger_validate_own_device - Check if a trigger and IIO device
 *	belong to the same device
 * @trig:	The IIO trigger to check
 * @indio_dev:	the IIO device to check
 *
 * This function can be used as the validate_device callback for triggers that
 * can only be attached to their own device.
 *
 * Return: 0 if both the trigger and the IIO device belong to the same
 * device, -EINVAL otherwise.
 */
int iio_trigger_validate_own_device(struct iio_trigger *trig,
				    struct iio_dev *indio_dev)
{
	if (indio_dev->dev.parent != trig->dev.parent)
		return -EINVAL;
	return 0;
}
EXPORT_SYMBOL(iio_trigger_validate_own_device);
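
/*
 * A minimal sketch of the intended use, hung off a driver's trigger ops
 * (hypothetical ops and handler names):
 *
 *	static const struct iio_trigger_ops my_trigger_ops = {
 *		.set_trigger_state = my_set_trigger_state,
 *		.validate_device = iio_trigger_validate_own_device,
 *	};
 */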

void iio_device_register_trigger_consumer(struct iio_dev *indio_dev)
{
	indio_dev->groups[indio_dev->groupcounter++] =
		&iio_trigger_consumer_attr_group;
}

void iio_device_unregister_trigger_consumer(struct iio_dev *indio_dev)
{
	/* Clean up an associated but not attached trigger reference */
	if (indio_dev->trig)
		iio_trigger_put(indio_dev->trig);
}