```diff
@@ -42,7 +42,7 @@
                                 if (dev->enabled_protocols &
                                     handler->protocols || !handler->protocols)
                                         handler->decode(dev, ev);
-                        ir_lirc_raw_event(dev, ev);
+                        lirc_raw_event(dev, ev);
                         raw->prev_ev = ev;
                 }
                 mutex_unlock(&ir_raw_handler_lock);
```
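The gating expression kept in this hunk runs a protocol handler's `decode()` callback when the device's enabled-protocol mask overlaps the handler's mask, or unconditionally when the handler declares no protocols at all. A minimal userspace sketch of that logic, with hypothetical handler names and masks rather than rc-core's types:

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for rc-core's handler bookkeeping; not kernel code. */
struct fake_handler {
	const char *name;
	uint64_t protocols;	/* 0 means "runs regardless of enabled protocols" */
};

/* Mirrors the condition in the hunk above: decode if any of the handler's
 * protocols are enabled, or if the handler has no protocol mask at all.
 */
static int should_decode(uint64_t enabled_protocols, const struct fake_handler *h)
{
	return (enabled_protocols & h->protocols) || !h->protocols;
}

int main(void)
{
	struct fake_handler handlers[] = {
		{ "nec",  0x1 },
		{ "rc6",  0x2 },
		{ "lirc", 0x0 },	/* protocol-less: always invoked */
	};
	uint64_t enabled = 0x1;		/* only the first protocol enabled */

	for (unsigned i = 0; i < sizeof(handlers) / sizeof(handlers[0]); i++)
		printf("%s: %s\n", handlers[i].name,
		       should_decode(enabled, &handlers[i]) ? "decode" : "skip");
	return 0;
}
```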
```diff
@@ -77,7 +77,7 @@
                 return -EINVAL;
 
         dev_dbg(&dev->dev, "sample: (%05dus %s)\n",
-                TO_US(ev->duration), TO_STR(ev->pulse));
+                ev->duration, TO_STR(ev->pulse));
 
         if (!kfifo_put(&dev->raw->kfifo, *ev)) {
                 dev_err(&dev->dev, "IR event FIFO is full!\n");
```
```diff
@@ -102,13 +102,13 @@
 int ir_raw_event_store_edge(struct rc_dev *dev, bool pulse)
 {
         ktime_t now;
-        DEFINE_IR_RAW_EVENT(ev);
+        struct ir_raw_event ev = {};
 
         if (!dev->raw)
                 return -EINVAL;
 
         now = ktime_get();
-        ev.duration = ktime_to_ns(ktime_sub(now, dev->raw->last_event));
+        ev.duration = ktime_to_us(ktime_sub(now, dev->raw->last_event));
         ev.pulse = !pulse;
 
         return ir_raw_event_store_with_timeout(dev, &ev);
```
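With this change `ev.duration` is filled in microseconds: the gap since the previous edge is taken with `ktime_sub()` and converted with `ktime_to_us()` instead of `ktime_to_ns()`. A rough userspace analogue of that bookkeeping, using `clock_gettime()` purely for illustration (none of this is rc-core code):

```c
#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

/* Remember the time of the previous edge and report the gap to the current
 * one in microseconds, analogous to ktime_to_us(ktime_sub(now, last_event)).
 */
static int64_t elapsed_us(const struct timespec *last, const struct timespec *now)
{
	return (now->tv_sec - last->tv_sec) * 1000000LL +
	       (now->tv_nsec - last->tv_nsec) / 1000;
}

int main(void)
{
	struct timespec last_event, now;

	clock_gettime(CLOCK_MONOTONIC, &last_event);	/* "previous edge" */
	usleep(2500);					/* pretend the line was idle */
	clock_gettime(CLOCK_MONOTONIC, &now);		/* "current edge" */

	printf("duration: %lld us\n", (long long)elapsed_us(&last_event, &now));
	return 0;
}
```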
```diff
@@ -186,7 +186,7 @@
                 dev->raw->this_ev = *ev;
         }
 
-        /* Enter idle mode if nessesary */
+        /* Enter idle mode if necessary */
         if (!ev->pulse && dev->timeout &&
             dev->raw->this_ev.duration >= dev->timeout)
                 ir_raw_event_set_idle(dev, true);
```
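The idle check itself is unchanged; what matters is that `this_ev.duration` and `dev->timeout` are now both expressed in microseconds, so the comparison stays consistent. A small sketch restating the decision with hypothetical names and µs units:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative restatement of the idle check above: once a space (non-pulse)
 * has accumulated at least the receiver timeout, the line is considered idle.
 * Names and units follow the patch; this is not rc-core code.
 */
static bool should_go_idle(bool pulse, uint32_t accumulated_us, uint32_t timeout_us)
{
	return !pulse && timeout_us && accumulated_us >= timeout_us;
}

int main(void)
{
	printf("%d\n", should_go_idle(false, 130000, 125000));	/* 1: long space */
	printf("%d\n", should_go_idle(true, 130000, 125000));	/* 0: still a pulse */
	return 0;
}
```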
```diff
@@ -210,7 +210,7 @@
         if (idle) {
                 dev->raw->this_ev.timeout = true;
                 ir_raw_event_store(dev, &dev->raw->this_ev);
-                init_ir_raw_event(&dev->raw->this_ev);
+                dev->raw->this_ev = (struct ir_raw_event) {};
         }
 
         if (dev->s_idle)
```
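The removed `init_ir_raw_event()` helper only had to clear the event; assigning an empty compound literal does the same thing. A standalone sketch with a simplified stand-in struct (not the real `struct ir_raw_event` layout):

```c
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Simplified stand-in; the real struct ir_raw_event lives in
 * include/media/rc-core.h and has a different layout.
 */
struct fake_ir_raw_event {
	uint32_t duration;
	bool pulse;
	bool timeout;
};

int main(void)
{
	struct fake_ir_raw_event ev = { .duration = 1234, .timeout = true };

	/* Same idiom as the "+" line in the hunk above: an empty compound
	 * literal (a GNU C / C23 idiom the kernel relies on) zeroes every
	 * member, which is all the old init helper needed to do.
	 */
	ev = (struct fake_ir_raw_event) {};

	assert(ev.duration == 0 && !ev.pulse && !ev.timeout);
	return 0;
}
```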
```diff
@@ -275,7 +275,7 @@
         if (timeout == 0)
                 timeout = IR_DEFAULT_TIMEOUT;
         else
-                timeout += MS_TO_NS(10);
+                timeout += MS_TO_US(10);
 
         if (timeout < dev->min_timeout)
                 timeout = dev->min_timeout;
```
```diff
@@ -561,17 +561,17 @@
 
         spin_lock_irqsave(&dev->raw->edge_spinlock, flags);
         interval = ktime_sub(ktime_get(), dev->raw->last_event);
-        if (ktime_to_ns(interval) >= dev->timeout) {
-                DEFINE_IR_RAW_EVENT(ev);
-
-                ev.timeout = true;
-                ev.duration = ktime_to_ns(interval);
+        if (ktime_to_us(interval) >= dev->timeout) {
+                struct ir_raw_event ev = {
+                        .timeout = true,
+                        .duration = ktime_to_us(interval)
+                };
 
                 ir_raw_event_store(dev, &ev);
         } else {
                 mod_timer(&dev->raw->edge_handle,
-                          jiffies + nsecs_to_jiffies(dev->timeout -
-                                                     ktime_to_ns(interval)));
+                          jiffies + usecs_to_jiffies(dev->timeout -
+                                                     ktime_to_us(interval)));
         }
         spin_unlock_irqrestore(&dev->raw->edge_spinlock, flags);
 
```
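Here the remaining time until the receiver timeout is already held in microseconds, so the timer is re-armed with `usecs_to_jiffies()` rather than `nsecs_to_jiffies()`. A rough model of that conversion, rounding up so the timer cannot fire before the requested delay has passed; this is an illustration of the arithmetic, not the kernel's implementation:

```c
#include <stdio.h>

/* Convert a microsecond delay to timer ticks at a given tick rate,
 * rounding up, in the spirit of usecs_to_jiffies() in the hunk above.
 */
static unsigned long usecs_to_ticks(unsigned long long us, unsigned long hz)
{
	return (unsigned long)((us * hz + 999999ULL) / 1000000ULL);
}

int main(void)
{
	unsigned long long timeout_us = 125000;	/* e.g. an IR timeout of 125 ms */
	unsigned long long elapsed_us = 40000;	/* time since the last edge */

	/* 85 ms remaining at 250 ticks/s -> 21.25 ticks, rounded up to 22 */
	printf("re-arm in %lu ticks (HZ=250)\n",
	       usecs_to_ticks(timeout_us - elapsed_us, 250));
	return 0;
}
```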