 .. |  .. |
  8 |   8 | #include <linux/bpf_lirc.h>
  9 |   9 | #include "rc-core-priv.h"
 10 |  10 |
    |  11 | +#define lirc_rcu_dereference(p) \
    |  12 | +	rcu_dereference_protected(p, lockdep_is_held(&ir_raw_handler_lock))
    |  13 | +
 11 |  14 | /*
 12 |  15 |  * BPF interface for raw IR
 13 |  16 |  */
 .. |  .. |
 32 |  35 | 	.arg1_type = ARG_PTR_TO_CTX,
 33 |  36 | };
 34 |  37 |
 35 |     | -/*
 36 |     | - * Currently rc-core does not support 64-bit scancodes, but there are many
 37 |     | - * known protocols with more than 32 bits. So, define the interface as u64
 38 |     | - * as a future-proof.
 39 |     | - */
 40 |  38 | BPF_CALL_4(bpf_rc_keydown, u32*, sample, u32, protocol, u64, scancode,
 41 |  39 | 	   u32, toggle)
 42 |  40 | {
 .. |  .. |
 59 |  57 | 	.arg4_type = ARG_ANYTHING,
 60 |  58 | };
 61 |  59 |
    |  60 | +BPF_CALL_3(bpf_rc_pointer_rel, u32*, sample, s32, rel_x, s32, rel_y)
    |  61 | +{
    |  62 | +	struct ir_raw_event_ctrl *ctrl;
    |  63 | +
    |  64 | +	ctrl = container_of(sample, struct ir_raw_event_ctrl, bpf_sample);
    |  65 | +
    |  66 | +	input_report_rel(ctrl->dev->input_dev, REL_X, rel_x);
    |  67 | +	input_report_rel(ctrl->dev->input_dev, REL_Y, rel_y);
    |  68 | +	input_sync(ctrl->dev->input_dev);
    |  69 | +
    |  70 | +	return 0;
    |  71 | +}
    |  72 | +
    |  73 | +static const struct bpf_func_proto rc_pointer_rel_proto = {
    |  74 | +	.func = bpf_rc_pointer_rel,
    |  75 | +	.gpl_only = true,
    |  76 | +	.ret_type = RET_INTEGER,
    |  77 | +	.arg1_type = ARG_PTR_TO_CTX,
    |  78 | +	.arg2_type = ARG_ANYTHING,
    |  79 | +	.arg3_type = ARG_ANYTHING,
    |  80 | +};
    |  81 | +
 62 |  82 | static const struct bpf_func_proto *
 63 |  83 | lirc_mode2_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 64 |  84 | {
 .. |  .. |
 67 |  87 | 		return &rc_repeat_proto;
 68 |  88 | 	case BPF_FUNC_rc_keydown:
 69 |  89 | 		return &rc_keydown_proto;
    |  90 | +	case BPF_FUNC_rc_pointer_rel:
    |  91 | +		return &rc_pointer_rel_proto;
 70 |  92 | 	case BPF_FUNC_map_lookup_elem:
 71 |  93 | 		return &bpf_map_lookup_elem_proto;
 72 |  94 | 	case BPF_FUNC_map_update_elem:
 73 |  95 | 		return &bpf_map_update_elem_proto;
 74 |  96 | 	case BPF_FUNC_map_delete_elem:
 75 |  97 | 		return &bpf_map_delete_elem_proto;
    |  98 | +	case BPF_FUNC_map_push_elem:
    |  99 | +		return &bpf_map_push_elem_proto;
    | 100 | +	case BPF_FUNC_map_pop_elem:
    | 101 | +		return &bpf_map_pop_elem_proto;
    | 102 | +	case BPF_FUNC_map_peek_elem:
    | 103 | +		return &bpf_map_peek_elem_proto;
 76 | 104 | 	case BPF_FUNC_ktime_get_ns:
 77 | 105 | 		return &bpf_ktime_get_ns_proto;
 78 | 106 | 	case BPF_FUNC_ktime_get_boot_ns:
 .. |  .. |
 82 | 110 | 	case BPF_FUNC_get_prandom_u32:
 83 | 111 | 		return &bpf_get_prandom_u32_proto;
 84 | 112 | 	case BPF_FUNC_trace_printk:
 85 |     | -		if (capable(CAP_SYS_ADMIN))
    | 113 | +		if (perfmon_capable())
 86 | 114 | 			return bpf_get_trace_printk_proto();
 87 |     | -		/* fall through */
    | 115 | +		fallthrough;
 88 | 116 | 	default:
 89 | 117 | 		return NULL;
 90 | 118 | 	}
 .. |  .. |
108 | 136 |
109 | 137 | static int lirc_bpf_attach(struct rc_dev *rcdev, struct bpf_prog *prog)
110 | 138 | {
111 |     | -	struct bpf_prog_array __rcu *old_array;
    | 139 | +	struct bpf_prog_array *old_array;
112 | 140 | 	struct bpf_prog_array *new_array;
113 | 141 | 	struct ir_raw_event_ctrl *raw;
114 | 142 | 	int ret;
 .. |  .. |
126 | 154 | 		goto unlock;
127 | 155 | 	}
128 | 156 |
129 |     | -	if (raw->progs && bpf_prog_array_length(raw->progs) >= BPF_MAX_PROGS) {
    | 157 | +	old_array = lirc_rcu_dereference(raw->progs);
    | 158 | +	if (old_array && bpf_prog_array_length(old_array) >= BPF_MAX_PROGS) {
130 | 159 | 		ret = -E2BIG;
131 | 160 | 		goto unlock;
132 | 161 | 	}
133 | 162 |
134 |     | -	old_array = raw->progs;
135 | 163 | 	ret = bpf_prog_array_copy(old_array, NULL, prog, &new_array);
136 | 164 | 	if (ret < 0)
137 | 165 | 		goto unlock;
 .. |  .. |
146 | 174 |
147 | 175 | static int lirc_bpf_detach(struct rc_dev *rcdev, struct bpf_prog *prog)
148 | 176 | {
149 |     | -	struct bpf_prog_array __rcu *old_array;
    | 177 | +	struct bpf_prog_array *old_array;
150 | 178 | 	struct bpf_prog_array *new_array;
151 | 179 | 	struct ir_raw_event_ctrl *raw;
152 | 180 | 	int ret;
 .. |  .. |
164 | 192 | 		goto unlock;
165 | 193 | 	}
166 | 194 |
167 |     | -	old_array = raw->progs;
    | 195 | +	old_array = lirc_rcu_dereference(raw->progs);
168 | 196 | 	ret = bpf_prog_array_copy(old_array, prog, NULL, &new_array);
169 | 197 | 	/*
170 | 198 | 	 * Do not use bpf_prog_array_delete_safe() as we would end up
 .. |  .. |
195 | 223 | /*
196 | 224 |  * This should be called once the rc thread has been stopped, so there can be
197 | 225 |  * no concurrent bpf execution.
    | 226 | + *
    | 227 | + * Should be called with the ir_raw_handler_lock held.
198 | 228 |  */
199 | 229 | void lirc_bpf_free(struct rc_dev *rcdev)
200 | 230 | {
201 | 231 | 	struct bpf_prog_array_item *item;
    | 232 | +	struct bpf_prog_array *array;
202 | 233 |
203 |     | -	if (!rcdev->raw->progs)
    | 234 | +	array = lirc_rcu_dereference(rcdev->raw->progs);
    | 235 | +	if (!array)
204 | 236 | 		return;
205 | 237 |
206 |     | -	item = rcu_dereference(rcdev->raw->progs)->items;
207 |     | -	while (item->prog) {
    | 238 | +	for (item = array->items; item->prog; item++)
208 | 239 | 		bpf_prog_put(item->prog);
209 |     | -		item++;
210 |     | -	}
211 | 240 |
212 |     | -	bpf_prog_array_free(rcdev->raw->progs);
    | 241 | +	bpf_prog_array_free(array);
213 | 242 | }
214 | 243 |
215 | 244 | int lirc_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog)
 .. |  .. |
262 | 291 | int lirc_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr)
263 | 292 | {
264 | 293 | 	__u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
265 |     | -	struct bpf_prog_array __rcu *progs;
    | 294 | +	struct bpf_prog_array *progs;
266 | 295 | 	struct rc_dev *rcdev;
267 | 296 | 	u32 cnt, flags = 0;
268 | 297 | 	int ret;
 .. |  .. |
283 | 312 | 	if (ret)
284 | 313 | 		goto put;
285 | 314 |
286 |     | -	progs = rcdev->raw->progs;
    | 315 | +	progs = lirc_rcu_dereference(rcdev->raw->progs);
287 | 316 | 	cnt = progs ? bpf_prog_array_length(progs) : 0;
288 | 317 |
289 | 318 | 	if (copy_to_user(&uattr->query.prog_cnt, &cnt, sizeof(cnt))) {
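
For context, a lirc_mode2 BPF program attached to a lirc chardev with BPF_PROG_ATTACH can call the new bpf_rc_pointer_rel() helper directly from its sample handler. Below is a minimal sketch modeled on the kernel's lirc_mode2 selftest; the program name and the decoding scheme (packing relative x/y movement into bits of the pulse duration) are purely illustrative and not part of this patch, and it assumes libbpf's bpf_helpers.h for SEC() and the generated helper declarations.

// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <linux/lirc.h>
#include <bpf/bpf_helpers.h>

SEC("lirc_mode2")
int ir_decoder(unsigned int *sample)
{
	unsigned int duration;

	/* Only raw IR pulses carry data in this illustrative encoding. */
	if (!LIRC_IS_PULSE(*sample))
		return 0;

	duration = LIRC_VALUE(*sample);

	/* Bit 16 selects a keypress, bit 17 a relative pointer movement. */
	if (duration & 0x10000)
		bpf_rc_keydown(sample, 0x40, duration & 0xffff, 0);
	if (duration & 0x20000)
		bpf_rc_pointer_rel(sample, (duration >> 8) & 0xff,
				   duration & 0xff);

	return 0;
}

/* rc_pointer_rel_proto is gpl_only, so the program must declare a GPL license. */
char _license[] SEC("license") = "GPL";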
---|