2024-05-13 9d77db3c730780c8ef5ccd4b66403ff5675cfe4e
kernel/kernel/bpf/offload.c
@@ -33,7 +33,9 @@
 static DECLARE_RWSEM(bpf_devs_lock);
 
 struct bpf_offload_dev {
+	const struct bpf_prog_offload_ops *ops;
 	struct list_head netdevs;
+	void *priv;
 };
 
 struct bpf_offload_netdev {
@@ -106,6 +108,7 @@
 		err = -EINVAL;
 		goto err_unlock;
 	}
+	offload->offdev = ondev->offdev;
 	prog->aux->offload = offload;
 	list_add_tail(&offload->offloads, &ondev->progs);
 	dev_put(offload->netdev);
@@ -121,40 +124,20 @@
 	return err;
 }
 
-static int __bpf_offload_ndo(struct bpf_prog *prog, enum bpf_netdev_command cmd,
-			     struct netdev_bpf *data)
+int bpf_prog_offload_verifier_prep(struct bpf_prog *prog)
 {
-	struct bpf_prog_offload *offload = prog->aux->offload;
-	struct net_device *netdev;
+	struct bpf_prog_offload *offload;
+	int ret = -ENODEV;
 
-	ASSERT_RTNL();
+	down_read(&bpf_devs_lock);
+	offload = prog->aux->offload;
+	if (offload) {
+		ret = offload->offdev->ops->prepare(prog);
+		offload->dev_state = !ret;
+	}
+	up_read(&bpf_devs_lock);
 
-	if (!offload)
-		return -ENODEV;
-	netdev = offload->netdev;
-
-	data->command = cmd;
-
-	return netdev->netdev_ops->ndo_bpf(netdev, data);
-}
-
-int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env)
-{
-	struct netdev_bpf data = {};
-	int err;
-
-	data.verifier.prog = env->prog;
-
-	rtnl_lock();
-	err = __bpf_offload_ndo(env->prog, BPF_OFFLOAD_VERIFIER_PREP, &data);
-	if (err)
-		goto exit_unlock;
-
-	env->prog->aux->offload->dev_ops = data.verifier.ops;
-	env->prog->aux->offload->dev_state = true;
-exit_unlock:
-	rtnl_unlock();
-	return err;
+	return ret;
 }
 
 int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
@@ -166,21 +149,72 @@
 	down_read(&bpf_devs_lock);
 	offload = env->prog->aux->offload;
 	if (offload)
-		ret = offload->dev_ops->insn_hook(env, insn_idx, prev_insn_idx);
+		ret = offload->offdev->ops->insn_hook(env, insn_idx,
+						      prev_insn_idx);
 	up_read(&bpf_devs_lock);
 
 	return ret;
 }
 
+int bpf_prog_offload_finalize(struct bpf_verifier_env *env)
+{
+	struct bpf_prog_offload *offload;
+	int ret = -ENODEV;
+
+	down_read(&bpf_devs_lock);
+	offload = env->prog->aux->offload;
+	if (offload) {
+		if (offload->offdev->ops->finalize)
+			ret = offload->offdev->ops->finalize(env);
+		else
+			ret = 0;
+	}
+	up_read(&bpf_devs_lock);
+
+	return ret;
+}
+
+void
+bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off,
+			      struct bpf_insn *insn)
+{
+	const struct bpf_prog_offload_ops *ops;
+	struct bpf_prog_offload *offload;
+	int ret = -EOPNOTSUPP;
+
+	down_read(&bpf_devs_lock);
+	offload = env->prog->aux->offload;
+	if (offload) {
+		ops = offload->offdev->ops;
+		if (!offload->opt_failed && ops->replace_insn)
+			ret = ops->replace_insn(env, off, insn);
+		offload->opt_failed |= ret;
+	}
+	up_read(&bpf_devs_lock);
+}
+
+void
+bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt)
+{
+	struct bpf_prog_offload *offload;
+	int ret = -EOPNOTSUPP;
+
+	down_read(&bpf_devs_lock);
+	offload = env->prog->aux->offload;
+	if (offload) {
+		if (!offload->opt_failed && offload->offdev->ops->remove_insns)
+			ret = offload->offdev->ops->remove_insns(env, off, cnt);
+		offload->opt_failed |= ret;
+	}
+	up_read(&bpf_devs_lock);
+}
+
 static void __bpf_prog_offload_destroy(struct bpf_prog *prog)
 {
 	struct bpf_prog_offload *offload = prog->aux->offload;
-	struct netdev_bpf data = {};
-
-	data.offload.prog = prog;
 
 	if (offload->dev_state)
-		WARN_ON(__bpf_offload_ndo(prog, BPF_OFFLOAD_DESTROY, &data));
+		offload->offdev->ops->destroy(prog);
 
 	/* Make sure BPF_PROG_GET_NEXT_ID can't find this dead program */
 	bpf_prog_free_id(prog, true);
@@ -192,24 +226,22 @@
 
 void bpf_prog_offload_destroy(struct bpf_prog *prog)
 {
-	rtnl_lock();
 	down_write(&bpf_devs_lock);
 	if (prog->aux->offload)
 		__bpf_prog_offload_destroy(prog);
 	up_write(&bpf_devs_lock);
-	rtnl_unlock();
 }
 
 static int bpf_prog_offload_translate(struct bpf_prog *prog)
 {
-	struct netdev_bpf data = {};
-	int ret;
+	struct bpf_prog_offload *offload;
+	int ret = -ENODEV;
 
-	data.offload.prog = prog;
-
-	rtnl_lock();
-	ret = __bpf_offload_ndo(prog, BPF_OFFLOAD_TRANSLATE, &data);
-	rtnl_unlock();
+	down_read(&bpf_devs_lock);
+	offload = prog->aux->offload;
+	if (offload)
+		ret = offload->offdev->ops->translate(prog);
+	up_read(&bpf_devs_lock);
 
 	return ret;
 }
@@ -270,14 +302,14 @@
 	struct inode *ns_inode;
 	struct path ns_path;
 	char __user *uinsns;
-	void *res;
+	int res;
 	u32 ulen;
 
 	res = ns_get_path_cb(&ns_path, bpf_prog_offload_info_fill_ns, &args);
-	if (IS_ERR(res)) {
+	if (res) {
 		if (!info->ifindex)
 			return -ENODEV;
-		return PTR_ERR(res);
+		return res;
 	}
 
 	down_read(&bpf_devs_lock);
@@ -494,13 +526,13 @@
 	};
 	struct inode *ns_inode;
 	struct path ns_path;
-	void *res;
+	int res;
 
 	res = ns_get_path_cb(&ns_path, bpf_map_offload_info_fill_ns, &args);
-	if (IS_ERR(res)) {
+	if (res) {
 		if (!info->ifindex)
 			return -ENODEV;
-		return PTR_ERR(res);
+		return res;
 	}
 
 	ns_inode = ns_path.dentry->d_inode;
@@ -637,7 +669,8 @@
 }
 EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_unregister);
 
-struct bpf_offload_dev *bpf_offload_dev_create(void)
+struct bpf_offload_dev *
+bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv)
 {
 	struct bpf_offload_dev *offdev;
 	int err;
@@ -657,6 +690,8 @@
 	if (!offdev)
 		return ERR_PTR(-ENOMEM);
 
+	offdev->ops = ops;
+	offdev->priv = priv;
 	INIT_LIST_HEAD(&offdev->netdevs);
 
 	return offdev;
@@ -669,3 +704,9 @@
 	kfree(offdev);
 }
 EXPORT_SYMBOL_GPL(bpf_offload_dev_destroy);
+
+void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev)
+{
+	return offdev->priv;
+}
+EXPORT_SYMBOL_GPL(bpf_offload_dev_priv);
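For readers unfamiliar with the new interface, here is a minimal sketch of how a driver might consume it after this change. The callback signatures are taken from the call sites above; everything prefixed with my_ (the device struct, the probe function, the placeholder callbacks) is hypothetical illustration rather than part of this patch, and bpf_offload_dev_netdev_register() is assumed to keep its existing (offdev, netdev) signature.

/* Driver-side sketch of the ops-based offload API.
 * Names prefixed with "my_" are hypothetical placeholders.
 */
static int my_prepare(struct bpf_prog *prog)
{
	/* allocate per-program verifier/translation state */
	return 0;
}

static int my_insn_hook(struct bpf_verifier_env *env,
			int insn_idx, int prev_insn_idx)
{
	/* inspect each instruction as the verifier walks the program */
	return 0;
}

static int my_translate(struct bpf_prog *prog)
{
	/* generate the device image for the verified program */
	return 0;
}

static void my_destroy(struct bpf_prog *prog)
{
	/* release per-program state */
}

static const struct bpf_prog_offload_ops my_bpf_ops = {
	.prepare	= my_prepare,
	.insn_hook	= my_insn_hook,
	.translate	= my_translate,
	.destroy	= my_destroy,
	/* .finalize, .replace_insn and .remove_insns may be left NULL:
	 * a missing finalize is treated as success, and missing
	 * replace_insn/remove_insns only mark the optimization as failed
	 * (opt_failed) for the offloaded image.
	 */
};

static int my_probe(struct my_device *mydev)
{
	struct bpf_offload_dev *offdev;

	/* the priv pointer is handed back later via bpf_offload_dev_priv() */
	offdev = bpf_offload_dev_create(&my_bpf_ops, mydev);
	if (IS_ERR(offdev))
		return PTR_ERR(offdev);

	mydev->bpf_offdev = offdev;
	return bpf_offload_dev_netdev_register(offdev, mydev->netdev);
}

The priv pointer passed to bpf_offload_dev_create() is the one returned by the new bpf_offload_dev_priv() helper, so the callbacks can reach driver state without a global lookup, and all callbacks now run under bpf_devs_lock instead of the RTNL-protected ndo_bpf path.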