forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-08 01573e231f18eb2d99162747186f59511f56b64d
kernel/net/sched/cls_flower.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * net/sched/cls_flower.c		Flower classifier
  *
  * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
  */

 #include <linux/kernel.h>
@@ -14,6 +10,7 @@
 #include <linux/module.h>
 #include <linux/rhashtable.h>
 #include <linux/workqueue.h>
+#include <linux/refcount.h>

 #include <linux/if_ether.h>
 #include <linux/in6.h>
@@ -25,12 +22,21 @@
 #include <net/ip.h>
 #include <net/flow_dissector.h>
 #include <net/geneve.h>
+#include <net/vxlan.h>
+#include <net/erspan.h>

 #include <net/dst.h>
 #include <net/dst_metadata.h>

+#include <uapi/linux/netfilter/nf_conntrack_common.h>
+
+#define TCA_FLOWER_KEY_CT_FLAGS_MAX \
+		((__TCA_FLOWER_KEY_CT_FLAGS_MAX - 1) << 1)
+#define TCA_FLOWER_KEY_CT_FLAGS_MASK \
+		(TCA_FLOWER_KEY_CT_FLAGS_MAX - 1)
+
 struct fl_flow_key {
-	int indev_ifindex;
+	struct flow_dissector_key_meta meta;
 	struct flow_dissector_key_control control;
 	struct flow_dissector_key_control enc_control;
 	struct flow_dissector_key_basic basic;
@@ -55,6 +61,15 @@
 	struct flow_dissector_key_ip ip;
 	struct flow_dissector_key_ip enc_ip;
 	struct flow_dissector_key_enc_opts enc_opts;
+	union {
+		struct flow_dissector_key_ports tp;
+		struct {
+			struct flow_dissector_key_ports tp_min;
+			struct flow_dissector_key_ports tp_max;
+		};
+	} tp_range;
+	struct flow_dissector_key_ct ct;
+	struct flow_dissector_key_hash hash;
 } __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */

 struct fl_flow_mask_range {
@@ -65,6 +80,7 @@
 struct fl_flow_mask {
 	struct fl_flow_key key;
 	struct fl_flow_mask_range range;
+	u32 flags;
 	struct rhash_head ht_node;
 	struct rhashtable ht;
 	struct rhashtable_params filter_ht_params;
@@ -72,6 +88,7 @@
 	struct list_head filters;
 	struct rcu_work rwork;
 	struct list_head list;
+	refcount_t refcnt;
 };

 struct fl_flow_tmplt {
@@ -83,7 +100,9 @@

 struct cls_fl_head {
 	struct rhashtable ht;
+	spinlock_t masks_lock; /* Protect masks list */
 	struct list_head masks;
+	struct list_head hw_filters;
 	struct rcu_work rwork;
 	struct idr handle_idr;
 };
@@ -96,11 +115,18 @@
 	struct tcf_result res;
 	struct fl_flow_key key;
 	struct list_head list;
+	struct list_head hw_list;
 	u32 handle;
 	u32 flags;
-	unsigned int in_hw_count;
+	u32 in_hw_count;
 	struct rcu_work rwork;
 	struct net_device *hw_dev;
+	/* Flower classifier is unlocked, which means that its reference counter
+	 * can be changed concurrently without any kind of external
+	 * synchronization. Use atomic reference counter to be concurrency-safe.
+	 */
+	refcount_t refcnt;
+	bool deleted;
 };

 static const struct rhashtable_params mask_ht_params = {
@@ -179,37 +205,129 @@
 	memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
 }

-static struct cls_fl_filter *fl_lookup(struct fl_flow_mask *mask,
-				       struct fl_flow_key *mkey)
+static bool fl_range_port_dst_cmp(struct cls_fl_filter *filter,
+				  struct fl_flow_key *key,
+				  struct fl_flow_key *mkey)
+{
+	u16 min_mask, max_mask, min_val, max_val;
+
+	min_mask = ntohs(filter->mask->key.tp_range.tp_min.dst);
+	max_mask = ntohs(filter->mask->key.tp_range.tp_max.dst);
+	min_val = ntohs(filter->key.tp_range.tp_min.dst);
+	max_val = ntohs(filter->key.tp_range.tp_max.dst);
+
+	if (min_mask && max_mask) {
+		if (ntohs(key->tp_range.tp.dst) < min_val ||
+		    ntohs(key->tp_range.tp.dst) > max_val)
+			return false;
+
+		/* skb does not have min and max values */
+		mkey->tp_range.tp_min.dst = filter->mkey.tp_range.tp_min.dst;
+		mkey->tp_range.tp_max.dst = filter->mkey.tp_range.tp_max.dst;
+	}
+	return true;
+}
+
+static bool fl_range_port_src_cmp(struct cls_fl_filter *filter,
+				  struct fl_flow_key *key,
+				  struct fl_flow_key *mkey)
+{
+	u16 min_mask, max_mask, min_val, max_val;
+
+	min_mask = ntohs(filter->mask->key.tp_range.tp_min.src);
+	max_mask = ntohs(filter->mask->key.tp_range.tp_max.src);
+	min_val = ntohs(filter->key.tp_range.tp_min.src);
+	max_val = ntohs(filter->key.tp_range.tp_max.src);
+
+	if (min_mask && max_mask) {
+		if (ntohs(key->tp_range.tp.src) < min_val ||
+		    ntohs(key->tp_range.tp.src) > max_val)
+			return false;
+
+		/* skb does not have min and max values */
+		mkey->tp_range.tp_min.src = filter->mkey.tp_range.tp_min.src;
+		mkey->tp_range.tp_max.src = filter->mkey.tp_range.tp_max.src;
+	}
+	return true;
+}
+
+static struct cls_fl_filter *__fl_lookup(struct fl_flow_mask *mask,
+					 struct fl_flow_key *mkey)
 {
 	return rhashtable_lookup_fast(&mask->ht, fl_key_get_start(mkey, mask),
 				      mask->filter_ht_params);
 }

+static struct cls_fl_filter *fl_lookup_range(struct fl_flow_mask *mask,
+					     struct fl_flow_key *mkey,
+					     struct fl_flow_key *key)
+{
+	struct cls_fl_filter *filter, *f;
+
+	list_for_each_entry_rcu(filter, &mask->filters, list) {
+		if (!fl_range_port_dst_cmp(filter, key, mkey))
+			continue;
+
+		if (!fl_range_port_src_cmp(filter, key, mkey))
+			continue;
+
+		f = __fl_lookup(mask, mkey);
+		if (f)
+			return f;
+	}
+	return NULL;
+}
+
+static noinline_for_stack
+struct cls_fl_filter *fl_mask_lookup(struct fl_flow_mask *mask, struct fl_flow_key *key)
+{
+	struct fl_flow_key mkey;
+
+	fl_set_masked_key(&mkey, key, mask);
+	if ((mask->flags & TCA_FLOWER_MASK_FLAGS_RANGE))
+		return fl_lookup_range(mask, &mkey, key);
+
+	return __fl_lookup(mask, &mkey);
+}
+
+static u16 fl_ct_info_to_flower_map[] = {
+	[IP_CT_ESTABLISHED] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
+					TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED,
+	[IP_CT_RELATED] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
+					TCA_FLOWER_KEY_CT_FLAGS_RELATED,
+	[IP_CT_ESTABLISHED_REPLY] =	TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
+					TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED,
+	[IP_CT_RELATED_REPLY] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
+					TCA_FLOWER_KEY_CT_FLAGS_RELATED,
+	[IP_CT_NEW] =			TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
+					TCA_FLOWER_KEY_CT_FLAGS_NEW,
+};
+
 static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
 		       struct tcf_result *res)
 {
 	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
-	struct cls_fl_filter *f;
-	struct fl_flow_mask *mask;
 	struct fl_flow_key skb_key;
-	struct fl_flow_key skb_mkey;
+	struct fl_flow_mask *mask;
+	struct cls_fl_filter *f;

 	list_for_each_entry_rcu(mask, &head->masks, list) {
 		flow_dissector_init_keys(&skb_key.control, &skb_key.basic);
 		fl_clear_masked_range(&skb_key, mask);

-		skb_key.indev_ifindex = skb->skb_iif;
+		skb_flow_dissect_meta(skb, &mask->dissector, &skb_key);
 		/* skb_flow_dissect() does not set n_proto in case an unknown
 		 * protocol, so do it rather here.
 		 */
 		skb_key.basic.n_proto = skb_protocol(skb, false);
 		skb_flow_dissect_tunnel_info(skb, &mask->dissector, &skb_key);
+		skb_flow_dissect_ct(skb, &mask->dissector, &skb_key,
+				    fl_ct_info_to_flower_map,
+				    ARRAY_SIZE(fl_ct_info_to_flower_map));
+		skb_flow_dissect_hash(skb, &mask->dissector, &skb_key);
 		skb_flow_dissect(skb, &mask->dissector, &skb_key, 0);

-		fl_set_masked_key(&skb_mkey, &skb_key, mask);
-
-		f = fl_lookup(mask, &skb_mkey);
+		f = fl_mask_lookup(mask, &skb_key);
 		if (f && !tc_skip_sw(f->flags)) {
 			*res = f->res;
 			return tcf_exts_exec(skb, &f->exts, res);
@@ -226,16 +344,22 @@
 	if (!head)
 		return -ENOBUFS;

+	spin_lock_init(&head->masks_lock);
 	INIT_LIST_HEAD_RCU(&head->masks);
+	INIT_LIST_HEAD(&head->hw_filters);
 	rcu_assign_pointer(tp->root, head);
 	idr_init(&head->handle_idr);

 	return rhashtable_init(&head->ht, &mask_ht_params);
 }

-static void fl_mask_free(struct fl_flow_mask *mask)
+static void fl_mask_free(struct fl_flow_mask *mask, bool mask_init_done)
 {
-	rhashtable_destroy(&mask->ht);
+	/* temporary masks don't have their filters list and ht initialized */
+	if (mask_init_done) {
+		WARN_ON(!list_empty(&mask->filters));
+		rhashtable_destroy(&mask->ht);
+	}
 	kfree(mask);
 }

@@ -244,23 +368,41 @@
 	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
 						 struct fl_flow_mask, rwork);

-	fl_mask_free(mask);
+	fl_mask_free(mask, true);
 }

-static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask,
-			bool async)
+static void fl_uninit_mask_free_work(struct work_struct *work)
 {
-	if (!list_empty(&mask->filters))
+	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
+						 struct fl_flow_mask, rwork);
+
+	fl_mask_free(mask, false);
+}
+
+static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask)
+{
+	if (!refcount_dec_and_test(&mask->refcnt))
 		return false;

 	rhashtable_remove_fast(&head->ht, &mask->ht_node, mask_ht_params);
+
+	spin_lock(&head->masks_lock);
 	list_del_rcu(&mask->list);
-	if (async)
-		tcf_queue_work(&mask->rwork, fl_mask_free_work);
-	else
-		fl_mask_free(mask);
+	spin_unlock(&head->masks_lock);
+
+	tcf_queue_work(&mask->rwork, fl_mask_free_work);

 	return true;
+}
+
+static struct cls_fl_head *fl_head_dereference(struct tcf_proto *tp)
+{
+	/* Flower classifier only changes root pointer during init and destroy.
+	 * Users must obtain reference to tcf_proto instance before calling its
+	 * API, so tp->root pointer is protected from concurrent call to
+	 * fl_destroy() by reference counting.
+	 */
+	return rcu_dereference_raw(tp->root);
 }

 static void __fl_destroy_filter(struct cls_fl_filter *f)
@@ -275,52 +417,63 @@
 	struct cls_fl_filter *f = container_of(to_rcu_work(work),
 					       struct cls_fl_filter, rwork);

-	rtnl_lock();
 	__fl_destroy_filter(f);
-	rtnl_unlock();
 }

 static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f,
-				 struct netlink_ext_ack *extack)
+				 bool rtnl_held, struct netlink_ext_ack *extack)
 {
-	struct tc_cls_flower_offload cls_flower = {};
 	struct tcf_block *block = tp->chain->block;
+	struct flow_cls_offload cls_flower = {};

 	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
-	cls_flower.command = TC_CLSFLOWER_DESTROY;
+	cls_flower.command = FLOW_CLS_DESTROY;
 	cls_flower.cookie = (unsigned long) f;

-	tc_setup_cb_call(block, &f->exts, TC_SETUP_CLSFLOWER,
-			 &cls_flower, false);
-	tcf_block_offload_dec(block, &f->flags);
+	tc_setup_cb_destroy(block, tp, TC_SETUP_CLSFLOWER, &cls_flower, false,
+			    &f->flags, &f->in_hw_count, rtnl_held);
+
 }

 static int fl_hw_replace_filter(struct tcf_proto *tp,
-				struct cls_fl_filter *f,
+				struct cls_fl_filter *f, bool rtnl_held,
 				struct netlink_ext_ack *extack)
 {
-	struct tc_cls_flower_offload cls_flower = {};
 	struct tcf_block *block = tp->chain->block;
+	struct flow_cls_offload cls_flower = {};
 	bool skip_sw = tc_skip_sw(f->flags);
-	int err;
+	int err = 0;
+
+	cls_flower.rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts));
+	if (!cls_flower.rule)
+		return -ENOMEM;

 	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
-	cls_flower.command = TC_CLSFLOWER_REPLACE;
+	cls_flower.command = FLOW_CLS_REPLACE;
 	cls_flower.cookie = (unsigned long) f;
-	cls_flower.dissector = &f->mask->dissector;
-	cls_flower.mask = &f->mask->key;
-	cls_flower.key = &f->mkey;
-	cls_flower.exts = &f->exts;
+	cls_flower.rule->match.dissector = &f->mask->dissector;
+	cls_flower.rule->match.mask = &f->mask->key;
+	cls_flower.rule->match.key = &f->mkey;
 	cls_flower.classid = f->res.classid;

-	err = tc_setup_cb_call(block, &f->exts, TC_SETUP_CLSFLOWER,
-			       &cls_flower, skip_sw);
-	if (err < 0) {
-		fl_hw_destroy_filter(tp, f, NULL);
+	err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts);
+	if (err) {
+		kfree(cls_flower.rule);
+		if (skip_sw) {
+			NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
+			return err;
+		}
+		return 0;
+	}
+
+	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
+			      skip_sw, &f->flags, &f->in_hw_count, rtnl_held);
+	tc_cleanup_flow_action(&cls_flower.rule->action);
+	kfree(cls_flower.rule);
+
+	if (err) {
+		fl_hw_destroy_filter(tp, f, rtnl_held, NULL);
 		return err;
-	} else if (err > 0) {
-		f->in_hw_count = err;
-		tcf_block_offload_inc(block, &f->flags);
 	}

 	if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW))
....@@ -329,40 +482,80 @@
329482 return 0;
330483 }
331484
332
-static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f)
485
+static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f,
486
+ bool rtnl_held)
333487 {
334
- struct tc_cls_flower_offload cls_flower = {};
335488 struct tcf_block *block = tp->chain->block;
489
+ struct flow_cls_offload cls_flower = {};
336490
337491 tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, NULL);
338
- cls_flower.command = TC_CLSFLOWER_STATS;
492
+ cls_flower.command = FLOW_CLS_STATS;
339493 cls_flower.cookie = (unsigned long) f;
340
- cls_flower.exts = &f->exts;
341494 cls_flower.classid = f->res.classid;
342495
343
- tc_setup_cb_call(block, &f->exts, TC_SETUP_CLSFLOWER,
344
- &cls_flower, false);
496
+ tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false,
497
+ rtnl_held);
498
+
499
+ tcf_exts_stats_update(&f->exts, cls_flower.stats.bytes,
500
+ cls_flower.stats.pkts,
501
+ cls_flower.stats.drops,
502
+ cls_flower.stats.lastused,
503
+ cls_flower.stats.used_hw_stats,
504
+ cls_flower.stats.used_hw_stats_valid);
345505 }
346506
347
-static bool __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
348
- struct netlink_ext_ack *extack)
507
+static void __fl_put(struct cls_fl_filter *f)
349508 {
350
- struct cls_fl_head *head = rtnl_dereference(tp->root);
351
- bool async = tcf_exts_get_net(&f->exts);
352
- bool last;
509
+ if (!refcount_dec_and_test(&f->refcnt))
510
+ return;
353511
354
- idr_remove(&head->handle_idr, f->handle);
355
- list_del_rcu(&f->list);
356
- last = fl_mask_put(head, f->mask, async);
357
- if (!tc_skip_hw(f->flags))
358
- fl_hw_destroy_filter(tp, f, extack);
359
- tcf_unbind_filter(tp, &f->res);
360
- if (async)
512
+ if (tcf_exts_get_net(&f->exts))
361513 tcf_queue_work(&f->rwork, fl_destroy_filter_work);
362514 else
363515 __fl_destroy_filter(f);
516
+}
364517
365
- return last;
518
+static struct cls_fl_filter *__fl_get(struct cls_fl_head *head, u32 handle)
519
+{
520
+ struct cls_fl_filter *f;
521
+
522
+ rcu_read_lock();
523
+ f = idr_find(&head->handle_idr, handle);
524
+ if (f && !refcount_inc_not_zero(&f->refcnt))
525
+ f = NULL;
526
+ rcu_read_unlock();
527
+
528
+ return f;
529
+}
530
+
531
+static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
532
+ bool *last, bool rtnl_held,
533
+ struct netlink_ext_ack *extack)
534
+{
535
+ struct cls_fl_head *head = fl_head_dereference(tp);
536
+
537
+ *last = false;
538
+
539
+ spin_lock(&tp->lock);
540
+ if (f->deleted) {
541
+ spin_unlock(&tp->lock);
542
+ return -ENOENT;
543
+ }
544
+
545
+ f->deleted = true;
546
+ rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
547
+ f->mask->filter_ht_params);
548
+ idr_remove(&head->handle_idr, f->handle);
549
+ list_del_rcu(&f->list);
550
+ spin_unlock(&tp->lock);
551
+
552
+ *last = fl_mask_put(head, f->mask);
553
+ if (!tc_skip_hw(f->flags))
554
+ fl_hw_destroy_filter(tp, f, rtnl_held, extack);
555
+ tcf_unbind_filter(tp, &f->res);
556
+ __fl_put(f);
557
+
558
+ return 0;
366559 }
367560
368561 static void fl_destroy_sleepable(struct work_struct *work)
....@@ -376,15 +569,18 @@
376569 module_put(THIS_MODULE);
377570 }
378571
379
-static void fl_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack)
572
+static void fl_destroy(struct tcf_proto *tp, bool rtnl_held,
573
+ struct netlink_ext_ack *extack)
380574 {
381
- struct cls_fl_head *head = rtnl_dereference(tp->root);
575
+ struct cls_fl_head *head = fl_head_dereference(tp);
382576 struct fl_flow_mask *mask, *next_mask;
383577 struct cls_fl_filter *f, *next;
578
+ bool last;
384579
385580 list_for_each_entry_safe(mask, next_mask, &head->masks, list) {
386581 list_for_each_entry_safe(f, next, &mask->filters, list) {
387
- if (__fl_delete(tp, f, extack))
582
+ __fl_delete(tp, f, &last, rtnl_held, extack);
583
+ if (last)
388584 break;
389585 }
390586 }
....@@ -394,11 +590,18 @@
394590 tcf_queue_work(&head->rwork, fl_destroy_sleepable);
395591 }
396592
593
+static void fl_put(struct tcf_proto *tp, void *arg)
594
+{
595
+ struct cls_fl_filter *f = arg;
596
+
597
+ __fl_put(f);
598
+}
599
+
397600 static void *fl_get(struct tcf_proto *tp, u32 handle)
398601 {
399
- struct cls_fl_head *head = rtnl_dereference(tp->root);
602
+ struct cls_fl_head *head = fl_head_dereference(tp);
400603
401
- return idr_find(&head->handle_idr, handle);
604
+ return __fl_get(head, handle);
402605 }
403606
404607 static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
....@@ -472,6 +675,7 @@
472675 [TCA_FLOWER_KEY_MPLS_BOS] = { .type = NLA_U8 },
473676 [TCA_FLOWER_KEY_MPLS_TC] = { .type = NLA_U8 },
474677 [TCA_FLOWER_KEY_MPLS_LABEL] = { .type = NLA_U32 },
678
+ [TCA_FLOWER_KEY_MPLS_OPTS] = { .type = NLA_NESTED },
475679 [TCA_FLOWER_KEY_TCP_FLAGS] = { .type = NLA_U16 },
476680 [TCA_FLOWER_KEY_TCP_FLAGS_MASK] = { .type = NLA_U16 },
477681 [TCA_FLOWER_KEY_IP_TOS] = { .type = NLA_U8 },
....@@ -487,12 +691,31 @@
487691 [TCA_FLOWER_KEY_ENC_IP_TTL_MASK] = { .type = NLA_U8 },
488692 [TCA_FLOWER_KEY_ENC_OPTS] = { .type = NLA_NESTED },
489693 [TCA_FLOWER_KEY_ENC_OPTS_MASK] = { .type = NLA_NESTED },
694
+ [TCA_FLOWER_KEY_CT_STATE] =
695
+ NLA_POLICY_MASK(NLA_U16, TCA_FLOWER_KEY_CT_FLAGS_MASK),
696
+ [TCA_FLOWER_KEY_CT_STATE_MASK] =
697
+ NLA_POLICY_MASK(NLA_U16, TCA_FLOWER_KEY_CT_FLAGS_MASK),
698
+ [TCA_FLOWER_KEY_CT_ZONE] = { .type = NLA_U16 },
699
+ [TCA_FLOWER_KEY_CT_ZONE_MASK] = { .type = NLA_U16 },
700
+ [TCA_FLOWER_KEY_CT_MARK] = { .type = NLA_U32 },
701
+ [TCA_FLOWER_KEY_CT_MARK_MASK] = { .type = NLA_U32 },
702
+ [TCA_FLOWER_KEY_CT_LABELS] = { .type = NLA_BINARY,
703
+ .len = 128 / BITS_PER_BYTE },
704
+ [TCA_FLOWER_KEY_CT_LABELS_MASK] = { .type = NLA_BINARY,
705
+ .len = 128 / BITS_PER_BYTE },
490706 [TCA_FLOWER_FLAGS] = { .type = NLA_U32 },
707
+ [TCA_FLOWER_KEY_HASH] = { .type = NLA_U32 },
708
+ [TCA_FLOWER_KEY_HASH_MASK] = { .type = NLA_U32 },
709
+
491710 };
492711
493712 static const struct nla_policy
494713 enc_opts_policy[TCA_FLOWER_KEY_ENC_OPTS_MAX + 1] = {
714
+ [TCA_FLOWER_KEY_ENC_OPTS_UNSPEC] = {
715
+ .strict_start_type = TCA_FLOWER_KEY_ENC_OPTS_VXLAN },
495716 [TCA_FLOWER_KEY_ENC_OPTS_GENEVE] = { .type = NLA_NESTED },
717
+ [TCA_FLOWER_KEY_ENC_OPTS_VXLAN] = { .type = NLA_NESTED },
718
+ [TCA_FLOWER_KEY_ENC_OPTS_ERSPAN] = { .type = NLA_NESTED },
496719 };
497720
498721 static const struct nla_policy
....@@ -503,50 +726,271 @@
503726 .len = 128 },
504727 };
505728
729
+static const struct nla_policy
730
+vxlan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1] = {
731
+ [TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP] = { .type = NLA_U32 },
732
+};
733
+
734
+static const struct nla_policy
735
+erspan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1] = {
736
+ [TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER] = { .type = NLA_U8 },
737
+ [TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX] = { .type = NLA_U32 },
738
+ [TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR] = { .type = NLA_U8 },
739
+ [TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID] = { .type = NLA_U8 },
740
+};
741
+
742
+static const struct nla_policy
743
+mpls_stack_entry_policy[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1] = {
744
+ [TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH] = { .type = NLA_U8 },
745
+ [TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL] = { .type = NLA_U8 },
746
+ [TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS] = { .type = NLA_U8 },
747
+ [TCA_FLOWER_KEY_MPLS_OPT_LSE_TC] = { .type = NLA_U8 },
748
+ [TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL] = { .type = NLA_U32 },
749
+};
750
+
506751 static void fl_set_key_val(struct nlattr **tb,
507752 void *val, int val_type,
508753 void *mask, int mask_type, int len)
509754 {
510755 if (!tb[val_type])
511756 return;
512
- memcpy(val, nla_data(tb[val_type]), len);
757
+ nla_memcpy(val, tb[val_type], len);
513758 if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
514759 memset(mask, 0xff, len);
515760 else
516
- memcpy(mask, nla_data(tb[mask_type]), len);
761
+ nla_memcpy(mask, tb[mask_type], len);
762
+}
763
+
764
+static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key,
765
+ struct fl_flow_key *mask,
766
+ struct netlink_ext_ack *extack)
767
+{
768
+ fl_set_key_val(tb, &key->tp_range.tp_min.dst,
769
+ TCA_FLOWER_KEY_PORT_DST_MIN, &mask->tp_range.tp_min.dst,
770
+ TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.dst));
771
+ fl_set_key_val(tb, &key->tp_range.tp_max.dst,
772
+ TCA_FLOWER_KEY_PORT_DST_MAX, &mask->tp_range.tp_max.dst,
773
+ TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.dst));
774
+ fl_set_key_val(tb, &key->tp_range.tp_min.src,
775
+ TCA_FLOWER_KEY_PORT_SRC_MIN, &mask->tp_range.tp_min.src,
776
+ TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.src));
777
+ fl_set_key_val(tb, &key->tp_range.tp_max.src,
778
+ TCA_FLOWER_KEY_PORT_SRC_MAX, &mask->tp_range.tp_max.src,
779
+ TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.src));
780
+
781
+ if (mask->tp_range.tp_min.dst && mask->tp_range.tp_max.dst &&
782
+ ntohs(key->tp_range.tp_max.dst) <=
783
+ ntohs(key->tp_range.tp_min.dst)) {
784
+ NL_SET_ERR_MSG_ATTR(extack,
785
+ tb[TCA_FLOWER_KEY_PORT_DST_MIN],
786
+ "Invalid destination port range (min must be strictly smaller than max)");
787
+ return -EINVAL;
788
+ }
789
+ if (mask->tp_range.tp_min.src && mask->tp_range.tp_max.src &&
790
+ ntohs(key->tp_range.tp_max.src) <=
791
+ ntohs(key->tp_range.tp_min.src)) {
792
+ NL_SET_ERR_MSG_ATTR(extack,
793
+ tb[TCA_FLOWER_KEY_PORT_SRC_MIN],
794
+ "Invalid source port range (min must be strictly smaller than max)");
795
+ return -EINVAL;
796
+ }
797
+
798
+ return 0;
799
+}
800
+
801
+static int fl_set_key_mpls_lse(const struct nlattr *nla_lse,
802
+ struct flow_dissector_key_mpls *key_val,
803
+ struct flow_dissector_key_mpls *key_mask,
804
+ struct netlink_ext_ack *extack)
805
+{
806
+ struct nlattr *tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1];
807
+ struct flow_dissector_mpls_lse *lse_mask;
808
+ struct flow_dissector_mpls_lse *lse_val;
809
+ u8 lse_index;
810
+ u8 depth;
811
+ int err;
812
+
813
+ err = nla_parse_nested(tb, TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX, nla_lse,
814
+ mpls_stack_entry_policy, extack);
815
+ if (err < 0)
816
+ return err;
817
+
818
+ if (!tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]) {
819
+ NL_SET_ERR_MSG(extack, "Missing MPLS option \"depth\"");
820
+ return -EINVAL;
821
+ }
822
+
823
+ depth = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]);
824
+
825
+ /* LSE depth starts at 1, for consistency with terminology used by
826
+ * RFC 3031 (section 3.9), where depth 0 refers to unlabeled packets.
827
+ */
828
+ if (depth < 1 || depth > FLOW_DIS_MPLS_MAX) {
829
+ NL_SET_ERR_MSG_ATTR(extack,
830
+ tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH],
831
+ "Invalid MPLS depth");
832
+ return -EINVAL;
833
+ }
834
+ lse_index = depth - 1;
835
+
836
+ dissector_set_mpls_lse(key_val, lse_index);
837
+ dissector_set_mpls_lse(key_mask, lse_index);
838
+
839
+ lse_val = &key_val->ls[lse_index];
840
+ lse_mask = &key_mask->ls[lse_index];
841
+
842
+ if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]) {
843
+ lse_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]);
844
+ lse_mask->mpls_ttl = MPLS_TTL_MASK;
845
+ }
846
+ if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]) {
847
+ u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]);
848
+
849
+ if (bos & ~MPLS_BOS_MASK) {
850
+ NL_SET_ERR_MSG_ATTR(extack,
851
+ tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS],
852
+ "Bottom Of Stack (BOS) must be 0 or 1");
853
+ return -EINVAL;
854
+ }
855
+ lse_val->mpls_bos = bos;
856
+ lse_mask->mpls_bos = MPLS_BOS_MASK;
857
+ }
858
+ if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]) {
859
+ u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]);
860
+
861
+ if (tc & ~MPLS_TC_MASK) {
862
+ NL_SET_ERR_MSG_ATTR(extack,
863
+ tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC],
864
+ "Traffic Class (TC) must be between 0 and 7");
865
+ return -EINVAL;
866
+ }
867
+ lse_val->mpls_tc = tc;
868
+ lse_mask->mpls_tc = MPLS_TC_MASK;
869
+ }
870
+ if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]) {
871
+ u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]);
872
+
873
+ if (label & ~MPLS_LABEL_MASK) {
874
+ NL_SET_ERR_MSG_ATTR(extack,
875
+ tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL],
876
+ "Label must be between 0 and 1048575");
877
+ return -EINVAL;
878
+ }
879
+ lse_val->mpls_label = label;
880
+ lse_mask->mpls_label = MPLS_LABEL_MASK;
881
+ }
882
+
883
+ return 0;
884
+}
885
+
886
+static int fl_set_key_mpls_opts(const struct nlattr *nla_mpls_opts,
887
+ struct flow_dissector_key_mpls *key_val,
888
+ struct flow_dissector_key_mpls *key_mask,
889
+ struct netlink_ext_ack *extack)
890
+{
891
+ struct nlattr *nla_lse;
892
+ int rem;
893
+ int err;
894
+
895
+ if (!(nla_mpls_opts->nla_type & NLA_F_NESTED)) {
896
+ NL_SET_ERR_MSG_ATTR(extack, nla_mpls_opts,
897
+ "NLA_F_NESTED is missing");
898
+ return -EINVAL;
899
+ }
900
+
901
+ nla_for_each_nested(nla_lse, nla_mpls_opts, rem) {
902
+ if (nla_type(nla_lse) != TCA_FLOWER_KEY_MPLS_OPTS_LSE) {
903
+ NL_SET_ERR_MSG_ATTR(extack, nla_lse,
904
+ "Invalid MPLS option type");
905
+ return -EINVAL;
906
+ }
907
+
908
+ err = fl_set_key_mpls_lse(nla_lse, key_val, key_mask, extack);
909
+ if (err < 0)
910
+ return err;
911
+ }
912
+ if (rem) {
913
+ NL_SET_ERR_MSG(extack,
914
+ "Bytes leftover after parsing MPLS options");
915
+ return -EINVAL;
916
+ }
917
+
918
+ return 0;
517919 }
518920
519921 static int fl_set_key_mpls(struct nlattr **tb,
520922 struct flow_dissector_key_mpls *key_val,
521
- struct flow_dissector_key_mpls *key_mask)
923
+ struct flow_dissector_key_mpls *key_mask,
924
+ struct netlink_ext_ack *extack)
522925 {
926
+ struct flow_dissector_mpls_lse *lse_mask;
927
+ struct flow_dissector_mpls_lse *lse_val;
928
+
929
+ if (tb[TCA_FLOWER_KEY_MPLS_OPTS]) {
930
+ if (tb[TCA_FLOWER_KEY_MPLS_TTL] ||
931
+ tb[TCA_FLOWER_KEY_MPLS_BOS] ||
932
+ tb[TCA_FLOWER_KEY_MPLS_TC] ||
933
+ tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
934
+ NL_SET_ERR_MSG_ATTR(extack,
935
+ tb[TCA_FLOWER_KEY_MPLS_OPTS],
936
+ "MPLS label, Traffic Class, Bottom Of Stack and Time To Live must be encapsulated in the MPLS options attribute");
937
+ return -EBADMSG;
938
+ }
939
+
940
+ return fl_set_key_mpls_opts(tb[TCA_FLOWER_KEY_MPLS_OPTS],
941
+ key_val, key_mask, extack);
942
+ }
943
+
944
+ lse_val = &key_val->ls[0];
945
+ lse_mask = &key_mask->ls[0];
946
+
523947 if (tb[TCA_FLOWER_KEY_MPLS_TTL]) {
524
- key_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TTL]);
525
- key_mask->mpls_ttl = MPLS_TTL_MASK;
948
+ lse_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TTL]);
949
+ lse_mask->mpls_ttl = MPLS_TTL_MASK;
950
+ dissector_set_mpls_lse(key_val, 0);
951
+ dissector_set_mpls_lse(key_mask, 0);
526952 }
527953 if (tb[TCA_FLOWER_KEY_MPLS_BOS]) {
528954 u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_BOS]);
529955
530
- if (bos & ~MPLS_BOS_MASK)
956
+ if (bos & ~MPLS_BOS_MASK) {
957
+ NL_SET_ERR_MSG_ATTR(extack,
958
+ tb[TCA_FLOWER_KEY_MPLS_BOS],
959
+ "Bottom Of Stack (BOS) must be 0 or 1");
531960 return -EINVAL;
532
- key_val->mpls_bos = bos;
533
- key_mask->mpls_bos = MPLS_BOS_MASK;
961
+ }
962
+ lse_val->mpls_bos = bos;
963
+ lse_mask->mpls_bos = MPLS_BOS_MASK;
964
+ dissector_set_mpls_lse(key_val, 0);
965
+ dissector_set_mpls_lse(key_mask, 0);
534966 }
535967 if (tb[TCA_FLOWER_KEY_MPLS_TC]) {
536968 u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TC]);
537969
538
- if (tc & ~MPLS_TC_MASK)
970
+ if (tc & ~MPLS_TC_MASK) {
971
+ NL_SET_ERR_MSG_ATTR(extack,
972
+ tb[TCA_FLOWER_KEY_MPLS_TC],
973
+ "Traffic Class (TC) must be between 0 and 7");
539974 return -EINVAL;
540
- key_val->mpls_tc = tc;
541
- key_mask->mpls_tc = MPLS_TC_MASK;
975
+ }
976
+ lse_val->mpls_tc = tc;
977
+ lse_mask->mpls_tc = MPLS_TC_MASK;
978
+ dissector_set_mpls_lse(key_val, 0);
979
+ dissector_set_mpls_lse(key_mask, 0);
542980 }
543981 if (tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
544982 u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_LABEL]);
545983
546
- if (label & ~MPLS_LABEL_MASK)
984
+ if (label & ~MPLS_LABEL_MASK) {
985
+ NL_SET_ERR_MSG_ATTR(extack,
986
+ tb[TCA_FLOWER_KEY_MPLS_LABEL],
987
+ "Label must be between 0 and 1048575");
547988 return -EINVAL;
548
- key_val->mpls_label = label;
549
- key_mask->mpls_label = MPLS_LABEL_MASK;
989
+ }
990
+ lse_val->mpls_label = label;
991
+ lse_mask->mpls_label = MPLS_LABEL_MASK;
992
+ dissector_set_mpls_lse(key_val, 0);
993
+ dissector_set_mpls_lse(key_mask, 0);
550994 }
551995 return 0;
552996 }
....@@ -554,6 +998,7 @@
554998 static void fl_set_key_vlan(struct nlattr **tb,
555999 __be16 ethertype,
5561000 int vlan_id_key, int vlan_prio_key,
1001
+ int vlan_next_eth_type_key,
5571002 struct flow_dissector_key_vlan *key_val,
5581003 struct flow_dissector_key_vlan *key_mask)
5591004 {
....@@ -572,6 +1017,11 @@
5721017 }
5731018 key_val->vlan_tpid = ethertype;
5741019 key_mask->vlan_tpid = cpu_to_be16(~0);
1020
+ if (tb[vlan_next_eth_type_key]) {
1021
+ key_val->vlan_eth_type =
1022
+ nla_get_be16(tb[vlan_next_eth_type_key]);
1023
+ key_mask->vlan_eth_type = cpu_to_be16(~0);
1024
+ }
5751025 }
5761026
5771027 static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
....@@ -585,14 +1035,16 @@
5851035 }
5861036 }
5871037
588
-static int fl_set_key_flags(struct nlattr **tb,
589
- u32 *flags_key, u32 *flags_mask)
1038
+static int fl_set_key_flags(struct nlattr **tb, u32 *flags_key,
1039
+ u32 *flags_mask, struct netlink_ext_ack *extack)
5901040 {
5911041 u32 key, mask;
5921042
5931043 /* mask is mandatory for flags */
594
- if (!tb[TCA_FLOWER_KEY_FLAGS_MASK])
1044
+ if (!tb[TCA_FLOWER_KEY_FLAGS_MASK]) {
1045
+ NL_SET_ERR_MSG(extack, "Missing flags mask");
5951046 return -EINVAL;
1047
+ }
5961048
5971049 key = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS]));
5981050 mask = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS_MASK]));
....@@ -650,8 +1102,9 @@
6501102 return -EINVAL;
6511103 }
6521104
653
- err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX,
654
- nla, geneve_opt_policy, extack);
1105
+ err = nla_parse_nested_deprecated(tb,
1106
+ TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX,
1107
+ nla, geneve_opt_policy, extack);
6551108 if (err < 0)
6561109 return err;
6571110
....@@ -706,6 +1159,108 @@
7061159 return sizeof(struct geneve_opt) + data_len;
7071160 }
7081161
1162
+static int fl_set_vxlan_opt(const struct nlattr *nla, struct fl_flow_key *key,
1163
+ int depth, int option_len,
1164
+ struct netlink_ext_ack *extack)
1165
+{
1166
+ struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1];
1167
+ struct vxlan_metadata *md;
1168
+ int err;
1169
+
1170
+ md = (struct vxlan_metadata *)&key->enc_opts.data[key->enc_opts.len];
1171
+ memset(md, 0xff, sizeof(*md));
1172
+
1173
+ if (!depth)
1174
+ return sizeof(*md);
1175
+
1176
+ if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_VXLAN) {
1177
+ NL_SET_ERR_MSG(extack, "Non-vxlan option type for mask");
1178
+ return -EINVAL;
1179
+ }
1180
+
1181
+ err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX, nla,
1182
+ vxlan_opt_policy, extack);
1183
+ if (err < 0)
1184
+ return err;
1185
+
1186
+ if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) {
1187
+ NL_SET_ERR_MSG(extack, "Missing tunnel key vxlan option gbp");
1188
+ return -EINVAL;
1189
+ }
1190
+
1191
+ if (tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) {
1192
+ md->gbp = nla_get_u32(tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]);
1193
+ md->gbp &= VXLAN_GBP_MASK;
1194
+ }
1195
+
1196
+ return sizeof(*md);
1197
+}
1198
+
1199
+static int fl_set_erspan_opt(const struct nlattr *nla, struct fl_flow_key *key,
1200
+ int depth, int option_len,
1201
+ struct netlink_ext_ack *extack)
1202
+{
1203
+ struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1];
1204
+ struct erspan_metadata *md;
1205
+ int err;
1206
+
1207
+ md = (struct erspan_metadata *)&key->enc_opts.data[key->enc_opts.len];
1208
+ memset(md, 0xff, sizeof(*md));
1209
+ md->version = 1;
1210
+
1211
+ if (!depth)
1212
+ return sizeof(*md);
1213
+
1214
+ if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_ERSPAN) {
1215
+ NL_SET_ERR_MSG(extack, "Non-erspan option type for mask");
1216
+ return -EINVAL;
1217
+ }
1218
+
1219
+ err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX, nla,
1220
+ erspan_opt_policy, extack);
1221
+ if (err < 0)
1222
+ return err;
1223
+
1224
+ if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]) {
1225
+ NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option ver");
1226
+ return -EINVAL;
1227
+ }
1228
+
1229
+ if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER])
1230
+ md->version = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]);
1231
+
1232
+ if (md->version == 1) {
1233
+ if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
1234
+ NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option index");
1235
+ return -EINVAL;
1236
+ }
1237
+ if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
1238
+ nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX];
1239
+ memset(&md->u, 0x00, sizeof(md->u));
1240
+ md->u.index = nla_get_be32(nla);
1241
+ }
1242
+ } else if (md->version == 2) {
1243
+ if (!option_len && (!tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR] ||
1244
+ !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID])) {
1245
+ NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option dir or hwid");
1246
+ return -EINVAL;
1247
+ }
1248
+ if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]) {
1249
+ nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR];
1250
+ md->u.md2.dir = nla_get_u8(nla);
1251
+ }
1252
+ if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]) {
1253
+ nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID];
1254
+ set_hwid(&md->u.md2, nla_get_u8(nla));
1255
+ }
1256
+ } else {
1257
+ NL_SET_ERR_MSG(extack, "Tunnel key erspan option ver is incorrect");
1258
+ return -EINVAL;
1259
+ }
1260
+
1261
+ return sizeof(*md);
1262
+}
1263
+
7091264 static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
7101265 struct fl_flow_key *mask,
7111266 struct netlink_ext_ack *extack)
....@@ -713,29 +1268,38 @@
7131268 const struct nlattr *nla_enc_key, *nla_opt_key, *nla_opt_msk = NULL;
7141269 int err, option_len, key_depth, msk_depth = 0;
7151270
716
- err = nla_validate_nested(tb[TCA_FLOWER_KEY_ENC_OPTS],
717
- TCA_FLOWER_KEY_ENC_OPTS_MAX,
718
- enc_opts_policy, extack);
1271
+ err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS],
1272
+ TCA_FLOWER_KEY_ENC_OPTS_MAX,
1273
+ enc_opts_policy, extack);
7191274 if (err)
7201275 return err;
7211276
7221277 nla_enc_key = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS]);
7231278
7241279 if (tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]) {
725
- err = nla_validate_nested(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK],
726
- TCA_FLOWER_KEY_ENC_OPTS_MAX,
727
- enc_opts_policy, extack);
1280
+ err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK],
1281
+ TCA_FLOWER_KEY_ENC_OPTS_MAX,
1282
+ enc_opts_policy, extack);
7281283 if (err)
7291284 return err;
7301285
7311286 nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
7321287 msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
1288
+ if (!nla_ok(nla_opt_msk, msk_depth)) {
1289
+ NL_SET_ERR_MSG(extack, "Invalid nested attribute for masks");
1290
+ return -EINVAL;
1291
+ }
7331292 }
7341293
7351294 nla_for_each_attr(nla_opt_key, nla_enc_key,
7361295 nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS]), key_depth) {
7371296 switch (nla_type(nla_opt_key)) {
7381297 case TCA_FLOWER_KEY_ENC_OPTS_GENEVE:
1298
+ if (key->enc_opts.dst_opt_type &&
1299
+ key->enc_opts.dst_opt_type != TUNNEL_GENEVE_OPT) {
1300
+ NL_SET_ERR_MSG(extack, "Duplicate type for geneve options");
1301
+ return -EINVAL;
1302
+ }
7391303 option_len = 0;
7401304 key->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
7411305 option_len = fl_set_geneve_opt(nla_opt_key, key,
....@@ -760,14 +1324,153 @@
7601324 NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
7611325 return -EINVAL;
7621326 }
1327
+ break;
1328
+ case TCA_FLOWER_KEY_ENC_OPTS_VXLAN:
1329
+ if (key->enc_opts.dst_opt_type) {
1330
+ NL_SET_ERR_MSG(extack, "Duplicate type for vxlan options");
1331
+ return -EINVAL;
1332
+ }
1333
+ option_len = 0;
1334
+ key->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT;
1335
+ option_len = fl_set_vxlan_opt(nla_opt_key, key,
1336
+ key_depth, option_len,
1337
+ extack);
1338
+ if (option_len < 0)
1339
+ return option_len;
7631340
764
- if (msk_depth)
765
- nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
1341
+ key->enc_opts.len += option_len;
1342
+ /* At the same time we need to parse through the mask
1343
+ * in order to verify exact and mask attribute lengths.
1344
+ */
1345
+ mask->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT;
1346
+ option_len = fl_set_vxlan_opt(nla_opt_msk, mask,
1347
+ msk_depth, option_len,
1348
+ extack);
1349
+ if (option_len < 0)
1350
+ return option_len;
1351
+
1352
+ mask->enc_opts.len += option_len;
1353
+ if (key->enc_opts.len != mask->enc_opts.len) {
1354
+ NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
1355
+ return -EINVAL;
1356
+ }
1357
+ break;
1358
+ case TCA_FLOWER_KEY_ENC_OPTS_ERSPAN:
1359
+ if (key->enc_opts.dst_opt_type) {
1360
+ NL_SET_ERR_MSG(extack, "Duplicate type for erspan options");
1361
+ return -EINVAL;
1362
+ }
1363
+ option_len = 0;
1364
+ key->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT;
1365
+ option_len = fl_set_erspan_opt(nla_opt_key, key,
1366
+ key_depth, option_len,
1367
+ extack);
1368
+ if (option_len < 0)
1369
+ return option_len;
1370
+
1371
+ key->enc_opts.len += option_len;
1372
+ /* At the same time we need to parse through the mask
1373
+ * in order to verify exact and mask attribute lengths.
1374
+ */
1375
+ mask->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT;
1376
+ option_len = fl_set_erspan_opt(nla_opt_msk, mask,
1377
+ msk_depth, option_len,
1378
+ extack);
1379
+ if (option_len < 0)
1380
+ return option_len;
1381
+
1382
+ mask->enc_opts.len += option_len;
1383
+ if (key->enc_opts.len != mask->enc_opts.len) {
1384
+ NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
1385
+ return -EINVAL;
1386
+ }
7661387 break;
7671388 default:
7681389 NL_SET_ERR_MSG(extack, "Unknown tunnel option type");
7691390 return -EINVAL;
7701391 }
1392
+
1393
+ if (!msk_depth)
1394
+ continue;
1395
+
1396
+ if (!nla_ok(nla_opt_msk, msk_depth)) {
1397
+ NL_SET_ERR_MSG(extack, "A mask attribute is invalid");
1398
+ return -EINVAL;
1399
+ }
1400
+ nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
1401
+ }
1402
+
1403
+ return 0;
1404
+}
1405
+
1406
+static int fl_validate_ct_state(u16 state, struct nlattr *tb,
1407
+ struct netlink_ext_ack *extack)
1408
+{
1409
+ if (state && !(state & TCA_FLOWER_KEY_CT_FLAGS_TRACKED)) {
1410
+ NL_SET_ERR_MSG_ATTR(extack, tb,
1411
+ "no trk, so no other flag can be set");
1412
+ return -EINVAL;
1413
+ }
1414
+
1415
+ if (state & TCA_FLOWER_KEY_CT_FLAGS_NEW &&
1416
+ state & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED) {
1417
+ NL_SET_ERR_MSG_ATTR(extack, tb,
1418
+ "new and est are mutually exclusive");
1419
+ return -EINVAL;
1420
+ }
1421
+
1422
+ return 0;
1423
+}
1424
+
1425
+static int fl_set_key_ct(struct nlattr **tb,
1426
+ struct flow_dissector_key_ct *key,
1427
+ struct flow_dissector_key_ct *mask,
1428
+ struct netlink_ext_ack *extack)
1429
+{
1430
+ if (tb[TCA_FLOWER_KEY_CT_STATE]) {
1431
+ int err;
1432
+
1433
+ if (!IS_ENABLED(CONFIG_NF_CONNTRACK)) {
1434
+ NL_SET_ERR_MSG(extack, "Conntrack isn't enabled");
1435
+ return -EOPNOTSUPP;
1436
+ }
1437
+ fl_set_key_val(tb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
1438
+ &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
1439
+ sizeof(key->ct_state));
1440
+
1441
+ err = fl_validate_ct_state(key->ct_state & mask->ct_state,
1442
+ tb[TCA_FLOWER_KEY_CT_STATE_MASK],
1443
+ extack);
1444
+ if (err)
1445
+ return err;
1446
+
1447
+ }
1448
+ if (tb[TCA_FLOWER_KEY_CT_ZONE]) {
1449
+ if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) {
1450
+ NL_SET_ERR_MSG(extack, "Conntrack zones isn't enabled");
1451
+ return -EOPNOTSUPP;
1452
+ }
1453
+ fl_set_key_val(tb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
1454
+ &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
1455
+ sizeof(key->ct_zone));
1456
+ }
1457
+ if (tb[TCA_FLOWER_KEY_CT_MARK]) {
1458
+ if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) {
1459
+ NL_SET_ERR_MSG(extack, "Conntrack mark isn't enabled");
1460
+ return -EOPNOTSUPP;
1461
+ }
1462
+ fl_set_key_val(tb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
1463
+ &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
1464
+ sizeof(key->ct_mark));
1465
+ }
1466
+ if (tb[TCA_FLOWER_KEY_CT_LABELS]) {
1467
+ if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) {
1468
+ NL_SET_ERR_MSG(extack, "Conntrack labels aren't enabled");
1469
+ return -EOPNOTSUPP;
1470
+ }
1471
+ fl_set_key_val(tb, key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
1472
+ mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
1473
+ sizeof(key->ct_labels));
7711474 }
7721475
7731476 return 0;
....@@ -779,15 +1482,14 @@
7791482 {
7801483 __be16 ethertype;
7811484 int ret = 0;
782
-#ifdef CONFIG_NET_CLS_IND
1485
+
7831486 if (tb[TCA_FLOWER_INDEV]) {
7841487 int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV], extack);
7851488 if (err < 0)
7861489 return err;
787
- key->indev_ifindex = err;
788
- mask->indev_ifindex = 0xffffffff;
1490
+ key->meta.ingress_ifindex = err;
1491
+ mask->meta.ingress_ifindex = 0xffffffff;
7891492 }
790
-#endif
7911493
7921494 fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
7931495 mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
....@@ -801,8 +1503,9 @@
8011503
8021504 if (eth_type_vlan(ethertype)) {
8031505 fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_VLAN_ID,
804
- TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan,
805
- &mask->vlan);
1506
+ TCA_FLOWER_KEY_VLAN_PRIO,
1507
+ TCA_FLOWER_KEY_VLAN_ETH_TYPE,
1508
+ &key->vlan, &mask->vlan);
8061509
8071510 if (tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]) {
8081511 ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]);
....@@ -810,6 +1513,7 @@
8101513 fl_set_key_vlan(tb, ethertype,
8111514 TCA_FLOWER_KEY_CVLAN_ID,
8121515 TCA_FLOWER_KEY_CVLAN_PRIO,
1516
+ TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
8131517 &key->cvlan, &mask->cvlan);
8141518 fl_set_key_val(tb, &key->basic.n_proto,
8151519 TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
....@@ -901,7 +1605,7 @@
9011605 sizeof(key->icmp.code));
9021606 } else if (key->basic.n_proto == htons(ETH_P_MPLS_UC) ||
9031607 key->basic.n_proto == htons(ETH_P_MPLS_MC)) {
904
- ret = fl_set_key_mpls(tb, &key->mpls, &mask->mpls);
1608
+ ret = fl_set_key_mpls(tb, &key->mpls, &mask->mpls, extack);
9051609 if (ret)
9061610 return ret;
9071611 } else if (key->basic.n_proto == htons(ETH_P_ARP) ||
....@@ -921,6 +1625,14 @@
9211625 fl_set_key_val(tb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
9221626 mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
9231627 sizeof(key->arp.tha));
1628
+ }
1629
+
1630
+ if (key->basic.ip_proto == IPPROTO_TCP ||
1631
+ key->basic.ip_proto == IPPROTO_UDP ||
1632
+ key->basic.ip_proto == IPPROTO_SCTP) {
1633
+ ret = fl_set_key_port_range(tb, key, mask, extack);
1634
+ if (ret)
1635
+ return ret;
9241636 }
9251637
9261638 if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] ||
....@@ -969,14 +1681,23 @@
9691681
9701682 fl_set_key_ip(tb, true, &key->enc_ip, &mask->enc_ip);
9711683
1684
+ fl_set_key_val(tb, &key->hash.hash, TCA_FLOWER_KEY_HASH,
1685
+ &mask->hash.hash, TCA_FLOWER_KEY_HASH_MASK,
1686
+ sizeof(key->hash.hash));
1687
+
9721688 if (tb[TCA_FLOWER_KEY_ENC_OPTS]) {
9731689 ret = fl_set_enc_opt(tb, key, mask, extack);
9741690 if (ret)
9751691 return ret;
9761692 }
9771693
1694
+ ret = fl_set_key_ct(tb, &key->ct, &mask->ct, extack);
1695
+ if (ret)
1696
+ return ret;
1697
+
9781698 if (tb[TCA_FLOWER_KEY_FLAGS])
979
- ret = fl_set_key_flags(tb, &key->control.flags, &mask->control.flags);
1699
+ ret = fl_set_key_flags(tb, &key->control.flags,
1700
+ &mask->control.flags, extack);
9801701
9811702 return ret;
9821703 }
....@@ -1007,7 +1728,7 @@
10071728 }
10081729
10091730 #define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
1010
-#define FL_KEY_MEMBER_SIZE(member) (sizeof(((struct fl_flow_key *) 0)->member))
1731
+#define FL_KEY_MEMBER_SIZE(member) sizeof_field(struct fl_flow_key, member)
10111732
10121733 #define FL_KEY_IS_MASKED(mask, member) \
10131734 memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member), \
....@@ -1032,6 +1753,8 @@
10321753 struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
10331754 size_t cnt = 0;
10341755
1756
+ FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1757
+ FLOW_DISSECTOR_KEY_META, meta);
10351758 FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
10361759 FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
10371760 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
....@@ -1042,6 +1765,8 @@
10421765 FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
10431766 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
10441767 FLOW_DISSECTOR_KEY_PORTS, tp);
1768
+ FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1769
+ FLOW_DISSECTOR_KEY_PORTS_RANGE, tp_range);
10451770 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
10461771 FLOW_DISSECTOR_KEY_IP, ip);
10471772 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
....@@ -1072,6 +1797,10 @@
10721797 FLOW_DISSECTOR_KEY_ENC_IP, enc_ip);
10731798 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
10741799 FLOW_DISSECTOR_KEY_ENC_OPTS, enc_opts);
1800
+ FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1801
+ FLOW_DISSECTOR_KEY_CT, ct);
1802
+ FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1803
+ FLOW_DISSECTOR_KEY_HASH, hash);
10751804
10761805 skb_flow_dissector_init(dissector, keys, cnt);
10771806 }
....@@ -1088,6 +1817,12 @@
10881817
10891818 fl_mask_copy(newmask, mask);
10901819
1820
+ if ((newmask->key.tp_range.tp_min.dst &&
1821
+ newmask->key.tp_range.tp_max.dst) ||
1822
+ (newmask->key.tp_range.tp_min.src &&
1823
+ newmask->key.tp_range.tp_max.src))
1824
+ newmask->flags |= TCA_FLOWER_MASK_FLAGS_RANGE;
1825
+
10911826 err = fl_init_mask_hashtable(newmask);
10921827 if (err)
10931828 goto errout_free;
....@@ -1096,12 +1831,15 @@
10961831
10971832 INIT_LIST_HEAD_RCU(&newmask->filters);
10981833
1099
- err = rhashtable_insert_fast(&head->ht, &newmask->ht_node,
1100
- mask_ht_params);
1834
+ refcount_set(&newmask->refcnt, 1);
1835
+ err = rhashtable_replace_fast(&head->ht, &mask->ht_node,
1836
+ &newmask->ht_node, mask_ht_params);
11011837 if (err)
11021838 goto errout_destroy;
11031839
1840
+ spin_lock(&head->masks_lock);
11041841 list_add_tail_rcu(&newmask->list, &head->masks);
1842
+ spin_unlock(&head->masks_lock);
11051843
11061844 return newmask;
11071845
....@@ -1119,40 +1857,71 @@
11191857 struct fl_flow_mask *mask)
11201858 {
11211859 struct fl_flow_mask *newmask;
1860
+ int ret = 0;
11221861
1123
- fnew->mask = rhashtable_lookup_fast(&head->ht, mask, mask_ht_params);
1862
+ rcu_read_lock();
1863
+
1864
+ /* Insert mask as temporary node to prevent concurrent creation of mask
1865
+ * with same key. Any concurrent lookups with same key will return
1866
+ * -EAGAIN because mask's refcnt is zero.
1867
+ */
1868
+ fnew->mask = rhashtable_lookup_get_insert_fast(&head->ht,
1869
+ &mask->ht_node,
1870
+ mask_ht_params);
11241871 if (!fnew->mask) {
1125
- if (fold)
1126
- return -EINVAL;
1872
+ rcu_read_unlock();
1873
+
1874
+ if (fold) {
1875
+ ret = -EINVAL;
1876
+ goto errout_cleanup;
1877
+ }
11271878
11281879 newmask = fl_create_new_mask(head, mask);
1129
- if (IS_ERR(newmask))
1130
- return PTR_ERR(newmask);
1880
+ if (IS_ERR(newmask)) {
1881
+ ret = PTR_ERR(newmask);
1882
+ goto errout_cleanup;
1883
+ }
11311884
11321885 fnew->mask = newmask;
1886
+ return 0;
1887
+ } else if (IS_ERR(fnew->mask)) {
1888
+ ret = PTR_ERR(fnew->mask);
11331889 } else if (fold && fold->mask != fnew->mask) {
1134
- return -EINVAL;
1890
+ ret = -EINVAL;
1891
+ } else if (!refcount_inc_not_zero(&fnew->mask->refcnt)) {
1892
+ /* Mask was deleted concurrently, try again */
1893
+ ret = -EAGAIN;
11351894 }
1895
+ rcu_read_unlock();
1896
+ return ret;
11361897
1137
- return 0;
1898
+errout_cleanup:
1899
+ rhashtable_remove_fast(&head->ht, &mask->ht_node,
1900
+ mask_ht_params);
1901
+ return ret;
11381902 }
11391903
11401904 static int fl_set_parms(struct net *net, struct tcf_proto *tp,
11411905 struct cls_fl_filter *f, struct fl_flow_mask *mask,
11421906 unsigned long base, struct nlattr **tb,
11431907 struct nlattr *est, bool ovr,
1144
- struct fl_flow_tmplt *tmplt,
1908
+ struct fl_flow_tmplt *tmplt, bool rtnl_held,
11451909 struct netlink_ext_ack *extack)
11461910 {
11471911 int err;
11481912
1149
- err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, extack);
1913
+ err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, rtnl_held,
1914
+ extack);
11501915 if (err < 0)
11511916 return err;
11521917
11531918 if (tb[TCA_FLOWER_CLASSID]) {
11541919 f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
1920
+ if (!rtnl_held)
1921
+ rtnl_lock();
11551922 tcf_bind_filter(tp, &f->res, base);
1923
+ if (!rtnl_held)
1924
+ rtnl_unlock();
11561925 }
11571926
11581927 err = fl_set_key(net, tb, &f->key, &mask->key, extack);
....@@ -1170,24 +1939,52 @@
11701939 return 0;
11711940 }
11721941
1942
+static int fl_ht_insert_unique(struct cls_fl_filter *fnew,
1943
+ struct cls_fl_filter *fold,
1944
+ bool *in_ht)
1945
+{
1946
+ struct fl_flow_mask *mask = fnew->mask;
1947
+ int err;
1948
+
1949
+ err = rhashtable_lookup_insert_fast(&mask->ht,
1950
+ &fnew->ht_node,
1951
+ mask->filter_ht_params);
1952
+ if (err) {
1953
+ *in_ht = false;
1954
+ /* It is okay if filter with same key exists when
1955
+ * overwriting.
1956
+ */
1957
+ return fold && err == -EEXIST ? 0 : err;
1958
+ }
1959
+
1960
+ *in_ht = true;
1961
+ return 0;
1962
+}
1963
+
11731964 static int fl_change(struct net *net, struct sk_buff *in_skb,
11741965 struct tcf_proto *tp, unsigned long base,
11751966 u32 handle, struct nlattr **tca,
1176
- void **arg, bool ovr, struct netlink_ext_ack *extack)
1967
+ void **arg, bool ovr, bool rtnl_held,
1968
+ struct netlink_ext_ack *extack)
11771969 {
1178
- struct cls_fl_head *head = rtnl_dereference(tp->root);
1970
+ struct cls_fl_head *head = fl_head_dereference(tp);
11791971 struct cls_fl_filter *fold = *arg;
11801972 struct cls_fl_filter *fnew;
11811973 struct fl_flow_mask *mask;
11821974 struct nlattr **tb;
1975
+ bool in_ht;
11831976 int err;
11841977
1185
- if (!tca[TCA_OPTIONS])
1186
- return -EINVAL;
1978
+ if (!tca[TCA_OPTIONS]) {
1979
+ err = -EINVAL;
1980
+ goto errout_fold;
1981
+ }
11871982
11881983 mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL);
1189
- if (!mask)
1190
- return -ENOBUFS;
1984
+ if (!mask) {
1985
+ err = -ENOBUFS;
1986
+ goto errout_fold;
1987
+ }
11911988
11921989 tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
11931990 if (!tb) {
....@@ -1195,8 +1992,8 @@
11951992 goto errout_mask_alloc;
11961993 }
11971994
1198
- err = nla_parse_nested(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS],
1199
- fl_policy, NULL);
1995
+ err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
1996
+ tca[TCA_OPTIONS], fl_policy, NULL);
12001997 if (err < 0)
12011998 goto errout_tb;
12021999
....@@ -1210,8 +2007,10 @@
12102007 err = -ENOBUFS;
12112008 goto errout_tb;
12122009 }
2010
+ INIT_LIST_HEAD(&fnew->hw_list);
2011
+ refcount_set(&fnew->refcnt, 1);
12132012
1214
- err = tcf_exts_init(&fnew->exts, TCA_FLOWER_ACT, 0);
2013
+ err = tcf_exts_init(&fnew->exts, net, TCA_FLOWER_ACT, 0);
12152014 if (err < 0)
12162015 goto errout;
12172016
....@@ -1225,7 +2024,7 @@
12252024 }
12262025
12272026 err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE], ovr,
1228
- tp->chain->tmplt_priv, extack);
2027
+ tp->chain->tmplt_priv, rtnl_held, extack);
12292028 if (err)
12302029 goto errout;
12312030
....@@ -1233,189 +2032,320 @@
12332032 if (err)
12342033 goto errout;
12352034
1236
- if (!handle) {
1237
- handle = 1;
1238
- err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
1239
- INT_MAX, GFP_KERNEL);
1240
- } else if (!fold) {
1241
- /* user specifies a handle and it doesn't exist */
1242
- err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
1243
- handle, GFP_KERNEL);
1244
- }
2035
+ err = fl_ht_insert_unique(fnew, fold, &in_ht);
12452036 if (err)
12462037 goto errout_mask;
1247
- fnew->handle = handle;
1248
-
1249
- if (!tc_skip_sw(fnew->flags)) {
1250
- if (!fold && fl_lookup(fnew->mask, &fnew->mkey)) {
1251
- err = -EEXIST;
1252
- goto errout_idr;
1253
- }
1254
-
1255
- err = rhashtable_insert_fast(&fnew->mask->ht, &fnew->ht_node,
1256
- fnew->mask->filter_ht_params);
1257
- if (err)
1258
- goto errout_idr;
1259
- }
12602038
12612039 if (!tc_skip_hw(fnew->flags)) {
1262
- err = fl_hw_replace_filter(tp, fnew, extack);
2040
+ err = fl_hw_replace_filter(tp, fnew, rtnl_held, extack);
12632041 if (err)
1264
- goto errout_mask;
2042
+ goto errout_ht;
12652043 }
12662044
12672045 if (!tc_in_hw(fnew->flags))
12682046 fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
12692047
2048
+ spin_lock(&tp->lock);
2049
+
2050
+ /* tp was deleted concurrently. -EAGAIN will cause caller to lookup
2051
+ * proto again or create new one, if necessary.
2052
+ */
2053
+ if (tp->deleting) {
2054
+ err = -EAGAIN;
2055
+ goto errout_hw;
2056
+ }
2057
+
12702058 if (fold) {
1271
- if (!tc_skip_sw(fold->flags))
1272
- rhashtable_remove_fast(&fold->mask->ht,
1273
- &fold->ht_node,
1274
- fold->mask->filter_ht_params);
2059
+ /* Fold filter was deleted concurrently. Retry lookup. */
2060
+ if (fold->deleted) {
2061
+ err = -EAGAIN;
2062
+ goto errout_hw;
2063
+ }
2064
+
2065
+ fnew->handle = handle;
2066
+
2067
+ if (!in_ht) {
2068
+ struct rhashtable_params params =
2069
+ fnew->mask->filter_ht_params;
2070
+
2071
+ err = rhashtable_insert_fast(&fnew->mask->ht,
2072
+ &fnew->ht_node,
2073
+ params);
2074
+ if (err)
2075
+ goto errout_hw;
2076
+ in_ht = true;
2077
+ }
2078
+
2079
+ refcount_inc(&fnew->refcnt);
2080
+ rhashtable_remove_fast(&fold->mask->ht,
2081
+ &fold->ht_node,
2082
+ fold->mask->filter_ht_params);
2083
+ idr_replace(&head->handle_idr, fnew, fnew->handle);
2084
+ list_replace_rcu(&fold->list, &fnew->list);
2085
+ fold->deleted = true;
2086
+
2087
+ spin_unlock(&tp->lock);
2088
+
2089
+ fl_mask_put(head, fold->mask);
12752090 if (!tc_skip_hw(fold->flags))
1276
- fl_hw_destroy_filter(tp, fold, NULL);
2091
+ fl_hw_destroy_filter(tp, fold, rtnl_held, NULL);
2092
+ tcf_unbind_filter(tp, &fold->res);
2093
+ /* Caller holds reference to fold, so refcnt is always > 0
2094
+ * after this.
2095
+ */
2096
+ refcount_dec(&fold->refcnt);
2097
+ __fl_put(fold);
2098
+ } else {
2099
+ if (handle) {
2100
+ /* user specifies a handle and it doesn't exist */
2101
+ err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
2102
+ handle, GFP_ATOMIC);
2103
+
2104
+ /* Filter with specified handle was concurrently
2105
+ * inserted after initial check in cls_api. This is not
2106
+ * necessarily an error if NLM_F_EXCL is not set in
2107
+ * message flags. Returning EAGAIN will cause cls_api to
2108
+ * try to update concurrently inserted rule.
2109
+ */
2110
+ if (err == -ENOSPC)
2111
+ err = -EAGAIN;
2112
+ } else {
2113
+ handle = 1;
2114
+ err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
2115
+ INT_MAX, GFP_ATOMIC);
2116
+ }
2117
+ if (err)
2118
+ goto errout_hw;
2119
+
2120
+ refcount_inc(&fnew->refcnt);
2121
+ fnew->handle = handle;
2122
+ list_add_tail_rcu(&fnew->list, &fnew->mask->filters);
2123
+ spin_unlock(&tp->lock);
12772124 }
12782125
12792126 *arg = fnew;
12802127
1281
- if (fold) {
1282
- idr_replace(&head->handle_idr, fnew, fnew->handle);
1283
- list_replace_rcu(&fold->list, &fnew->list);
1284
- tcf_unbind_filter(tp, &fold->res);
1285
- tcf_exts_get_net(&fold->exts);
1286
- tcf_queue_work(&fold->rwork, fl_destroy_filter_work);
1287
- } else {
1288
- list_add_tail_rcu(&fnew->list, &fnew->mask->filters);
1289
- }
1290
-
12912128 kfree(tb);
1292
- kfree(mask);
2129
+ tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
12932130 return 0;
12942131
1295
-errout_idr:
1296
- if (!fold)
1297
- idr_remove(&head->handle_idr, fnew->handle);
1298
-
2132
+errout_ht:
2133
+ spin_lock(&tp->lock);
2134
+errout_hw:
2135
+ fnew->deleted = true;
2136
+ spin_unlock(&tp->lock);
2137
+ if (!tc_skip_hw(fnew->flags))
2138
+ fl_hw_destroy_filter(tp, fnew, rtnl_held, NULL);
2139
+ if (in_ht)
2140
+ rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node,
2141
+ fnew->mask->filter_ht_params);
12992142 errout_mask:
1300
- fl_mask_put(head, fnew->mask, false);
1301
-
2143
+ fl_mask_put(head, fnew->mask);
13022144 errout:
1303
- tcf_exts_destroy(&fnew->exts);
1304
- kfree(fnew);
2145
+ __fl_put(fnew);
13052146 errout_tb:
13062147 kfree(tb);
13072148 errout_mask_alloc:
1308
- kfree(mask);
2149
+ tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
2150
+errout_fold:
2151
+ if (fold)
2152
+ __fl_put(fold);
13092153 return err;
13102154 }
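/* A short note on the rewritten fl_change() path above, summarizing only
 * what this hunk itself shows: the new filter is prepared outside tp->lock
 * (mask setup, fl_ht_insert_unique(), optional hardware offload), and only
 * the final publish into the handle idr and the mask's filter list happens
 * under the lock.  If the classifier instance or the old filter was removed
 * concurrently (tp->deleting or fold->deleted), the function backs out and
 * returns -EAGAIN so cls_api can redo the lookup and retry.  References are
 * split between the data structures (the refcount_inc() taken before
 * publishing) and the caller (the reference obtained when fold was looked
 * up, released via refcount_dec() plus __fl_put() once fold is unlinked).
 */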
13112155
13122156 static int fl_delete(struct tcf_proto *tp, void *arg, bool *last,
1313
- struct netlink_ext_ack *extack)
2157
+ bool rtnl_held, struct netlink_ext_ack *extack)
13142158 {
1315
- struct cls_fl_head *head = rtnl_dereference(tp->root);
2159
+ struct cls_fl_head *head = fl_head_dereference(tp);
13162160 struct cls_fl_filter *f = arg;
2161
+ bool last_on_mask;
2162
+ int err = 0;
13172163
1318
- if (!tc_skip_sw(f->flags))
1319
- rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
1320
- f->mask->filter_ht_params);
1321
- __fl_delete(tp, f, extack);
2164
+ err = __fl_delete(tp, f, &last_on_mask, rtnl_held, extack);
13222165 *last = list_empty(&head->masks);
1323
- return 0;
2166
+ __fl_put(f);
2167
+
2168
+ return err;
13242169 }
13252170
1326
-static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg)
2171
+static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg,
2172
+ bool rtnl_held)
13272173 {
1328
- struct cls_fl_head *head = rtnl_dereference(tp->root);
2174
+ struct cls_fl_head *head = fl_head_dereference(tp);
2175
+ unsigned long id = arg->cookie, tmp;
13292176 struct cls_fl_filter *f;
13302177
13312178 arg->count = arg->skip;
13322179
1333
- while ((f = idr_get_next_ul(&head->handle_idr,
1334
- &arg->cookie)) != NULL) {
2180
+ rcu_read_lock();
2181
+ idr_for_each_entry_continue_ul(&head->handle_idr, f, tmp, id) {
2182
+ /* don't return filters that are being deleted */
2183
+ if (!refcount_inc_not_zero(&f->refcnt))
2184
+ continue;
2185
+ rcu_read_unlock();
2186
+
13352187 if (arg->fn(tp, f, arg) < 0) {
2188
+ __fl_put(f);
13362189 arg->stop = 1;
2190
+ rcu_read_lock();
13372191 break;
13382192 }
1339
- arg->cookie = f->handle + 1;
2193
+ __fl_put(f);
13402194 arg->count++;
2195
+ rcu_read_lock();
13412196 }
2197
+ rcu_read_unlock();
2198
+ arg->cookie = id;
13422199 }
13432200
1344
-static int fl_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
2201
+static struct cls_fl_filter *
2202
+fl_get_next_hw_filter(struct tcf_proto *tp, struct cls_fl_filter *f, bool add)
2203
+{
2204
+ struct cls_fl_head *head = fl_head_dereference(tp);
2205
+
2206
+ spin_lock(&tp->lock);
2207
+ if (list_empty(&head->hw_filters)) {
2208
+ spin_unlock(&tp->lock);
2209
+ return NULL;
2210
+ }
2211
+
2212
+ if (!f)
2213
+ f = list_entry(&head->hw_filters, struct cls_fl_filter,
2214
+ hw_list);
2215
+ list_for_each_entry_continue(f, &head->hw_filters, hw_list) {
2216
+ if (!(add && f->deleted) && refcount_inc_not_zero(&f->refcnt)) {
2217
+ spin_unlock(&tp->lock);
2218
+ return f;
2219
+ }
2220
+ }
2221
+
2222
+ spin_unlock(&tp->lock);
2223
+ return NULL;
2224
+}
2225
+
2226
+static int fl_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
13452227 void *cb_priv, struct netlink_ext_ack *extack)
13462228 {
1347
- struct cls_fl_head *head = rtnl_dereference(tp->root);
1348
- struct tc_cls_flower_offload cls_flower = {};
13492229 struct tcf_block *block = tp->chain->block;
1350
- struct fl_flow_mask *mask;
1351
- struct cls_fl_filter *f;
2230
+ struct flow_cls_offload cls_flower = {};
2231
+ struct cls_fl_filter *f = NULL;
13522232 int err;
13532233
1354
- list_for_each_entry(mask, &head->masks, list) {
1355
- list_for_each_entry(f, &mask->filters, list) {
1356
- if (tc_skip_hw(f->flags))
1357
- continue;
2234
+ /* hw_filters list can only be changed by hw offload functions after
2235
+ * obtaining rtnl lock. Make sure it is not changed while reoffload is
2236
+ * iterating it.
2237
+ */
2238
+ ASSERT_RTNL();
13582239
1359
- tc_cls_common_offload_init(&cls_flower.common, tp,
1360
- f->flags, extack);
1361
- cls_flower.command = add ?
1362
- TC_CLSFLOWER_REPLACE : TC_CLSFLOWER_DESTROY;
1363
- cls_flower.cookie = (unsigned long)f;
1364
- cls_flower.dissector = &mask->dissector;
1365
- cls_flower.mask = &mask->key;
1366
- cls_flower.key = &f->mkey;
1367
- cls_flower.exts = &f->exts;
1368
- cls_flower.classid = f->res.classid;
1369
-
1370
- err = cb(TC_SETUP_CLSFLOWER, &cls_flower, cb_priv);
1371
- if (err) {
1372
- if (add && tc_skip_sw(f->flags))
1373
- return err;
1374
- continue;
1375
- }
1376
-
1377
- tc_cls_offload_cnt_update(block, &f->in_hw_count,
1378
- &f->flags, add);
2240
+ while ((f = fl_get_next_hw_filter(tp, f, add))) {
2241
+ cls_flower.rule =
2242
+ flow_rule_alloc(tcf_exts_num_actions(&f->exts));
2243
+ if (!cls_flower.rule) {
2244
+ __fl_put(f);
2245
+ return -ENOMEM;
13792246 }
2247
+
2248
+ tc_cls_common_offload_init(&cls_flower.common, tp, f->flags,
2249
+ extack);
2250
+ cls_flower.command = add ?
2251
+ FLOW_CLS_REPLACE : FLOW_CLS_DESTROY;
2252
+ cls_flower.cookie = (unsigned long)f;
2253
+ cls_flower.rule->match.dissector = &f->mask->dissector;
2254
+ cls_flower.rule->match.mask = &f->mask->key;
2255
+ cls_flower.rule->match.key = &f->mkey;
2256
+
2257
+ err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts);
2258
+ if (err) {
2259
+ kfree(cls_flower.rule);
2260
+ if (tc_skip_sw(f->flags)) {
2261
+ NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
2262
+ __fl_put(f);
2263
+ return err;
2264
+ }
2265
+ goto next_flow;
2266
+ }
2267
+
2268
+ cls_flower.classid = f->res.classid;
2269
+
2270
+ err = tc_setup_cb_reoffload(block, tp, add, cb,
2271
+ TC_SETUP_CLSFLOWER, &cls_flower,
2272
+ cb_priv, &f->flags,
2273
+ &f->in_hw_count);
2274
+ tc_cleanup_flow_action(&cls_flower.rule->action);
2275
+ kfree(cls_flower.rule);
2276
+
2277
+ if (err) {
2278
+ __fl_put(f);
2279
+ return err;
2280
+ }
2281
+next_flow:
2282
+ __fl_put(f);
13802283 }
13812284
13822285 return 0;
13832286 }
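/* In the new fl_reoffload() above, filters come from the hw_filters list
 * (via fl_get_next_hw_filter(), which pins each entry with
 * refcount_inc_not_zero() under tp->lock) instead of iterating every mask's
 * filter list, so only filters that actually reached hardware are replayed.
 * For each one a flow_cls_offload request is built with flow_rule_alloc()
 * and tc_setup_flow_action() and handed to tc_setup_cb_reoffload(), which
 * also keeps f->flags and f->in_hw_count in sync.  A tc_setup_flow_action()
 * failure is fatal only for skip_sw filters (no software fallback); a
 * callback failure from tc_setup_cb_reoffload() aborts the whole reoffload.
 */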
13842287
1385
-static void fl_hw_create_tmplt(struct tcf_chain *chain,
1386
- struct fl_flow_tmplt *tmplt)
2288
+static void fl_hw_add(struct tcf_proto *tp, void *type_data)
13872289 {
1388
- struct tc_cls_flower_offload cls_flower = {};
2290
+ struct flow_cls_offload *cls_flower = type_data;
2291
+ struct cls_fl_filter *f =
2292
+ (struct cls_fl_filter *) cls_flower->cookie;
2293
+ struct cls_fl_head *head = fl_head_dereference(tp);
2294
+
2295
+ spin_lock(&tp->lock);
2296
+ list_add(&f->hw_list, &head->hw_filters);
2297
+ spin_unlock(&tp->lock);
2298
+}
2299
+
2300
+static void fl_hw_del(struct tcf_proto *tp, void *type_data)
2301
+{
2302
+ struct flow_cls_offload *cls_flower = type_data;
2303
+ struct cls_fl_filter *f =
2304
+ (struct cls_fl_filter *) cls_flower->cookie;
2305
+
2306
+ spin_lock(&tp->lock);
2307
+ if (!list_empty(&f->hw_list))
2308
+ list_del_init(&f->hw_list);
2309
+ spin_unlock(&tp->lock);
2310
+}
2311
+
2312
+static int fl_hw_create_tmplt(struct tcf_chain *chain,
2313
+ struct fl_flow_tmplt *tmplt)
2314
+{
2315
+ struct flow_cls_offload cls_flower = {};
13892316 struct tcf_block *block = chain->block;
1390
- struct tcf_exts dummy_exts = { 0, };
2317
+
2318
+ cls_flower.rule = flow_rule_alloc(0);
2319
+ if (!cls_flower.rule)
2320
+ return -ENOMEM;
13912321
13922322 cls_flower.common.chain_index = chain->index;
1393
- cls_flower.command = TC_CLSFLOWER_TMPLT_CREATE;
2323
+ cls_flower.command = FLOW_CLS_TMPLT_CREATE;
13942324 cls_flower.cookie = (unsigned long) tmplt;
1395
- cls_flower.dissector = &tmplt->dissector;
1396
- cls_flower.mask = &tmplt->mask;
1397
- cls_flower.key = &tmplt->dummy_key;
1398
- cls_flower.exts = &dummy_exts;
2325
+ cls_flower.rule->match.dissector = &tmplt->dissector;
2326
+ cls_flower.rule->match.mask = &tmplt->mask;
2327
+ cls_flower.rule->match.key = &tmplt->dummy_key;
13992328
14002329 /* We don't care if driver (any of them) fails to handle this
14012330 * call. It serves just as a hint for it.
14022331 */
1403
- tc_setup_cb_call(block, NULL, TC_SETUP_CLSFLOWER,
1404
- &cls_flower, false);
2332
+ tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
2333
+ kfree(cls_flower.rule);
2334
+
2335
+ return 0;
14052336 }
14062337
14072338 static void fl_hw_destroy_tmplt(struct tcf_chain *chain,
14082339 struct fl_flow_tmplt *tmplt)
14092340 {
1410
- struct tc_cls_flower_offload cls_flower = {};
2341
+ struct flow_cls_offload cls_flower = {};
14112342 struct tcf_block *block = chain->block;
14122343
14132344 cls_flower.common.chain_index = chain->index;
1414
- cls_flower.command = TC_CLSFLOWER_TMPLT_DESTROY;
2345
+ cls_flower.command = FLOW_CLS_TMPLT_DESTROY;
14152346 cls_flower.cookie = (unsigned long) tmplt;
14162347
1417
- tc_setup_cb_call(block, NULL, TC_SETUP_CLSFLOWER,
1418
- &cls_flower, false);
2348
+ tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
14192349 }
14202350
14212351 static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain,
....@@ -1432,8 +2362,8 @@
14322362 tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
14332363 if (!tb)
14342364 return ERR_PTR(-ENOBUFS);
1435
- err = nla_parse_nested(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS],
1436
- fl_policy, NULL);
2365
+ err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
2366
+ tca[TCA_OPTIONS], fl_policy, NULL);
14372367 if (err)
14382368 goto errout_tb;
14392369
....@@ -1446,12 +2376,14 @@
14462376 err = fl_set_key(net, tb, &tmplt->dummy_key, &tmplt->mask, extack);
14472377 if (err)
14482378 goto errout_tmplt;
1449
- kfree(tb);
14502379
14512380 fl_init_dissector(&tmplt->dissector, &tmplt->mask);
14522381
1453
- fl_hw_create_tmplt(chain, tmplt);
2382
+ err = fl_hw_create_tmplt(chain, tmplt);
2383
+ if (err)
2384
+ goto errout_tmplt;
14542385
2386
+ kfree(tb);
14552387 return tmplt;
14562388
14572389 errout_tmplt:
....@@ -1488,35 +2420,156 @@
14882420 return 0;
14892421 }
14902422
2423
+static int fl_dump_key_port_range(struct sk_buff *skb, struct fl_flow_key *key,
2424
+ struct fl_flow_key *mask)
2425
+{
2426
+ if (fl_dump_key_val(skb, &key->tp_range.tp_min.dst,
2427
+ TCA_FLOWER_KEY_PORT_DST_MIN,
2428
+ &mask->tp_range.tp_min.dst, TCA_FLOWER_UNSPEC,
2429
+ sizeof(key->tp_range.tp_min.dst)) ||
2430
+ fl_dump_key_val(skb, &key->tp_range.tp_max.dst,
2431
+ TCA_FLOWER_KEY_PORT_DST_MAX,
2432
+ &mask->tp_range.tp_max.dst, TCA_FLOWER_UNSPEC,
2433
+ sizeof(key->tp_range.tp_max.dst)) ||
2434
+ fl_dump_key_val(skb, &key->tp_range.tp_min.src,
2435
+ TCA_FLOWER_KEY_PORT_SRC_MIN,
2436
+ &mask->tp_range.tp_min.src, TCA_FLOWER_UNSPEC,
2437
+ sizeof(key->tp_range.tp_min.src)) ||
2438
+ fl_dump_key_val(skb, &key->tp_range.tp_max.src,
2439
+ TCA_FLOWER_KEY_PORT_SRC_MAX,
2440
+ &mask->tp_range.tp_max.src, TCA_FLOWER_UNSPEC,
2441
+ sizeof(key->tp_range.tp_max.src)))
2442
+ return -1;
2443
+
2444
+ return 0;
2445
+}
2446
+
2447
+static int fl_dump_key_mpls_opt_lse(struct sk_buff *skb,
2448
+ struct flow_dissector_key_mpls *mpls_key,
2449
+ struct flow_dissector_key_mpls *mpls_mask,
2450
+ u8 lse_index)
2451
+{
2452
+ struct flow_dissector_mpls_lse *lse_mask = &mpls_mask->ls[lse_index];
2453
+ struct flow_dissector_mpls_lse *lse_key = &mpls_key->ls[lse_index];
2454
+ int err;
2455
+
2456
+ err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH,
2457
+ lse_index + 1);
2458
+ if (err)
2459
+ return err;
2460
+
2461
+ if (lse_mask->mpls_ttl) {
2462
+ err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL,
2463
+ lse_key->mpls_ttl);
2464
+ if (err)
2465
+ return err;
2466
+ }
2467
+ if (lse_mask->mpls_bos) {
2468
+ err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS,
2469
+ lse_key->mpls_bos);
2470
+ if (err)
2471
+ return err;
2472
+ }
2473
+ if (lse_mask->mpls_tc) {
2474
+ err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TC,
2475
+ lse_key->mpls_tc);
2476
+ if (err)
2477
+ return err;
2478
+ }
2479
+ if (lse_mask->mpls_label) {
2480
+ err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL,
2481
+ lse_key->mpls_label);
2482
+ if (err)
2483
+ return err;
2484
+ }
2485
+
2486
+ return 0;
2487
+}
2488
+
2489
+static int fl_dump_key_mpls_opts(struct sk_buff *skb,
2490
+ struct flow_dissector_key_mpls *mpls_key,
2491
+ struct flow_dissector_key_mpls *mpls_mask)
2492
+{
2493
+ struct nlattr *opts;
2494
+ struct nlattr *lse;
2495
+ u8 lse_index;
2496
+ int err;
2497
+
2498
+ opts = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS);
2499
+ if (!opts)
2500
+ return -EMSGSIZE;
2501
+
2502
+ for (lse_index = 0; lse_index < FLOW_DIS_MPLS_MAX; lse_index++) {
2503
+ if (!(mpls_mask->used_lses & 1 << lse_index))
2504
+ continue;
2505
+
2506
+ lse = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS_LSE);
2507
+ if (!lse) {
2508
+ err = -EMSGSIZE;
2509
+ goto err_opts;
2510
+ }
2511
+
2512
+ err = fl_dump_key_mpls_opt_lse(skb, mpls_key, mpls_mask,
2513
+ lse_index);
2514
+ if (err)
2515
+ goto err_opts_lse;
2516
+ nla_nest_end(skb, lse);
2517
+ }
2518
+ nla_nest_end(skb, opts);
2519
+
2520
+ return 0;
2521
+
2522
+err_opts_lse:
2523
+ nla_nest_cancel(skb, lse);
2524
+err_opts:
2525
+ nla_nest_cancel(skb, opts);
2526
+
2527
+ return err;
2528
+}
2529
+
14912530 static int fl_dump_key_mpls(struct sk_buff *skb,
14922531 struct flow_dissector_key_mpls *mpls_key,
14932532 struct flow_dissector_key_mpls *mpls_mask)
14942533 {
2534
+ struct flow_dissector_mpls_lse *lse_mask;
2535
+ struct flow_dissector_mpls_lse *lse_key;
14952536 int err;
14962537
1497
- if (!memchr_inv(mpls_mask, 0, sizeof(*mpls_mask)))
2538
+ if (!mpls_mask->used_lses)
14982539 return 0;
1499
- if (mpls_mask->mpls_ttl) {
2540
+
2541
+ lse_mask = &mpls_mask->ls[0];
2542
+ lse_key = &mpls_key->ls[0];
2543
+
2544
+ /* For backward compatibility, don't use the MPLS nested attributes if
2545
+ * the rule can be expressed using the old attributes.
2546
+ */
2547
+ if (mpls_mask->used_lses & ~1 ||
2548
+ (!lse_mask->mpls_ttl && !lse_mask->mpls_bos &&
2549
+ !lse_mask->mpls_tc && !lse_mask->mpls_label))
2550
+ return fl_dump_key_mpls_opts(skb, mpls_key, mpls_mask);
2551
+
2552
+ if (lse_mask->mpls_ttl) {
15002553 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TTL,
1501
- mpls_key->mpls_ttl);
2554
+ lse_key->mpls_ttl);
15022555 if (err)
15032556 return err;
15042557 }
1505
- if (mpls_mask->mpls_tc) {
2558
+ if (lse_mask->mpls_tc) {
15062559 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TC,
1507
- mpls_key->mpls_tc);
2560
+ lse_key->mpls_tc);
15082561 if (err)
15092562 return err;
15102563 }
1511
- if (mpls_mask->mpls_label) {
2564
+ if (lse_mask->mpls_label) {
15122565 err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_LABEL,
1513
- mpls_key->mpls_label);
2566
+ lse_key->mpls_label);
15142567 if (err)
15152568 return err;
15162569 }
1517
- if (mpls_mask->mpls_bos) {
2570
+ if (lse_mask->mpls_bos) {
15182571 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_BOS,
1519
- mpls_key->mpls_bos);
2572
+ lse_key->mpls_bos);
15202573 if (err)
15212574 return err;
15222575 }
....@@ -1609,7 +2662,7 @@
16092662 struct nlattr *nest;
16102663 int opt_off = 0;
16112664
1612
- nest = nla_nest_start(skb, TCA_FLOWER_KEY_ENC_OPTS_GENEVE);
2665
+ nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GENEVE);
16132666 if (!nest)
16142667 goto nla_put_failure;
16152668
....@@ -1636,6 +2689,95 @@
16362689 return -EMSGSIZE;
16372690 }
16382691
2692
+static int fl_dump_key_vxlan_opt(struct sk_buff *skb,
2693
+ struct flow_dissector_key_enc_opts *enc_opts)
2694
+{
2695
+ struct vxlan_metadata *md;
2696
+ struct nlattr *nest;
2697
+
2698
+ nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_VXLAN);
2699
+ if (!nest)
2700
+ goto nla_put_failure;
2701
+
2702
+ md = (struct vxlan_metadata *)&enc_opts->data[0];
2703
+ if (nla_put_u32(skb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP, md->gbp))
2704
+ goto nla_put_failure;
2705
+
2706
+ nla_nest_end(skb, nest);
2707
+ return 0;
2708
+
2709
+nla_put_failure:
2710
+ nla_nest_cancel(skb, nest);
2711
+ return -EMSGSIZE;
2712
+}
2713
+
2714
+static int fl_dump_key_erspan_opt(struct sk_buff *skb,
2715
+ struct flow_dissector_key_enc_opts *enc_opts)
2716
+{
2717
+ struct erspan_metadata *md;
2718
+ struct nlattr *nest;
2719
+
2720
+ nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_ERSPAN);
2721
+ if (!nest)
2722
+ goto nla_put_failure;
2723
+
2724
+ md = (struct erspan_metadata *)&enc_opts->data[0];
2725
+ if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER, md->version))
2726
+ goto nla_put_failure;
2727
+
2728
+ if (md->version == 1 &&
2729
+ nla_put_be32(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX, md->u.index))
2730
+ goto nla_put_failure;
2731
+
2732
+ if (md->version == 2 &&
2733
+ (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR,
2734
+ md->u.md2.dir) ||
2735
+ nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID,
2736
+ get_hwid(&md->u.md2))))
2737
+ goto nla_put_failure;
2738
+
2739
+ nla_nest_end(skb, nest);
2740
+ return 0;
2741
+
2742
+nla_put_failure:
2743
+ nla_nest_cancel(skb, nest);
2744
+ return -EMSGSIZE;
2745
+}
2746
+
2747
+static int fl_dump_key_ct(struct sk_buff *skb,
2748
+ struct flow_dissector_key_ct *key,
2749
+ struct flow_dissector_key_ct *mask)
2750
+{
2751
+ if (IS_ENABLED(CONFIG_NF_CONNTRACK) &&
2752
+ fl_dump_key_val(skb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
2753
+ &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
2754
+ sizeof(key->ct_state)))
2755
+ goto nla_put_failure;
2756
+
2757
+ if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
2758
+ fl_dump_key_val(skb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
2759
+ &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
2760
+ sizeof(key->ct_zone)))
2761
+ goto nla_put_failure;
2762
+
2763
+ if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
2764
+ fl_dump_key_val(skb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
2765
+ &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
2766
+ sizeof(key->ct_mark)))
2767
+ goto nla_put_failure;
2768
+
2769
+ if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
2770
+ fl_dump_key_val(skb, &key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
2771
+ &mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
2772
+ sizeof(key->ct_labels)))
2773
+ goto nla_put_failure;
2774
+
2775
+ return 0;
2776
+
2777
+nla_put_failure:
2778
+ return -EMSGSIZE;
2779
+}
2780
+
16392781 static int fl_dump_key_options(struct sk_buff *skb, int enc_opt_type,
16402782 struct flow_dissector_key_enc_opts *enc_opts)
16412783 {
....@@ -1645,13 +2787,23 @@
16452787 if (!enc_opts->len)
16462788 return 0;
16472789
1648
- nest = nla_nest_start(skb, enc_opt_type);
2790
+ nest = nla_nest_start_noflag(skb, enc_opt_type);
16492791 if (!nest)
16502792 goto nla_put_failure;
16512793
16522794 switch (enc_opts->dst_opt_type) {
16532795 case TUNNEL_GENEVE_OPT:
16542796 err = fl_dump_key_geneve_opt(skb, enc_opts);
2797
+ if (err)
2798
+ goto nla_put_failure;
2799
+ break;
2800
+ case TUNNEL_VXLAN_OPT:
2801
+ err = fl_dump_key_vxlan_opt(skb, enc_opts);
2802
+ if (err)
2803
+ goto nla_put_failure;
2804
+ break;
2805
+ case TUNNEL_ERSPAN_OPT:
2806
+ err = fl_dump_key_erspan_opt(skb, enc_opts);
16552807 if (err)
16562808 goto nla_put_failure;
16572809 break;
....@@ -1682,10 +2834,10 @@
16822834 static int fl_dump_key(struct sk_buff *skb, struct net *net,
16832835 struct fl_flow_key *key, struct fl_flow_key *mask)
16842836 {
1685
- if (mask->indev_ifindex) {
2837
+ if (mask->meta.ingress_ifindex) {
16862838 struct net_device *dev;
16872839
1688
- dev = __dev_get_by_index(net, key->indev_ifindex);
2840
+ dev = __dev_get_by_index(net, key->meta.ingress_ifindex);
16892841 if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
16902842 goto nla_put_failure;
16912843 }
....@@ -1717,13 +2869,13 @@
17172869 goto nla_put_failure;
17182870
17192871 if (mask->basic.n_proto) {
1720
- if (mask->cvlan.vlan_tpid) {
2872
+ if (mask->cvlan.vlan_eth_type) {
17212873 if (nla_put_be16(skb, TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
17222874 key->basic.n_proto))
17232875 goto nla_put_failure;
1724
- } else if (mask->vlan.vlan_tpid) {
2876
+ } else if (mask->vlan.vlan_eth_type) {
17252877 if (nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
1726
- key->basic.n_proto))
2878
+ key->vlan.vlan_eth_type))
17272879 goto nla_put_failure;
17282880 }
17292881 }
....@@ -1824,6 +2976,12 @@
18242976 sizeof(key->arp.tha))))
18252977 goto nla_put_failure;
18262978
2979
+ if ((key->basic.ip_proto == IPPROTO_TCP ||
2980
+ key->basic.ip_proto == IPPROTO_UDP ||
2981
+ key->basic.ip_proto == IPPROTO_SCTP) &&
2982
+ fl_dump_key_port_range(skb, key, mask))
2983
+ goto nla_put_failure;
2984
+
18272985 if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
18282986 (fl_dump_key_val(skb, &key->enc_ipv4.src,
18292987 TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src,
....@@ -1863,7 +3021,15 @@
18633021 fl_dump_key_enc_opt(skb, &key->enc_opts, &mask->enc_opts))
18643022 goto nla_put_failure;
18653023
3024
+ if (fl_dump_key_ct(skb, &key->ct, &mask->ct))
3025
+ goto nla_put_failure;
3026
+
18663027 if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags))
3028
+ goto nla_put_failure;
3029
+
3030
+ if (fl_dump_key_val(skb, &key->hash.hash, TCA_FLOWER_KEY_HASH,
3031
+ &mask->hash.hash, TCA_FLOWER_KEY_HASH_MASK,
3032
+ sizeof(key->hash.hash)))
18673033 goto nla_put_failure;
18683034
18693035 return 0;
....@@ -1873,35 +3039,44 @@
18733039 }
18743040
18753041 static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
1876
- struct sk_buff *skb, struct tcmsg *t)
3042
+ struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
18773043 {
18783044 struct cls_fl_filter *f = fh;
18793045 struct nlattr *nest;
18803046 struct fl_flow_key *key, *mask;
3047
+ bool skip_hw;
18813048
18823049 if (!f)
18833050 return skb->len;
18843051
18853052 t->tcm_handle = f->handle;
18863053
1887
- nest = nla_nest_start(skb, TCA_OPTIONS);
3054
+ nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
18883055 if (!nest)
18893056 goto nla_put_failure;
18903057
3058
+ spin_lock(&tp->lock);
3059
+
18913060 if (f->res.classid &&
18923061 nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
1893
- goto nla_put_failure;
3062
+ goto nla_put_failure_locked;
18943063
18953064 key = &f->key;
18963065 mask = &f->mask->key;
3066
+ skip_hw = tc_skip_hw(f->flags);
18973067
18983068 if (fl_dump_key(skb, net, key, mask))
1899
- goto nla_put_failure;
1900
-
1901
- if (!tc_skip_hw(f->flags))
1902
- fl_hw_update_stats(tp, f);
3069
+ goto nla_put_failure_locked;
19033070
19043071 if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
3072
+ goto nla_put_failure_locked;
3073
+
3074
+ spin_unlock(&tp->lock);
3075
+
3076
+ if (!skip_hw)
3077
+ fl_hw_update_stats(tp, f, rtnl_held);
3078
+
3079
+ if (nla_put_u32(skb, TCA_FLOWER_IN_HW_COUNT, f->in_hw_count))
19053080 goto nla_put_failure;
19063081
19073082 if (tcf_exts_dump(skb, &f->exts))
....@@ -1914,6 +3089,50 @@
19143089
19153090 return skb->len;
19163091
3092
+nla_put_failure_locked:
3093
+ spin_unlock(&tp->lock);
3094
+nla_put_failure:
3095
+ nla_nest_cancel(skb, nest);
3096
+ return -1;
3097
+}
3098
+
3099
+static int fl_terse_dump(struct net *net, struct tcf_proto *tp, void *fh,
3100
+ struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
3101
+{
3102
+ struct cls_fl_filter *f = fh;
3103
+ struct nlattr *nest;
3104
+ bool skip_hw;
3105
+
3106
+ if (!f)
3107
+ return skb->len;
3108
+
3109
+ t->tcm_handle = f->handle;
3110
+
3111
+ nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
3112
+ if (!nest)
3113
+ goto nla_put_failure;
3114
+
3115
+ spin_lock(&tp->lock);
3116
+
3117
+ skip_hw = tc_skip_hw(f->flags);
3118
+
3119
+ if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
3120
+ goto nla_put_failure_locked;
3121
+
3122
+ spin_unlock(&tp->lock);
3123
+
3124
+ if (!skip_hw)
3125
+ fl_hw_update_stats(tp, f, rtnl_held);
3126
+
3127
+ if (tcf_exts_terse_dump(skb, &f->exts))
3128
+ goto nla_put_failure;
3129
+
3130
+ nla_nest_end(skb, nest);
3131
+
3132
+ return skb->len;
3133
+
3134
+nla_put_failure_locked:
3135
+ spin_unlock(&tp->lock);
19173136 nla_put_failure:
19183137 nla_nest_cancel(skb, nest);
19193138 return -1;
....@@ -1925,7 +3144,7 @@
19253144 struct fl_flow_key *key, *mask;
19263145 struct nlattr *nest;
19273146
1928
- nest = nla_nest_start(skb, TCA_OPTIONS);
3147
+ nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
19293148 if (!nest)
19303149 goto nla_put_failure;
19313150
....@@ -1957,22 +3176,39 @@
19573176 }
19583177 }
19593178
3179
+static bool fl_delete_empty(struct tcf_proto *tp)
3180
+{
3181
+ struct cls_fl_head *head = fl_head_dereference(tp);
3182
+
3183
+ spin_lock(&tp->lock);
3184
+ tp->deleting = idr_is_empty(&head->handle_idr);
3185
+ spin_unlock(&tp->lock);
3186
+
3187
+ return tp->deleting;
3188
+}
3189
+
19603190 static struct tcf_proto_ops cls_fl_ops __read_mostly = {
19613191 .kind = "flower",
19623192 .classify = fl_classify,
19633193 .init = fl_init,
19643194 .destroy = fl_destroy,
19653195 .get = fl_get,
3196
+ .put = fl_put,
19663197 .change = fl_change,
19673198 .delete = fl_delete,
3199
+ .delete_empty = fl_delete_empty,
19683200 .walk = fl_walk,
19693201 .reoffload = fl_reoffload,
3202
+ .hw_add = fl_hw_add,
3203
+ .hw_del = fl_hw_del,
19703204 .dump = fl_dump,
3205
+ .terse_dump = fl_terse_dump,
19713206 .bind_class = fl_bind_class,
19723207 .tmplt_create = fl_tmplt_create,
19733208 .tmplt_destroy = fl_tmplt_destroy,
19743209 .tmplt_dump = fl_tmplt_dump,
19753210 .owner = THIS_MODULE,
3211
+ .flags = TCF_PROTO_OPS_DOIT_UNLOCKED,
19763212 };
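/* The updated cls_fl_ops above advertise TCF_PROTO_OPS_DOIT_UNLOCKED:
 * flower now does its own synchronization (the tp->lock spinlock, RCU and
 * per-filter refcounts) instead of relying on rtnl for every operation.
 * That is why the ops table also gains .put (pairing with .get to drop a
 * filter reference), .delete_empty (lets cls_api prune a classifier whose
 * handle idr is empty, see fl_delete_empty() above), .hw_add/.hw_del
 * (maintain the hw_filters list) and .terse_dump (a dump variant that skips
 * the flow key and only emits the handle, flags and terse actions).
 */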
19773213
19783214 static int __init cls_fl_init(void)