2024-10-22 8ac6c7a54ed1b98d142dce24b11c6de6a1e239a5
kernel/net/sched/cls_flower.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * net/sched/cls_flower.c		Flower classifier
  *
  * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
  */

 #include <linux/kernel.h>
@@ -14,6 +10,7 @@
 #include <linux/module.h>
 #include <linux/rhashtable.h>
 #include <linux/workqueue.h>
+#include <linux/refcount.h>

 #include <linux/if_ether.h>
 #include <linux/in6.h>
@@ -25,12 +22,21 @@
 #include <net/ip.h>
 #include <net/flow_dissector.h>
 #include <net/geneve.h>
+#include <net/vxlan.h>
+#include <net/erspan.h>

 #include <net/dst.h>
 #include <net/dst_metadata.h>

+#include <uapi/linux/netfilter/nf_conntrack_common.h>
+
+#define TCA_FLOWER_KEY_CT_FLAGS_MAX \
+		((__TCA_FLOWER_KEY_CT_FLAGS_MAX - 1) << 1)
+#define TCA_FLOWER_KEY_CT_FLAGS_MASK \
+		(TCA_FLOWER_KEY_CT_FLAGS_MAX - 1)
+
 struct fl_flow_key {
-	int indev_ifindex;
+	struct flow_dissector_key_meta meta;
 	struct flow_dissector_key_control control;
 	struct flow_dissector_key_control enc_control;
 	struct flow_dissector_key_basic basic;
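The two CT macros above size the conntrack state attribute: doubling the highest enum value gives an exclusive upper bound, and subtracting one turns it into a mask covering every defined flag bit. A small userspace sketch of the arithmetic, assuming the uapi enum lays the flags out as single bits up to the reply flag (the CT_FLAGS_* names below are illustrative stand-ins for the TCA_FLOWER_KEY_CT_FLAGS_* values, not the kernel identifiers):

#include <stdio.h>

/* Illustrative stand-ins; bit layout assumed from the uapi header. */
enum {
	CT_FLAGS_NEW         = 1 << 0,
	CT_FLAGS_ESTABLISHED = 1 << 1,
	CT_FLAGS_TRACKED     = 1 << 2,
	CT_FLAGS_RELATED     = 1 << 3,
	CT_FLAGS_INVALID     = 1 << 4,
	CT_FLAGS_REPLY       = 1 << 5,
	__CT_FLAGS_MAX                    /* (1 << 5) + 1 == 33 */
};

#define CT_FLAGS_MAX	((__CT_FLAGS_MAX - 1) << 1)	/* 32 << 1 == 64 */
#define CT_FLAGS_MASK	(CT_FLAGS_MAX - 1)		/* 0x3f: all six bits */

int main(void)
{
	printf("max=%d mask=0x%x\n", CT_FLAGS_MAX, CT_FLAGS_MASK);
	return 0;	/* prints: max=64 mask=0x3f */
}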
@@ -55,6 +61,15 @@
 	struct flow_dissector_key_ip ip;
 	struct flow_dissector_key_ip enc_ip;
 	struct flow_dissector_key_enc_opts enc_opts;
+	union {
+		struct flow_dissector_key_ports tp;
+		struct {
+			struct flow_dissector_key_ports tp_min;
+			struct flow_dissector_key_ports tp_max;
+		};
+	} tp_range;
+	struct flow_dissector_key_ct ct;
+	struct flow_dissector_key_hash hash;
 } __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */

 struct fl_flow_mask_range {
@@ -65,6 +80,7 @@
 struct fl_flow_mask {
 	struct fl_flow_key key;
 	struct fl_flow_mask_range range;
+	u32 flags;
 	struct rhash_head ht_node;
 	struct rhashtable ht;
 	struct rhashtable_params filter_ht_params;
@@ -72,6 +88,7 @@
 	struct list_head filters;
 	struct rcu_work rwork;
 	struct list_head list;
+	refcount_t refcnt;
 };

 struct fl_flow_tmplt {
@@ -83,7 +100,9 @@
 struct cls_fl_head {
 	struct rhashtable ht;
+	spinlock_t masks_lock; /* Protect masks list */
 	struct list_head masks;
+	struct list_head hw_filters;
 	struct rcu_work rwork;
 	struct idr handle_idr;
 };
@@ -96,11 +115,18 @@
 	struct tcf_result res;
 	struct fl_flow_key key;
 	struct list_head list;
+	struct list_head hw_list;
 	u32 handle;
 	u32 flags;
-	unsigned int in_hw_count;
+	u32 in_hw_count;
 	struct rcu_work rwork;
 	struct net_device *hw_dev;
+	/* Flower classifier is unlocked, which means that its reference counter
+	 * can be changed concurrently without any kind of external
+	 * synchronization. Use atomic reference counter to be concurrency-safe.
+	 */
+	refcount_t refcnt;
+	bool deleted;
 };

 static const struct rhashtable_params mask_ht_params = {
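The refcnt and deleted fields added above are what let flower run without rtnl: every code path that hands out a filter pointer must first win a reference. The lookup side of the idiom appears further down as __fl_get(); here is a minimal sketch of the pattern with a hypothetical obj type (idr_find(), refcount_inc_not_zero() and the RCU calls are the real kernel APIs):

#include <linux/idr.h>
#include <linux/rcupdate.h>
#include <linux/refcount.h>

struct obj {
	refcount_t refcnt;
};

/* An object found under RCU may already have dropped its last
 * reference and be queued for freeing, so a plain refcount_inc()
 * would be a use-after-free in the making; refcount_inc_not_zero()
 * refuses the increment instead and the lookup fails cleanly.
 */
static struct obj *obj_get(struct idr *idr, u32 handle)
{
	struct obj *o;

	rcu_read_lock();
	o = idr_find(idr, handle);
	if (o && !refcount_inc_not_zero(&o->refcnt))
		o = NULL;	/* lost the race against the final put */
	rcu_read_unlock();

	return o;
}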
@@ -179,37 +205,129 @@
 	memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
 }

-static struct cls_fl_filter *fl_lookup(struct fl_flow_mask *mask,
-				       struct fl_flow_key *mkey)
+static bool fl_range_port_dst_cmp(struct cls_fl_filter *filter,
+				  struct fl_flow_key *key,
+				  struct fl_flow_key *mkey)
+{
+	u16 min_mask, max_mask, min_val, max_val;
+
+	min_mask = ntohs(filter->mask->key.tp_range.tp_min.dst);
+	max_mask = ntohs(filter->mask->key.tp_range.tp_max.dst);
+	min_val = ntohs(filter->key.tp_range.tp_min.dst);
+	max_val = ntohs(filter->key.tp_range.tp_max.dst);
+
+	if (min_mask && max_mask) {
+		if (ntohs(key->tp_range.tp.dst) < min_val ||
+		    ntohs(key->tp_range.tp.dst) > max_val)
+			return false;
+
+		/* skb does not have min and max values */
+		mkey->tp_range.tp_min.dst = filter->mkey.tp_range.tp_min.dst;
+		mkey->tp_range.tp_max.dst = filter->mkey.tp_range.tp_max.dst;
+	}
+	return true;
+}
+
+static bool fl_range_port_src_cmp(struct cls_fl_filter *filter,
+				  struct fl_flow_key *key,
+				  struct fl_flow_key *mkey)
+{
+	u16 min_mask, max_mask, min_val, max_val;
+
+	min_mask = ntohs(filter->mask->key.tp_range.tp_min.src);
+	max_mask = ntohs(filter->mask->key.tp_range.tp_max.src);
+	min_val = ntohs(filter->key.tp_range.tp_min.src);
+	max_val = ntohs(filter->key.tp_range.tp_max.src);
+
+	if (min_mask && max_mask) {
+		if (ntohs(key->tp_range.tp.src) < min_val ||
+		    ntohs(key->tp_range.tp.src) > max_val)
+			return false;
+
+		/* skb does not have min and max values */
+		mkey->tp_range.tp_min.src = filter->mkey.tp_range.tp_min.src;
+		mkey->tp_range.tp_max.src = filter->mkey.tp_range.tp_max.src;
+	}
+	return true;
+}
+
+static struct cls_fl_filter *__fl_lookup(struct fl_flow_mask *mask,
+					 struct fl_flow_key *mkey)
 {
 	return rhashtable_lookup_fast(&mask->ht, fl_key_get_start(mkey, mask),
 				      mask->filter_ht_params);
 }

+static struct cls_fl_filter *fl_lookup_range(struct fl_flow_mask *mask,
+					     struct fl_flow_key *mkey,
+					     struct fl_flow_key *key)
+{
+	struct cls_fl_filter *filter, *f;
+
+	list_for_each_entry_rcu(filter, &mask->filters, list) {
+		if (!fl_range_port_dst_cmp(filter, key, mkey))
+			continue;
+
+		if (!fl_range_port_src_cmp(filter, key, mkey))
+			continue;
+
+		f = __fl_lookup(mask, mkey);
+		if (f)
+			return f;
+	}
+	return NULL;
+}
+
+static noinline_for_stack
+struct cls_fl_filter *fl_mask_lookup(struct fl_flow_mask *mask, struct fl_flow_key *key)
+{
+	struct fl_flow_key mkey;
+
+	fl_set_masked_key(&mkey, key, mask);
+	if ((mask->flags & TCA_FLOWER_MASK_FLAGS_RANGE))
+		return fl_lookup_range(mask, &mkey, key);
+
+	return __fl_lookup(mask, &mkey);
+}
+
+static u16 fl_ct_info_to_flower_map[] = {
+	[IP_CT_ESTABLISHED] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
+					TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED,
+	[IP_CT_RELATED] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
+					TCA_FLOWER_KEY_CT_FLAGS_RELATED,
+	[IP_CT_ESTABLISHED_REPLY] =	TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
+					TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED,
+	[IP_CT_RELATED_REPLY] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
+					TCA_FLOWER_KEY_CT_FLAGS_RELATED,
+	[IP_CT_NEW] =			TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
+					TCA_FLOWER_KEY_CT_FLAGS_NEW,
+};
+
 static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
 		       struct tcf_result *res)
 {
 	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
-	struct cls_fl_filter *f;
-	struct fl_flow_mask *mask;
 	struct fl_flow_key skb_key;
-	struct fl_flow_key skb_mkey;
+	struct fl_flow_mask *mask;
+	struct cls_fl_filter *f;

 	list_for_each_entry_rcu(mask, &head->masks, list) {
 		flow_dissector_init_keys(&skb_key.control, &skb_key.basic);
 		fl_clear_masked_range(&skb_key, mask);

-		skb_key.indev_ifindex = skb->skb_iif;
+		skb_flow_dissect_meta(skb, &mask->dissector, &skb_key);
 		/* skb_flow_dissect() does not set n_proto in case an unknown
 		 * protocol, so do it rather here.
 		 */
 		skb_key.basic.n_proto = skb_protocol(skb, false);
 		skb_flow_dissect_tunnel_info(skb, &mask->dissector, &skb_key);
+		skb_flow_dissect_ct(skb, &mask->dissector, &skb_key,
+				    fl_ct_info_to_flower_map,
+				    ARRAY_SIZE(fl_ct_info_to_flower_map));
+		skb_flow_dissect_hash(skb, &mask->dissector, &skb_key);
 		skb_flow_dissect(skb, &mask->dissector, &skb_key, 0);

-		fl_set_masked_key(&skb_mkey, &skb_key, mask);
-
-		f = fl_lookup(mask, &skb_mkey);
+		f = fl_mask_lookup(mask, &skb_key);
 		if (f && !tc_skip_sw(f->flags)) {
 			*res = f->res;
 			return tcf_exts_exec(skb, &f->exts, res);
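A dissected packet carries one source and one destination port, never a min/max pair, so range filters cannot be matched purely by hashing: fl_lookup_range() walks the mask's filter list and, when the packet's port falls inside a filter's range, copies that filter's masked min/max into the lookup key so the byte-for-byte hash lookup that follows can still hit. A self-contained userspace model of the destination-port check (struct and function names are illustrative; the logic mirrors fl_range_port_dst_cmp() above):

#include <arpa/inet.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct range_key {
	uint16_t dst_min;	/* network byte order, as in the kernel key */
	uint16_t dst_max;
};

static bool range_port_dst_cmp(const struct range_key *filter,
			       struct range_key *mkey, uint16_t pkt_dst)
{
	if (ntohs(pkt_dst) < ntohs(filter->dst_min) ||
	    ntohs(pkt_dst) > ntohs(filter->dst_max))
		return false;

	/* the skb has no min and max values: borrow the filter's own,
	 * so the masked key hashes to the entry stored at insert time
	 */
	mkey->dst_min = filter->dst_min;
	mkey->dst_max = filter->dst_max;
	return true;
}

int main(void)
{
	struct range_key filter = { htons(100), htons(200) };
	struct range_key mkey = { 0, 0 };

	printf("%d\n", range_port_dst_cmp(&filter, &mkey, htons(150)));	/* 1 */
	printf("%d\n", range_port_dst_cmp(&filter, &mkey, htons(250)));	/* 0 */
	return 0;
}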
@@ -226,16 +344,22 @@
 	if (!head)
 		return -ENOBUFS;

+	spin_lock_init(&head->masks_lock);
 	INIT_LIST_HEAD_RCU(&head->masks);
+	INIT_LIST_HEAD(&head->hw_filters);
 	rcu_assign_pointer(tp->root, head);
 	idr_init(&head->handle_idr);

 	return rhashtable_init(&head->ht, &mask_ht_params);
 }

-static void fl_mask_free(struct fl_flow_mask *mask)
+static void fl_mask_free(struct fl_flow_mask *mask, bool mask_init_done)
 {
-	rhashtable_destroy(&mask->ht);
+	/* temporary masks don't have their filters list and ht initialized */
+	if (mask_init_done) {
+		WARN_ON(!list_empty(&mask->filters));
+		rhashtable_destroy(&mask->ht);
+	}
 	kfree(mask);
 }

@@ -244,23 +368,41 @@
 	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
 						 struct fl_flow_mask, rwork);

-	fl_mask_free(mask);
+	fl_mask_free(mask, true);
 }

-static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask,
-			bool async)
+static void fl_uninit_mask_free_work(struct work_struct *work)
 {
-	if (!list_empty(&mask->filters))
+	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
+						 struct fl_flow_mask, rwork);
+
+	fl_mask_free(mask, false);
+}
+
+static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask)
+{
+	if (!refcount_dec_and_test(&mask->refcnt))
 		return false;

 	rhashtable_remove_fast(&head->ht, &mask->ht_node, mask_ht_params);
+
+	spin_lock(&head->masks_lock);
 	list_del_rcu(&mask->list);
-	if (async)
-		tcf_queue_work(&mask->rwork, fl_mask_free_work);
-	else
-		fl_mask_free(mask);
+	spin_unlock(&head->masks_lock);
+
+	tcf_queue_work(&mask->rwork, fl_mask_free_work);

 	return true;
+}
+
+static struct cls_fl_head *fl_head_dereference(struct tcf_proto *tp)
+{
+	/* Flower classifier only changes root pointer during init and destroy.
+	 * Users must obtain reference to tcf_proto instance before calling its
+	 * API, so tp->root pointer is protected from concurrent call to
+	 * fl_destroy() by reference counting.
+	 */
+	return rcu_dereference_raw(tp->root);
 }

 static void __fl_destroy_filter(struct cls_fl_filter *f)
@@ -275,52 +417,63 @@
 	struct cls_fl_filter *f = container_of(to_rcu_work(work),
 					       struct cls_fl_filter, rwork);

-	rtnl_lock();
 	__fl_destroy_filter(f);
-	rtnl_unlock();
 }

 static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f,
-				 struct netlink_ext_ack *extack)
+				 bool rtnl_held, struct netlink_ext_ack *extack)
 {
-	struct tc_cls_flower_offload cls_flower = {};
 	struct tcf_block *block = tp->chain->block;
+	struct flow_cls_offload cls_flower = {};

 	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
-	cls_flower.command = TC_CLSFLOWER_DESTROY;
+	cls_flower.command = FLOW_CLS_DESTROY;
 	cls_flower.cookie = (unsigned long) f;

-	tc_setup_cb_call(block, &f->exts, TC_SETUP_CLSFLOWER,
-			 &cls_flower, false);
-	tcf_block_offload_dec(block, &f->flags);
+	tc_setup_cb_destroy(block, tp, TC_SETUP_CLSFLOWER, &cls_flower, false,
+			    &f->flags, &f->in_hw_count, rtnl_held);
+
 }

 static int fl_hw_replace_filter(struct tcf_proto *tp,
-				struct cls_fl_filter *f,
+				struct cls_fl_filter *f, bool rtnl_held,
 				struct netlink_ext_ack *extack)
 {
-	struct tc_cls_flower_offload cls_flower = {};
 	struct tcf_block *block = tp->chain->block;
+	struct flow_cls_offload cls_flower = {};
 	bool skip_sw = tc_skip_sw(f->flags);
-	int err;
+	int err = 0;
+
+	cls_flower.rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts));
+	if (!cls_flower.rule)
+		return -ENOMEM;

 	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
-	cls_flower.command = TC_CLSFLOWER_REPLACE;
+	cls_flower.command = FLOW_CLS_REPLACE;
 	cls_flower.cookie = (unsigned long) f;
-	cls_flower.dissector = &f->mask->dissector;
-	cls_flower.mask = &f->mask->key;
-	cls_flower.key = &f->mkey;
-	cls_flower.exts = &f->exts;
+	cls_flower.rule->match.dissector = &f->mask->dissector;
+	cls_flower.rule->match.mask = &f->mask->key;
+	cls_flower.rule->match.key = &f->mkey;
 	cls_flower.classid = f->res.classid;

-	err = tc_setup_cb_call(block, &f->exts, TC_SETUP_CLSFLOWER,
-			       &cls_flower, skip_sw);
-	if (err < 0) {
-		fl_hw_destroy_filter(tp, f, NULL);
+	err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts);
+	if (err) {
+		kfree(cls_flower.rule);
+		if (skip_sw) {
+			NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
+			return err;
+		}
+		return 0;
+	}
+
+	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
+			      skip_sw, &f->flags, &f->in_hw_count, rtnl_held);
+	tc_cleanup_flow_action(&cls_flower.rule->action);
+	kfree(cls_flower.rule);
+
+	if (err) {
+		fl_hw_destroy_filter(tp, f, rtnl_held, NULL);
 		return err;
-	} else if (err > 0) {
-		f->in_hw_count = err;
-		tcf_block_offload_inc(block, &f->flags);
 	}

 	if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW))
@@ -329,40 +482,80 @@
 		return 0;
 }

-static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f)
+static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f,
+			       bool rtnl_held)
 {
-	struct tc_cls_flower_offload cls_flower = {};
 	struct tcf_block *block = tp->chain->block;
+	struct flow_cls_offload cls_flower = {};

 	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, NULL);
-	cls_flower.command = TC_CLSFLOWER_STATS;
+	cls_flower.command = FLOW_CLS_STATS;
 	cls_flower.cookie = (unsigned long) f;
-	cls_flower.exts = &f->exts;
 	cls_flower.classid = f->res.classid;

-	tc_setup_cb_call(block, &f->exts, TC_SETUP_CLSFLOWER,
-			 &cls_flower, false);
+	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false,
+			 rtnl_held);
+
+	tcf_exts_stats_update(&f->exts, cls_flower.stats.bytes,
+			      cls_flower.stats.pkts,
+			      cls_flower.stats.drops,
+			      cls_flower.stats.lastused,
+			      cls_flower.stats.used_hw_stats,
+			      cls_flower.stats.used_hw_stats_valid);
 }

-static bool __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
-			struct netlink_ext_ack *extack)
+static void __fl_put(struct cls_fl_filter *f)
 {
-	struct cls_fl_head *head = rtnl_dereference(tp->root);
-	bool async = tcf_exts_get_net(&f->exts);
-	bool last;
+	if (!refcount_dec_and_test(&f->refcnt))
+		return;

-	idr_remove(&head->handle_idr, f->handle);
-	list_del_rcu(&f->list);
-	last = fl_mask_put(head, f->mask, async);
-	if (!tc_skip_hw(f->flags))
-		fl_hw_destroy_filter(tp, f, extack);
-	tcf_unbind_filter(tp, &f->res);
-	if (async)
+	if (tcf_exts_get_net(&f->exts))
 		tcf_queue_work(&f->rwork, fl_destroy_filter_work);
 	else
 		__fl_destroy_filter(f);
+}

-	return last;
+static struct cls_fl_filter *__fl_get(struct cls_fl_head *head, u32 handle)
+{
+	struct cls_fl_filter *f;
+
+	rcu_read_lock();
+	f = idr_find(&head->handle_idr, handle);
+	if (f && !refcount_inc_not_zero(&f->refcnt))
+		f = NULL;
+	rcu_read_unlock();
+
+	return f;
+}
+
+static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
+		       bool *last, bool rtnl_held,
+		       struct netlink_ext_ack *extack)
+{
+	struct cls_fl_head *head = fl_head_dereference(tp);
+
+	*last = false;
+
+	spin_lock(&tp->lock);
+	if (f->deleted) {
+		spin_unlock(&tp->lock);
+		return -ENOENT;
+	}
+
+	f->deleted = true;
+	rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
+			       f->mask->filter_ht_params);
+	idr_remove(&head->handle_idr, f->handle);
+	list_del_rcu(&f->list);
+	spin_unlock(&tp->lock);
+
+	*last = fl_mask_put(head, f->mask);
+	if (!tc_skip_hw(f->flags))
+		fl_hw_destroy_filter(tp, f, rtnl_held, extack);
+	tcf_unbind_filter(tp, &f->res);
+	__fl_put(f);
+
+	return 0;
 }

 static void fl_destroy_sleepable(struct work_struct *work)
@@ -376,15 +569,18 @@
 	module_put(THIS_MODULE);
 }

-static void fl_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack)
+static void fl_destroy(struct tcf_proto *tp, bool rtnl_held,
+		       struct netlink_ext_ack *extack)
 {
-	struct cls_fl_head *head = rtnl_dereference(tp->root);
+	struct cls_fl_head *head = fl_head_dereference(tp);
 	struct fl_flow_mask *mask, *next_mask;
 	struct cls_fl_filter *f, *next;
+	bool last;

 	list_for_each_entry_safe(mask, next_mask, &head->masks, list) {
 		list_for_each_entry_safe(f, next, &mask->filters, list) {
-			if (__fl_delete(tp, f, extack))
+			__fl_delete(tp, f, &last, rtnl_held, extack);
+			if (last)
 				break;
 		}
 	}
@@ -394,11 +590,18 @@
 	tcf_queue_work(&head->rwork, fl_destroy_sleepable);
 }

+static void fl_put(struct tcf_proto *tp, void *arg)
+{
+	struct cls_fl_filter *f = arg;
+
+	__fl_put(f);
+}
+
 static void *fl_get(struct tcf_proto *tp, u32 handle)
 {
-	struct cls_fl_head *head = rtnl_dereference(tp->root);
+	struct cls_fl_head *head = fl_head_dereference(tp);

-	return idr_find(&head->handle_idr, handle);
+	return __fl_get(head, handle);
 }

 static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
@@ -472,6 +675,7 @@
 	[TCA_FLOWER_KEY_MPLS_BOS]	= { .type = NLA_U8 },
 	[TCA_FLOWER_KEY_MPLS_TC]	= { .type = NLA_U8 },
 	[TCA_FLOWER_KEY_MPLS_LABEL]	= { .type = NLA_U32 },
+	[TCA_FLOWER_KEY_MPLS_OPTS]	= { .type = NLA_NESTED },
 	[TCA_FLOWER_KEY_TCP_FLAGS]	= { .type = NLA_U16 },
 	[TCA_FLOWER_KEY_TCP_FLAGS_MASK]	= { .type = NLA_U16 },
 	[TCA_FLOWER_KEY_IP_TOS]		= { .type = NLA_U8 },
@@ -487,12 +691,31 @@
 	[TCA_FLOWER_KEY_ENC_IP_TTL_MASK] = { .type = NLA_U8 },
 	[TCA_FLOWER_KEY_ENC_OPTS]	= { .type = NLA_NESTED },
 	[TCA_FLOWER_KEY_ENC_OPTS_MASK]	= { .type = NLA_NESTED },
+	[TCA_FLOWER_KEY_CT_STATE]	=
+		NLA_POLICY_MASK(NLA_U16, TCA_FLOWER_KEY_CT_FLAGS_MASK),
+	[TCA_FLOWER_KEY_CT_STATE_MASK]	=
+		NLA_POLICY_MASK(NLA_U16, TCA_FLOWER_KEY_CT_FLAGS_MASK),
+	[TCA_FLOWER_KEY_CT_ZONE]	= { .type = NLA_U16 },
+	[TCA_FLOWER_KEY_CT_ZONE_MASK]	= { .type = NLA_U16 },
+	[TCA_FLOWER_KEY_CT_MARK]	= { .type = NLA_U32 },
+	[TCA_FLOWER_KEY_CT_MARK_MASK]	= { .type = NLA_U32 },
+	[TCA_FLOWER_KEY_CT_LABELS]	= { .type = NLA_BINARY,
+					    .len = 128 / BITS_PER_BYTE },
+	[TCA_FLOWER_KEY_CT_LABELS_MASK]	= { .type = NLA_BINARY,
+					    .len = 128 / BITS_PER_BYTE },
 	[TCA_FLOWER_FLAGS]		= { .type = NLA_U32 },
+	[TCA_FLOWER_KEY_HASH]		= { .type = NLA_U32 },
+	[TCA_FLOWER_KEY_HASH_MASK]	= { .type = NLA_U32 },
+
 };

 static const struct nla_policy
 enc_opts_policy[TCA_FLOWER_KEY_ENC_OPTS_MAX + 1] = {
+	[TCA_FLOWER_KEY_ENC_OPTS_UNSPEC]        = {
+		.strict_start_type = TCA_FLOWER_KEY_ENC_OPTS_VXLAN },
 	[TCA_FLOWER_KEY_ENC_OPTS_GENEVE]        = { .type = NLA_NESTED },
+	[TCA_FLOWER_KEY_ENC_OPTS_VXLAN]         = { .type = NLA_NESTED },
+	[TCA_FLOWER_KEY_ENC_OPTS_ERSPAN]        = { .type = NLA_NESTED },
 };

 static const struct nla_policy
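The NLA_POLICY_MASK() entries above let the generic netlink validator reject a CT state attribute before fl_set_key_ct() ever runs. A one-function model of the check it performs, to my reading of the policy semantics (any value with a bit outside the permitted mask is refused):

#include <stdint.h>

/* 0 means the attribute would be rejected (-EINVAL, with extack set
 * by the netlink validation core rather than by cls_flower itself).
 */
static int policy_mask_ok(uint16_t value, uint16_t mask)
{
	return (value & ~mask) == 0;
}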
@@ -503,50 +726,281 @@
 					       .len = 128 },
 };

+static const struct nla_policy
+vxlan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1] = {
+	[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]         = { .type = NLA_U32 },
+};
+
+static const struct nla_policy
+erspan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1] = {
+	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]        = { .type = NLA_U8 },
+	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]      = { .type = NLA_U32 },
+	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]        = { .type = NLA_U8 },
+	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]       = { .type = NLA_U8 },
+};
+
+static const struct nla_policy
+mpls_stack_entry_policy[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1] = {
+	[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]    = { .type = NLA_U8 },
+	[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]      = { .type = NLA_U8 },
+	[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]      = { .type = NLA_U8 },
+	[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]       = { .type = NLA_U8 },
+	[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]    = { .type = NLA_U32 },
+};
+
 static void fl_set_key_val(struct nlattr **tb,
 			   void *val, int val_type,
 			   void *mask, int mask_type, int len)
 {
 	if (!tb[val_type])
 		return;
-	memcpy(val, nla_data(tb[val_type]), len);
+	nla_memcpy(val, tb[val_type], len);
 	if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
 		memset(mask, 0xff, len);
 	else
-		memcpy(mask, nla_data(tb[mask_type]), len);
+		nla_memcpy(mask, tb[mask_type], len);
+}
+
+static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key,
+				 struct fl_flow_key *mask,
+				 struct netlink_ext_ack *extack)
+{
+	fl_set_key_val(tb, &key->tp_range.tp_min.dst,
+		       TCA_FLOWER_KEY_PORT_DST_MIN, &mask->tp_range.tp_min.dst,
+		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.dst));
+	fl_set_key_val(tb, &key->tp_range.tp_max.dst,
+		       TCA_FLOWER_KEY_PORT_DST_MAX, &mask->tp_range.tp_max.dst,
+		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.dst));
+	fl_set_key_val(tb, &key->tp_range.tp_min.src,
+		       TCA_FLOWER_KEY_PORT_SRC_MIN, &mask->tp_range.tp_min.src,
+		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.src));
+	fl_set_key_val(tb, &key->tp_range.tp_max.src,
+		       TCA_FLOWER_KEY_PORT_SRC_MAX, &mask->tp_range.tp_max.src,
+		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.src));
+
+	if (mask->tp_range.tp_min.dst != mask->tp_range.tp_max.dst) {
+		NL_SET_ERR_MSG(extack,
+			       "Both min and max destination ports must be specified");
+		return -EINVAL;
+	}
+	if (mask->tp_range.tp_min.src != mask->tp_range.tp_max.src) {
+		NL_SET_ERR_MSG(extack,
+			       "Both min and max source ports must be specified");
+		return -EINVAL;
+	}
+	if (mask->tp_range.tp_min.dst && mask->tp_range.tp_max.dst &&
+	    ntohs(key->tp_range.tp_max.dst) <=
+	    ntohs(key->tp_range.tp_min.dst)) {
+		NL_SET_ERR_MSG_ATTR(extack,
+				    tb[TCA_FLOWER_KEY_PORT_DST_MIN],
+				    "Invalid destination port range (min must be strictly smaller than max)");
+		return -EINVAL;
+	}
+	if (mask->tp_range.tp_min.src && mask->tp_range.tp_max.src &&
+	    ntohs(key->tp_range.tp_max.src) <=
+	    ntohs(key->tp_range.tp_min.src)) {
+		NL_SET_ERR_MSG_ATTR(extack,
+				    tb[TCA_FLOWER_KEY_PORT_SRC_MIN],
+				    "Invalid source port range (min must be strictly smaller than max)");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int fl_set_key_mpls_lse(const struct nlattr *nla_lse,
+			       struct flow_dissector_key_mpls *key_val,
+			       struct flow_dissector_key_mpls *key_mask,
+			       struct netlink_ext_ack *extack)
+{
+	struct nlattr *tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1];
+	struct flow_dissector_mpls_lse *lse_mask;
+	struct flow_dissector_mpls_lse *lse_val;
+	u8 lse_index;
+	u8 depth;
+	int err;
+
+	err = nla_parse_nested(tb, TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX, nla_lse,
+			       mpls_stack_entry_policy, extack);
+	if (err < 0)
+		return err;
+
+	if (!tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]) {
+		NL_SET_ERR_MSG(extack, "Missing MPLS option \"depth\"");
+		return -EINVAL;
+	}
+
+	depth = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]);
+
+	/* LSE depth starts at 1, for consistency with terminology used by
+	 * RFC 3031 (section 3.9), where depth 0 refers to unlabeled packets.
+	 */
+	if (depth < 1 || depth > FLOW_DIS_MPLS_MAX) {
+		NL_SET_ERR_MSG_ATTR(extack,
+				    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH],
+				    "Invalid MPLS depth");
+		return -EINVAL;
+	}
+	lse_index = depth - 1;
+
+	dissector_set_mpls_lse(key_val, lse_index);
+	dissector_set_mpls_lse(key_mask, lse_index);
+
+	lse_val = &key_val->ls[lse_index];
+	lse_mask = &key_mask->ls[lse_index];
+
+	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]) {
+		lse_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]);
+		lse_mask->mpls_ttl = MPLS_TTL_MASK;
+	}
+	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]) {
+		u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]);
+
+		if (bos & ~MPLS_BOS_MASK) {
+			NL_SET_ERR_MSG_ATTR(extack,
+					    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS],
+					    "Bottom Of Stack (BOS) must be 0 or 1");
+			return -EINVAL;
+		}
+		lse_val->mpls_bos = bos;
+		lse_mask->mpls_bos = MPLS_BOS_MASK;
+	}
+	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]) {
+		u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]);
+
+		if (tc & ~MPLS_TC_MASK) {
+			NL_SET_ERR_MSG_ATTR(extack,
+					    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC],
+					    "Traffic Class (TC) must be between 0 and 7");
+			return -EINVAL;
+		}
+		lse_val->mpls_tc = tc;
+		lse_mask->mpls_tc = MPLS_TC_MASK;
+	}
+	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]) {
+		u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]);
+
+		if (label & ~MPLS_LABEL_MASK) {
+			NL_SET_ERR_MSG_ATTR(extack,
+					    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL],
+					    "Label must be between 0 and 1048575");
+			return -EINVAL;
+		}
+		lse_val->mpls_label = label;
+		lse_mask->mpls_label = MPLS_LABEL_MASK;
+	}
+
+	return 0;
+}
+
+static int fl_set_key_mpls_opts(const struct nlattr *nla_mpls_opts,
+				struct flow_dissector_key_mpls *key_val,
+				struct flow_dissector_key_mpls *key_mask,
+				struct netlink_ext_ack *extack)
+{
+	struct nlattr *nla_lse;
+	int rem;
+	int err;
+
+	if (!(nla_mpls_opts->nla_type & NLA_F_NESTED)) {
+		NL_SET_ERR_MSG_ATTR(extack, nla_mpls_opts,
+				    "NLA_F_NESTED is missing");
+		return -EINVAL;
+	}
+
+	nla_for_each_nested(nla_lse, nla_mpls_opts, rem) {
+		if (nla_type(nla_lse) != TCA_FLOWER_KEY_MPLS_OPTS_LSE) {
+			NL_SET_ERR_MSG_ATTR(extack, nla_lse,
+					    "Invalid MPLS option type");
+			return -EINVAL;
+		}
+
+		err = fl_set_key_mpls_lse(nla_lse, key_val, key_mask, extack);
+		if (err < 0)
+			return err;
+	}
+	if (rem) {
+		NL_SET_ERR_MSG(extack,
+			       "Bytes leftover after parsing MPLS options");
+		return -EINVAL;
+	}
+
+	return 0;
 }

 static int fl_set_key_mpls(struct nlattr **tb,
 			   struct flow_dissector_key_mpls *key_val,
-			   struct flow_dissector_key_mpls *key_mask)
+			   struct flow_dissector_key_mpls *key_mask,
+			   struct netlink_ext_ack *extack)
 {
+	struct flow_dissector_mpls_lse *lse_mask;
+	struct flow_dissector_mpls_lse *lse_val;
+
+	if (tb[TCA_FLOWER_KEY_MPLS_OPTS]) {
+		if (tb[TCA_FLOWER_KEY_MPLS_TTL] ||
+		    tb[TCA_FLOWER_KEY_MPLS_BOS] ||
+		    tb[TCA_FLOWER_KEY_MPLS_TC] ||
+		    tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
+			NL_SET_ERR_MSG_ATTR(extack,
+					    tb[TCA_FLOWER_KEY_MPLS_OPTS],
+					    "MPLS label, Traffic Class, Bottom Of Stack and Time To Live must be encapsulated in the MPLS options attribute");
+			return -EBADMSG;
+		}
+
+		return fl_set_key_mpls_opts(tb[TCA_FLOWER_KEY_MPLS_OPTS],
+					    key_val, key_mask, extack);
+	}
+
+	lse_val = &key_val->ls[0];
+	lse_mask = &key_mask->ls[0];
+
 	if (tb[TCA_FLOWER_KEY_MPLS_TTL]) {
-		key_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TTL]);
-		key_mask->mpls_ttl = MPLS_TTL_MASK;
+		lse_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TTL]);
+		lse_mask->mpls_ttl = MPLS_TTL_MASK;
+		dissector_set_mpls_lse(key_val, 0);
+		dissector_set_mpls_lse(key_mask, 0);
 	}
 	if (tb[TCA_FLOWER_KEY_MPLS_BOS]) {
 		u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_BOS]);

-		if (bos & ~MPLS_BOS_MASK)
+		if (bos & ~MPLS_BOS_MASK) {
+			NL_SET_ERR_MSG_ATTR(extack,
+					    tb[TCA_FLOWER_KEY_MPLS_BOS],
+					    "Bottom Of Stack (BOS) must be 0 or 1");
 			return -EINVAL;
-		key_val->mpls_bos = bos;
-		key_mask->mpls_bos = MPLS_BOS_MASK;
+		}
+		lse_val->mpls_bos = bos;
+		lse_mask->mpls_bos = MPLS_BOS_MASK;
+		dissector_set_mpls_lse(key_val, 0);
+		dissector_set_mpls_lse(key_mask, 0);
 	}
 	if (tb[TCA_FLOWER_KEY_MPLS_TC]) {
 		u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TC]);

-		if (tc & ~MPLS_TC_MASK)
+		if (tc & ~MPLS_TC_MASK) {
+			NL_SET_ERR_MSG_ATTR(extack,
+					    tb[TCA_FLOWER_KEY_MPLS_TC],
+					    "Traffic Class (TC) must be between 0 and 7");
 			return -EINVAL;
-		key_val->mpls_tc = tc;
-		key_mask->mpls_tc = MPLS_TC_MASK;
+		}
+		lse_val->mpls_tc = tc;
+		lse_mask->mpls_tc = MPLS_TC_MASK;
+		dissector_set_mpls_lse(key_val, 0);
+		dissector_set_mpls_lse(key_mask, 0);
 	}
 	if (tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
 		u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_LABEL]);

-		if (label & ~MPLS_LABEL_MASK)
+		if (label & ~MPLS_LABEL_MASK) {
+			NL_SET_ERR_MSG_ATTR(extack,
+					    tb[TCA_FLOWER_KEY_MPLS_LABEL],
+					    "Label must be between 0 and 1048575");
 			return -EINVAL;
-		key_val->mpls_label = label;
-		key_mask->mpls_label = MPLS_LABEL_MASK;
+		}
+		lse_val->mpls_label = label;
+		lse_mask->mpls_label = MPLS_LABEL_MASK;
+		dissector_set_mpls_lse(key_val, 0);
+		dissector_set_mpls_lse(key_mask, 0);
 	}
 	return 0;
 }
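A worked example of the LSE indexing and bounds checks above, using the field widths implied by the error messages (20-bit label, 3-bit TC, 8-bit TTL; the mask values mirror include/linux/mpls.h):

#include <stdint.h>
#include <stdio.h>

#define MPLS_LABEL_MASK	0xfffffu	/* 20 bits: 0..1048575 */
#define MPLS_TC_MASK	0x7u		/* 3 bits:  0..7      */
#define MPLS_TTL_MASK	0xffu		/* 8 bits:  0..255    */

int main(void)
{
	uint8_t depth = 2;		/* netlink depth is 1-based */
	uint8_t lse_index = depth - 1;	/* index into key->ls[]     */

	/* label 1048575 passes the "& ~MASK" test, TC 9 fails it */
	printf("index=%u label_ok=%d tc_ok=%d\n", lse_index,
	       !(1048575u & ~MPLS_LABEL_MASK), !(9u & ~MPLS_TC_MASK));
	return 0;	/* prints: index=1 label_ok=1 tc_ok=0 */
}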
@@ -554,6 +1008,7 @@
 static void fl_set_key_vlan(struct nlattr **tb,
 			    __be16 ethertype,
 			    int vlan_id_key, int vlan_prio_key,
+			    int vlan_next_eth_type_key,
 			    struct flow_dissector_key_vlan *key_val,
 			    struct flow_dissector_key_vlan *key_mask)
 {
@@ -572,6 +1027,11 @@
 	}
 	key_val->vlan_tpid = ethertype;
 	key_mask->vlan_tpid = cpu_to_be16(~0);
+	if (tb[vlan_next_eth_type_key]) {
+		key_val->vlan_eth_type =
+			nla_get_be16(tb[vlan_next_eth_type_key]);
+		key_mask->vlan_eth_type = cpu_to_be16(~0);
+	}
 }

 static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
@@ -585,14 +1045,16 @@
 	}
 }

-static int fl_set_key_flags(struct nlattr **tb,
-			    u32 *flags_key, u32 *flags_mask)
+static int fl_set_key_flags(struct nlattr **tb, u32 *flags_key,
+			    u32 *flags_mask, struct netlink_ext_ack *extack)
 {
 	u32 key, mask;

 	/* mask is mandatory for flags */
-	if (!tb[TCA_FLOWER_KEY_FLAGS_MASK])
+	if (!tb[TCA_FLOWER_KEY_FLAGS_MASK]) {
+		NL_SET_ERR_MSG(extack, "Missing flags mask");
 		return -EINVAL;
+	}

 	key = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS]));
 	mask = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS_MASK]));
@@ -634,6 +1096,9 @@
 	if (option_len > sizeof(struct geneve_opt))
 		data_len = option_len - sizeof(struct geneve_opt);

+	if (key->enc_opts.len > FLOW_DIS_TUN_OPTS_MAX - 4)
+		return -ERANGE;
+
 	opt = (struct geneve_opt *)&key->enc_opts.data[key->enc_opts.len];
 	memset(opt, 0xff, option_len);
 	opt->length = data_len / 4;
@@ -650,8 +1115,9 @@
 		return -EINVAL;
 	}

-	err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX,
-			       nla, geneve_opt_policy, extack);
+	err = nla_parse_nested_deprecated(tb,
+					  TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX,
+					  nla, geneve_opt_policy, extack);
 	if (err < 0)
 		return err;

@@ -706,6 +1172,108 @@
 	return sizeof(struct geneve_opt) + data_len;
 }

+static int fl_set_vxlan_opt(const struct nlattr *nla, struct fl_flow_key *key,
+			    int depth, int option_len,
+			    struct netlink_ext_ack *extack)
+{
+	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1];
+	struct vxlan_metadata *md;
+	int err;
+
+	md = (struct vxlan_metadata *)&key->enc_opts.data[key->enc_opts.len];
+	memset(md, 0xff, sizeof(*md));
+
+	if (!depth)
+		return sizeof(*md);
+
+	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_VXLAN) {
+		NL_SET_ERR_MSG(extack, "Non-vxlan option type for mask");
+		return -EINVAL;
+	}
+
+	err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX, nla,
+			       vxlan_opt_policy, extack);
+	if (err < 0)
+		return err;
+
+	if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) {
+		NL_SET_ERR_MSG(extack, "Missing tunnel key vxlan option gbp");
+		return -EINVAL;
+	}
+
+	if (tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) {
+		md->gbp = nla_get_u32(tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]);
+		md->gbp &= VXLAN_GBP_MASK;
+	}
+
+	return sizeof(*md);
+}
+
+static int fl_set_erspan_opt(const struct nlattr *nla, struct fl_flow_key *key,
+			     int depth, int option_len,
+			     struct netlink_ext_ack *extack)
+{
+	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1];
+	struct erspan_metadata *md;
+	int err;
+
+	md = (struct erspan_metadata *)&key->enc_opts.data[key->enc_opts.len];
+	memset(md, 0xff, sizeof(*md));
+	md->version = 1;
+
+	if (!depth)
+		return sizeof(*md);
+
+	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_ERSPAN) {
+		NL_SET_ERR_MSG(extack, "Non-erspan option type for mask");
+		return -EINVAL;
+	}
+
+	err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX, nla,
+			       erspan_opt_policy, extack);
+	if (err < 0)
+		return err;
+
+	if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]) {
+		NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option ver");
+		return -EINVAL;
+	}
+
+	if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER])
+		md->version = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]);
+
+	if (md->version == 1) {
+		if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
+			NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option index");
+			return -EINVAL;
+		}
+		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
+			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX];
+			memset(&md->u, 0x00, sizeof(md->u));
+			md->u.index = nla_get_be32(nla);
+		}
+	} else if (md->version == 2) {
+		if (!option_len && (!tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR] ||
+				    !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID])) {
+			NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option dir or hwid");
+			return -EINVAL;
+		}
+		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]) {
+			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR];
+			md->u.md2.dir = nla_get_u8(nla);
+		}
+		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]) {
+			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID];
+			set_hwid(&md->u.md2, nla_get_u8(nla));
+		}
+	} else {
+		NL_SET_ERR_MSG(extack, "Tunnel key erspan option ver is incorrect");
+		return -EINVAL;
+	}
+
+	return sizeof(*md);
+}
+
 static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
 			  struct fl_flow_key *mask,
 			  struct netlink_ext_ack *extack)
@@ -713,29 +1281,38 @@
 	const struct nlattr *nla_enc_key, *nla_opt_key, *nla_opt_msk = NULL;
 	int err, option_len, key_depth, msk_depth = 0;

-	err = nla_validate_nested(tb[TCA_FLOWER_KEY_ENC_OPTS],
-				  TCA_FLOWER_KEY_ENC_OPTS_MAX,
-				  enc_opts_policy, extack);
+	err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS],
+					     TCA_FLOWER_KEY_ENC_OPTS_MAX,
+					     enc_opts_policy, extack);
 	if (err)
 		return err;

 	nla_enc_key = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS]);

 	if (tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]) {
-		err = nla_validate_nested(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK],
-					  TCA_FLOWER_KEY_ENC_OPTS_MAX,
-					  enc_opts_policy, extack);
+		err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK],
+						     TCA_FLOWER_KEY_ENC_OPTS_MAX,
+						     enc_opts_policy, extack);
 		if (err)
 			return err;

 		nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
 		msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
+		if (!nla_ok(nla_opt_msk, msk_depth)) {
+			NL_SET_ERR_MSG(extack, "Invalid nested attribute for masks");
+			return -EINVAL;
+		}
 	}

 	nla_for_each_attr(nla_opt_key, nla_enc_key,
 			  nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS]), key_depth) {
 		switch (nla_type(nla_opt_key)) {
 		case TCA_FLOWER_KEY_ENC_OPTS_GENEVE:
+			if (key->enc_opts.dst_opt_type &&
+			    key->enc_opts.dst_opt_type != TUNNEL_GENEVE_OPT) {
+				NL_SET_ERR_MSG(extack, "Duplicate type for geneve options");
+				return -EINVAL;
+			}
 			option_len = 0;
 			key->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
 			option_len = fl_set_geneve_opt(nla_opt_key, key,
@@ -760,14 +1337,153 @@
 				NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
 				return -EINVAL;
 			}
+			break;
+		case TCA_FLOWER_KEY_ENC_OPTS_VXLAN:
+			if (key->enc_opts.dst_opt_type) {
+				NL_SET_ERR_MSG(extack, "Duplicate type for vxlan options");
+				return -EINVAL;
+			}
+			option_len = 0;
+			key->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT;
+			option_len = fl_set_vxlan_opt(nla_opt_key, key,
+						      key_depth, option_len,
+						      extack);
+			if (option_len < 0)
+				return option_len;

-		if (msk_depth)
-			nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
+			key->enc_opts.len += option_len;
+			/* At the same time we need to parse through the mask
+			 * in order to verify exact and mask attribute lengths.
+			 */
+			mask->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT;
+			option_len = fl_set_vxlan_opt(nla_opt_msk, mask,
+						      msk_depth, option_len,
+						      extack);
+			if (option_len < 0)
+				return option_len;
+
+			mask->enc_opts.len += option_len;
+			if (key->enc_opts.len != mask->enc_opts.len) {
+				NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
+				return -EINVAL;
+			}
+			break;
+		case TCA_FLOWER_KEY_ENC_OPTS_ERSPAN:
+			if (key->enc_opts.dst_opt_type) {
+				NL_SET_ERR_MSG(extack, "Duplicate type for erspan options");
+				return -EINVAL;
+			}
+			option_len = 0;
+			key->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT;
+			option_len = fl_set_erspan_opt(nla_opt_key, key,
+						       key_depth, option_len,
+						       extack);
+			if (option_len < 0)
+				return option_len;
+
+			key->enc_opts.len += option_len;
+			/* At the same time we need to parse through the mask
+			 * in order to verify exact and mask attribute lengths.
+			 */
+			mask->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT;
+			option_len = fl_set_erspan_opt(nla_opt_msk, mask,
+						       msk_depth, option_len,
+						       extack);
+			if (option_len < 0)
+				return option_len;
+
+			mask->enc_opts.len += option_len;
+			if (key->enc_opts.len != mask->enc_opts.len) {
+				NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
+				return -EINVAL;
+			}
 			break;
 		default:
 			NL_SET_ERR_MSG(extack, "Unknown tunnel option type");
 			return -EINVAL;
 		}
+
+		if (!msk_depth)
+			continue;
+
+		if (!nla_ok(nla_opt_msk, msk_depth)) {
+			NL_SET_ERR_MSG(extack, "A mask attribute is invalid");
+			return -EINVAL;
+		}
+		nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
+	}
+
+	return 0;
+}
+
+static int fl_validate_ct_state(u16 state, struct nlattr *tb,
+				struct netlink_ext_ack *extack)
+{
+	if (state && !(state & TCA_FLOWER_KEY_CT_FLAGS_TRACKED)) {
+		NL_SET_ERR_MSG_ATTR(extack, tb,
+				    "no trk, so no other flag can be set");
+		return -EINVAL;
+	}
+
+	if (state & TCA_FLOWER_KEY_CT_FLAGS_NEW &&
+	    state & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED) {
+		NL_SET_ERR_MSG_ATTR(extack, tb,
+				    "new and est are mutually exclusive");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int fl_set_key_ct(struct nlattr **tb,
+			 struct flow_dissector_key_ct *key,
+			 struct flow_dissector_key_ct *mask,
+			 struct netlink_ext_ack *extack)
+{
+	if (tb[TCA_FLOWER_KEY_CT_STATE]) {
+		int err;
+
+		if (!IS_ENABLED(CONFIG_NF_CONNTRACK)) {
+			NL_SET_ERR_MSG(extack, "Conntrack isn't enabled");
+			return -EOPNOTSUPP;
+		}
+		fl_set_key_val(tb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
+			       &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
+			       sizeof(key->ct_state));
+
+		err = fl_validate_ct_state(key->ct_state & mask->ct_state,
+					   tb[TCA_FLOWER_KEY_CT_STATE_MASK],
+					   extack);
+		if (err)
+			return err;
+
+	}
+	if (tb[TCA_FLOWER_KEY_CT_ZONE]) {
+		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) {
+			NL_SET_ERR_MSG(extack, "Conntrack zones isn't enabled");
+			return -EOPNOTSUPP;
+		}
+		fl_set_key_val(tb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
+			       &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
+			       sizeof(key->ct_zone));
+	}
+	if (tb[TCA_FLOWER_KEY_CT_MARK]) {
+		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) {
+			NL_SET_ERR_MSG(extack, "Conntrack mark isn't enabled");
+			return -EOPNOTSUPP;
+		}
+		fl_set_key_val(tb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
+			       &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
+			       sizeof(key->ct_mark));
+	}
+	if (tb[TCA_FLOWER_KEY_CT_LABELS]) {
+		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) {
+			NL_SET_ERR_MSG(extack, "Conntrack labels aren't enabled");
+			return -EOPNOTSUPP;
+		}
+		fl_set_key_val(tb, key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
+			       mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
+			       sizeof(key->ct_labels));
 	}

 	return 0;
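The two rules in fl_validate_ct_state() are worth checking in isolation: nothing may be matched without the tracked flag, and new/established cannot be combined. A runnable model (the logic is taken verbatim from the function above; the bit values are assumed from the uapi header):

#include <stdio.h>

#define CT_NEW	(1 << 0)	/* assumed TCA_FLOWER_KEY_CT_FLAGS_* bits */
#define CT_EST	(1 << 1)
#define CT_TRK	(1 << 2)

static int validate_ct_state(unsigned int state)
{
	if (state && !(state & CT_TRK))
		return -1;	/* "no trk, so no other flag can be set" */
	if ((state & CT_NEW) && (state & CT_EST))
		return -1;	/* "new and est are mutually exclusive" */
	return 0;
}

int main(void)
{
	printf("%d %d %d\n",
	       validate_ct_state(CT_TRK | CT_EST),		/*  0, valid   */
	       validate_ct_state(CT_EST),			/* -1, no trk  */
	       validate_ct_state(CT_TRK | CT_NEW | CT_EST));	/* -1, new+est */
	return 0;
}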
@@ -779,15 +1495,14 @@
 {
 	__be16 ethertype;
 	int ret = 0;
-#ifdef CONFIG_NET_CLS_IND
+
 	if (tb[TCA_FLOWER_INDEV]) {
 		int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV], extack);
 		if (err < 0)
 			return err;
-		key->indev_ifindex = err;
-		mask->indev_ifindex = 0xffffffff;
+		key->meta.ingress_ifindex = err;
+		mask->meta.ingress_ifindex = 0xffffffff;
 	}
-#endif

 	fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
 		       mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
@@ -801,8 +1516,9 @@

 	if (eth_type_vlan(ethertype)) {
 		fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_VLAN_ID,
-				TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan,
-				&mask->vlan);
+				TCA_FLOWER_KEY_VLAN_PRIO,
+				TCA_FLOWER_KEY_VLAN_ETH_TYPE,
+				&key->vlan, &mask->vlan);

 		if (tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]) {
 			ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]);
@@ -810,6 +1526,7 @@
 			fl_set_key_vlan(tb, ethertype,
 					TCA_FLOWER_KEY_CVLAN_ID,
 					TCA_FLOWER_KEY_CVLAN_PRIO,
+					TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
 					&key->cvlan, &mask->cvlan);
 			fl_set_key_val(tb, &key->basic.n_proto,
 				       TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
@@ -901,7 +1618,7 @@
 			       sizeof(key->icmp.code));
 	} else if (key->basic.n_proto == htons(ETH_P_MPLS_UC) ||
 		   key->basic.n_proto == htons(ETH_P_MPLS_MC)) {
-		ret = fl_set_key_mpls(tb, &key->mpls, &mask->mpls);
+		ret = fl_set_key_mpls(tb, &key->mpls, &mask->mpls, extack);
 		if (ret)
 			return ret;
 	} else if (key->basic.n_proto == htons(ETH_P_ARP) ||
@@ -921,6 +1638,14 @@
 		fl_set_key_val(tb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
 			       mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
 			       sizeof(key->arp.tha));
+	}
+
+	if (key->basic.ip_proto == IPPROTO_TCP ||
+	    key->basic.ip_proto == IPPROTO_UDP ||
+	    key->basic.ip_proto == IPPROTO_SCTP) {
+		ret = fl_set_key_port_range(tb, key, mask, extack);
+		if (ret)
+			return ret;
 	}

 	if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] ||
@@ -969,14 +1694,23 @@

 	fl_set_key_ip(tb, true, &key->enc_ip, &mask->enc_ip);

+	fl_set_key_val(tb, &key->hash.hash, TCA_FLOWER_KEY_HASH,
+		       &mask->hash.hash, TCA_FLOWER_KEY_HASH_MASK,
+		       sizeof(key->hash.hash));
+
 	if (tb[TCA_FLOWER_KEY_ENC_OPTS]) {
 		ret = fl_set_enc_opt(tb, key, mask, extack);
 		if (ret)
 			return ret;
 	}

+	ret = fl_set_key_ct(tb, &key->ct, &mask->ct, extack);
+	if (ret)
+		return ret;
+
 	if (tb[TCA_FLOWER_KEY_FLAGS])
-		ret = fl_set_key_flags(tb, &key->control.flags, &mask->control.flags);
+		ret = fl_set_key_flags(tb, &key->control.flags,
+				       &mask->control.flags, extack);

 	return ret;
 }
@@ -1007,7 +1741,7 @@
 }

 #define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
-#define FL_KEY_MEMBER_SIZE(member) (sizeof(((struct fl_flow_key *) 0)->member))
+#define FL_KEY_MEMBER_SIZE(member) sizeof_field(struct fl_flow_key, member)

 #define FL_KEY_IS_MASKED(mask, member) \
 	memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member), \
@@ -1032,6 +1766,8 @@
 	struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
 	size_t cnt = 0;

+	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
+			     FLOW_DISSECTOR_KEY_META, meta);
 	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
 	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
@@ -1042,6 +1778,8 @@
 			     FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
 			     FLOW_DISSECTOR_KEY_PORTS, tp);
+	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
+			     FLOW_DISSECTOR_KEY_PORTS_RANGE, tp_range);
 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
 			     FLOW_DISSECTOR_KEY_IP, ip);
 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
@@ -1072,6 +1810,10 @@
 			     FLOW_DISSECTOR_KEY_ENC_IP, enc_ip);
 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
 			     FLOW_DISSECTOR_KEY_ENC_OPTS, enc_opts);
+	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
+			     FLOW_DISSECTOR_KEY_CT, ct);
+	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
+			     FLOW_DISSECTOR_KEY_HASH, hash);

 	skb_flow_dissector_init(dissector, keys, cnt);
 }
@@ -1088,6 +1830,12 @@

 	fl_mask_copy(newmask, mask);

+	if ((newmask->key.tp_range.tp_min.dst &&
+	     newmask->key.tp_range.tp_max.dst) ||
+	    (newmask->key.tp_range.tp_min.src &&
+	     newmask->key.tp_range.tp_max.src))
+		newmask->flags |= TCA_FLOWER_MASK_FLAGS_RANGE;
+
 	err = fl_init_mask_hashtable(newmask);
 	if (err)
 		goto errout_free;
@@ -1096,12 +1844,15 @@

 	INIT_LIST_HEAD_RCU(&newmask->filters);

-	err = rhashtable_insert_fast(&head->ht, &newmask->ht_node,
-				     mask_ht_params);
+	refcount_set(&newmask->refcnt, 1);
+	err = rhashtable_replace_fast(&head->ht, &mask->ht_node,
+				      &newmask->ht_node, mask_ht_params);
 	if (err)
 		goto errout_destroy;

+	spin_lock(&head->masks_lock);
 	list_add_tail_rcu(&newmask->list, &head->masks);
+	spin_unlock(&head->masks_lock);

 	return newmask;

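The next hunk reworks fl_check_assign_mask() around rhashtable_lookup_get_insert_fast(): the caller's not-yet-initialized mask is inserted as a placeholder, so concurrent creators of the same key serialize on the hashtable instead of a lock, and anyone who finds an existing entry must still win refcount_inc_not_zero() before using it. A condensed sketch of that decision tree (the rhashtable and refcount calls are the real APIs; the helper itself and its error handling are illustrative):

static struct fl_flow_mask *mask_get_or_reserve(struct rhashtable *ht,
						struct fl_flow_mask *tmp)
{
	struct fl_flow_mask *old;

	old = rhashtable_lookup_get_insert_fast(ht, &tmp->ht_node,
						mask_ht_params);
	if (!old)
		return tmp;		/* reserved; init it, then set refcnt */
	if (IS_ERR(old))
		return old;		/* hashtable error */
	if (!refcount_inc_not_zero(&old->refcnt))
		return ERR_PTR(-EAGAIN);	/* placeholder or dying mask */
	return old;			/* reuse the existing mask */
}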
....@@ -1119,40 +1870,71 @@
11191870 struct fl_flow_mask *mask)
11201871 {
11211872 struct fl_flow_mask *newmask;
1873
+ int ret = 0;
11221874
1123
- fnew->mask = rhashtable_lookup_fast(&head->ht, mask, mask_ht_params);
1875
+ rcu_read_lock();
1876
+
1877
+ /* Insert mask as temporary node to prevent concurrent creation of mask
1878
+ * with same key. Any concurrent lookups with same key will return
1879
+ * -EAGAIN because mask's refcnt is zero.
1880
+ */
1881
+ fnew->mask = rhashtable_lookup_get_insert_fast(&head->ht,
1882
+ &mask->ht_node,
1883
+ mask_ht_params);
11241884 if (!fnew->mask) {
1125
- if (fold)
1126
- return -EINVAL;
1885
+ rcu_read_unlock();
1886
+
1887
+ if (fold) {
1888
+ ret = -EINVAL;
1889
+ goto errout_cleanup;
1890
+ }
11271891
11281892 newmask = fl_create_new_mask(head, mask);
1129
- if (IS_ERR(newmask))
1130
- return PTR_ERR(newmask);
1893
+ if (IS_ERR(newmask)) {
1894
+ ret = PTR_ERR(newmask);
1895
+ goto errout_cleanup;
1896
+ }
11311897
11321898 fnew->mask = newmask;
1899
+ return 0;
1900
+ } else if (IS_ERR(fnew->mask)) {
1901
+ ret = PTR_ERR(fnew->mask);
11331902 } else if (fold && fold->mask != fnew->mask) {
1134
- return -EINVAL;
1903
+ ret = -EINVAL;
1904
+ } else if (!refcount_inc_not_zero(&fnew->mask->refcnt)) {
1905
+ /* Mask was deleted concurrently, try again */
1906
+ ret = -EAGAIN;
11351907 }
1908
+ rcu_read_unlock();
1909
+ return ret;
11361910
1137
- return 0;
1911
+errout_cleanup:
1912
+ rhashtable_remove_fast(&head->ht, &mask->ht_node,
1913
+ mask_ht_params);
1914
+ return ret;
11381915 }
11391916
11401917 static int fl_set_parms(struct net *net, struct tcf_proto *tp,
11411918 struct cls_fl_filter *f, struct fl_flow_mask *mask,
11421919 unsigned long base, struct nlattr **tb,
11431920 struct nlattr *est, bool ovr,
1144
- struct fl_flow_tmplt *tmplt,
1921
+ struct fl_flow_tmplt *tmplt, bool rtnl_held,
11451922 struct netlink_ext_ack *extack)
11461923 {
11471924 int err;
11481925
1149
- err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, extack);
1926
+ err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, rtnl_held,
1927
+ extack);
11501928 if (err < 0)
11511929 return err;
11521930
11531931 if (tb[TCA_FLOWER_CLASSID]) {
11541932 f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
1933
+ if (!rtnl_held)
1934
+ rtnl_lock();
11551935 tcf_bind_filter(tp, &f->res, base);
1936
+ if (!rtnl_held)
1937
+ rtnl_unlock();
11561938 }
11571939
11581940 err = fl_set_key(net, tb, &f->key, &mask->key, extack);
....@@ -1170,24 +1952,52 @@
11701952 return 0;
11711953 }
11721954
1955
+static int fl_ht_insert_unique(struct cls_fl_filter *fnew,
1956
+ struct cls_fl_filter *fold,
1957
+ bool *in_ht)
1958
+{
1959
+ struct fl_flow_mask *mask = fnew->mask;
1960
+ int err;
1961
+
1962
+ err = rhashtable_lookup_insert_fast(&mask->ht,
1963
+ &fnew->ht_node,
1964
+ mask->filter_ht_params);
1965
+ if (err) {
1966
+ *in_ht = false;
1967
+ /* It is okay if filter with same key exists when
1968
+ * overwriting.
1969
+ */
1970
+ return fold && err == -EEXIST ? 0 : err;
1971
+ }
1972
+
1973
+ *in_ht = true;
1974
+ return 0;
1975
+}
1976
+
11731977 static int fl_change(struct net *net, struct sk_buff *in_skb,
11741978 struct tcf_proto *tp, unsigned long base,
11751979 u32 handle, struct nlattr **tca,
1176
- void **arg, bool ovr, struct netlink_ext_ack *extack)
1980
+ void **arg, bool ovr, bool rtnl_held,
1981
+ struct netlink_ext_ack *extack)
11771982 {
1178
- struct cls_fl_head *head = rtnl_dereference(tp->root);
1983
+ struct cls_fl_head *head = fl_head_dereference(tp);
11791984 struct cls_fl_filter *fold = *arg;
11801985 struct cls_fl_filter *fnew;
11811986 struct fl_flow_mask *mask;
11821987 struct nlattr **tb;
1988
+ bool in_ht;
11831989 int err;
11841990
1185
- if (!tca[TCA_OPTIONS])
1186
- return -EINVAL;
1991
+ if (!tca[TCA_OPTIONS]) {
1992
+ err = -EINVAL;
1993
+ goto errout_fold;
1994
+ }
11871995
11881996 mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL);
1189
- if (!mask)
1190
- return -ENOBUFS;
1997
+ if (!mask) {
1998
+ err = -ENOBUFS;
1999
+ goto errout_fold;
2000
+ }
11912001
11922002 tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
11932003 if (!tb) {
....@@ -1195,8 +2005,8 @@
11952005 goto errout_mask_alloc;
11962006 }
11972007
1198
- err = nla_parse_nested(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS],
1199
- fl_policy, NULL);
2008
+ err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
2009
+ tca[TCA_OPTIONS], fl_policy, NULL);
12002010 if (err < 0)
12012011 goto errout_tb;
12022012
....@@ -1210,8 +2020,10 @@
12102020 err = -ENOBUFS;
12112021 goto errout_tb;
12122022 }
2023
+ INIT_LIST_HEAD(&fnew->hw_list);
2024
+ refcount_set(&fnew->refcnt, 1);
12132025
1214
-	err = tcf_exts_init(&fnew->exts, TCA_FLOWER_ACT, 0);
+	err = tcf_exts_init(&fnew->exts, net, TCA_FLOWER_ACT, 0);
 	if (err < 0)
 		goto errout;

@@ -1225,7 +2037,7 @@
 	}

 	err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE], ovr,
-			   tp->chain->tmplt_priv, extack);
+			   tp->chain->tmplt_priv, rtnl_held, extack);
 	if (err)
 		goto errout;

@@ -1233,189 +2045,320 @@
 	if (err)
 		goto errout;

-	if (!handle) {
-		handle = 1;
-		err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
-				    INT_MAX, GFP_KERNEL);
-	} else if (!fold) {
-		/* user specifies a handle and it doesn't exist */
-		err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
-				    handle, GFP_KERNEL);
-	}
+	err = fl_ht_insert_unique(fnew, fold, &in_ht);
 	if (err)
 		goto errout_mask;
-	fnew->handle = handle;
-
-	if (!tc_skip_sw(fnew->flags)) {
-		if (!fold && fl_lookup(fnew->mask, &fnew->mkey)) {
-			err = -EEXIST;
-			goto errout_idr;
-		}
-
-		err = rhashtable_insert_fast(&fnew->mask->ht, &fnew->ht_node,
-					     fnew->mask->filter_ht_params);
-		if (err)
-			goto errout_idr;
-	}

 	if (!tc_skip_hw(fnew->flags)) {
-		err = fl_hw_replace_filter(tp, fnew, extack);
+		err = fl_hw_replace_filter(tp, fnew, rtnl_held, extack);
 		if (err)
-			goto errout_mask;
+			goto errout_ht;
 	}

 	if (!tc_in_hw(fnew->flags))
 		fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;

+	spin_lock(&tp->lock);
+
+	/* tp was deleted concurrently. -EAGAIN will cause caller to lookup
+	 * proto again or create new one, if necessary.
+	 */
+	if (tp->deleting) {
+		err = -EAGAIN;
+		goto errout_hw;
+	}
+
 	if (fold) {
-		if (!tc_skip_sw(fold->flags))
-			rhashtable_remove_fast(&fold->mask->ht,
-					       &fold->ht_node,
-					       fold->mask->filter_ht_params);
+		/* Fold filter was deleted concurrently. Retry lookup. */
+		if (fold->deleted) {
+			err = -EAGAIN;
+			goto errout_hw;
+		}
+
+		fnew->handle = handle;
+
+		if (!in_ht) {
+			struct rhashtable_params params =
+				fnew->mask->filter_ht_params;
+
+			err = rhashtable_insert_fast(&fnew->mask->ht,
+						     &fnew->ht_node,
+						     params);
+			if (err)
+				goto errout_hw;
+			in_ht = true;
+		}
+
+		refcount_inc(&fnew->refcnt);
+		rhashtable_remove_fast(&fold->mask->ht,
+				       &fold->ht_node,
+				       fold->mask->filter_ht_params);
+		idr_replace(&head->handle_idr, fnew, fnew->handle);
+		list_replace_rcu(&fold->list, &fnew->list);
+		fold->deleted = true;
+
+		spin_unlock(&tp->lock);
+
+		fl_mask_put(head, fold->mask);
 		if (!tc_skip_hw(fold->flags))
-			fl_hw_destroy_filter(tp, fold, NULL);
+			fl_hw_destroy_filter(tp, fold, rtnl_held, NULL);
+		tcf_unbind_filter(tp, &fold->res);
+		/* Caller holds reference to fold, so refcnt is always > 0
+		 * after this.
+		 */
+		refcount_dec(&fold->refcnt);
+		__fl_put(fold);
+	} else {
+		if (handle) {
+			/* user specifies a handle and it doesn't exist */
+			err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
+					    handle, GFP_ATOMIC);
+
+			/* Filter with specified handle was concurrently
+			 * inserted after initial check in cls_api. This is not
+			 * necessarily an error if NLM_F_EXCL is not set in
+			 * message flags. Returning EAGAIN will cause cls_api to
+			 * try to update concurrently inserted rule.
+			 */
+			if (err == -ENOSPC)
+				err = -EAGAIN;
+		} else {
+			handle = 1;
+			err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
+					    INT_MAX, GFP_ATOMIC);
+		}
+		if (err)
+			goto errout_hw;
+
+		refcount_inc(&fnew->refcnt);
+		fnew->handle = handle;
+		list_add_tail_rcu(&fnew->list, &fnew->mask->filters);
+		spin_unlock(&tp->lock);
 	}

 	*arg = fnew;

-	if (fold) {
-		idr_replace(&head->handle_idr, fnew, fnew->handle);
-		list_replace_rcu(&fold->list, &fnew->list);
-		tcf_unbind_filter(tp, &fold->res);
-		tcf_exts_get_net(&fold->exts);
-		tcf_queue_work(&fold->rwork, fl_destroy_filter_work);
-	} else {
-		list_add_tail_rcu(&fnew->list, &fnew->mask->filters);
-	}
-
 	kfree(tb);
-	kfree(mask);
+	tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
 	return 0;

-errout_idr:
-	if (!fold)
-		idr_remove(&head->handle_idr, fnew->handle);
-
+errout_ht:
+	spin_lock(&tp->lock);
+errout_hw:
+	fnew->deleted = true;
+	spin_unlock(&tp->lock);
+	if (!tc_skip_hw(fnew->flags))
+		fl_hw_destroy_filter(tp, fnew, rtnl_held, NULL);
+	if (in_ht)
+		rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node,
+				       fnew->mask->filter_ht_params);
 errout_mask:
-	fl_mask_put(head, fnew->mask, false);
-
+	fl_mask_put(head, fnew->mask);
 errout:
-	tcf_exts_destroy(&fnew->exts);
-	kfree(fnew);
+	__fl_put(fnew);
 errout_tb:
 	kfree(tb);
 errout_mask_alloc:
-	kfree(mask);
+	tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
+errout_fold:
+	if (fold)
+		__fl_put(fold);
 	return err;
 }

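The replace path above is the core of the unlocked flower rework: everything that publishes or unpublishes a filter (handle_idr, the mask's filter list, the deleted flags) is mutated only while holding tp->lock, and a race lost to a concurrent delete is surfaced as -EAGAIN so that cls_api simply redoes the lookup. A minimal userspace sketch of that pattern, with C11 atomics standing in for refcount_t and a mutex standing in for the spinlock (names are illustrative, not taken from this file):

#include <errno.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

struct filter {
	atomic_int refcnt;
	bool deleted;
};

static pthread_mutex_t tp_lock = PTHREAD_MUTEX_INITIALIZER;

/* Replace @fold with @fnew; the caller holds a reference to both. */
static int replace_filter(struct filter *fold, struct filter *fnew)
{
	pthread_mutex_lock(&tp_lock);
	if (fold->deleted) {
		/* Lost the race with a concurrent delete: tell the
		 * caller to redo the lookup, as fl_change() does.
		 */
		pthread_mutex_unlock(&tp_lock);
		return -EAGAIN;
	}
	atomic_fetch_add(&fnew->refcnt, 1);	/* reference now owned by the table */
	fold->deleted = true;			/* unpublish the old filter */
	pthread_mutex_unlock(&tp_lock);

	atomic_fetch_sub(&fold->refcnt, 1);	/* drop the table's reference */
	return 0;
}

The invariant mirrors fl_change(): an object may be freed only once it is both unpublished (deleted set under the lock) and its last reference is gone.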
 static int fl_delete(struct tcf_proto *tp, void *arg, bool *last,
-		     struct netlink_ext_ack *extack)
+		     bool rtnl_held, struct netlink_ext_ack *extack)
 {
-	struct cls_fl_head *head = rtnl_dereference(tp->root);
+	struct cls_fl_head *head = fl_head_dereference(tp);
 	struct cls_fl_filter *f = arg;
+	bool last_on_mask;
+	int err = 0;

-	if (!tc_skip_sw(f->flags))
-		rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
-				       f->mask->filter_ht_params);
-	__fl_delete(tp, f, extack);
+	err = __fl_delete(tp, f, &last_on_mask, rtnl_held, extack);
 	*last = list_empty(&head->masks);
-	return 0;
+	__fl_put(f);
+
+	return err;
 }

-static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg)
+static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg,
+		    bool rtnl_held)
 {
-	struct cls_fl_head *head = rtnl_dereference(tp->root);
+	struct cls_fl_head *head = fl_head_dereference(tp);
+	unsigned long id = arg->cookie, tmp;
 	struct cls_fl_filter *f;

 	arg->count = arg->skip;

-	while ((f = idr_get_next_ul(&head->handle_idr,
-				    &arg->cookie)) != NULL) {
+	rcu_read_lock();
+	idr_for_each_entry_continue_ul(&head->handle_idr, f, tmp, id) {
+		/* don't return filters that are being deleted */
+		if (!refcount_inc_not_zero(&f->refcnt))
+			continue;
+		rcu_read_unlock();
+
 		if (arg->fn(tp, f, arg) < 0) {
+			__fl_put(f);
 			arg->stop = 1;
+			rcu_read_lock();
 			break;
 		}
-		arg->cookie = f->handle + 1;
+		__fl_put(f);
 		arg->count++;
+		rcu_read_lock();
 	}
+	rcu_read_unlock();
+	arg->cookie = id;
 }

-static int fl_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
+static struct cls_fl_filter *
+fl_get_next_hw_filter(struct tcf_proto *tp, struct cls_fl_filter *f, bool add)
+{
+	struct cls_fl_head *head = fl_head_dereference(tp);
+
+	spin_lock(&tp->lock);
+	if (list_empty(&head->hw_filters)) {
+		spin_unlock(&tp->lock);
+		return NULL;
+	}
+
+	if (!f)
+		f = list_entry(&head->hw_filters, struct cls_fl_filter,
+			       hw_list);
+	list_for_each_entry_continue(f, &head->hw_filters, hw_list) {
+		if (!(add && f->deleted) && refcount_inc_not_zero(&f->refcnt)) {
+			spin_unlock(&tp->lock);
+			return f;
+		}
+	}
+
+	spin_unlock(&tp->lock);
+	return NULL;
+}
+
+static int fl_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
 			void *cb_priv, struct netlink_ext_ack *extack)
 {
-	struct cls_fl_head *head = rtnl_dereference(tp->root);
-	struct tc_cls_flower_offload cls_flower = {};
 	struct tcf_block *block = tp->chain->block;
-	struct fl_flow_mask *mask;
-	struct cls_fl_filter *f;
+	struct flow_cls_offload cls_flower = {};
+	struct cls_fl_filter *f = NULL;
 	int err;

-	list_for_each_entry(mask, &head->masks, list) {
-		list_for_each_entry(f, &mask->filters, list) {
-			if (tc_skip_hw(f->flags))
-				continue;
+	/* hw_filters list can only be changed by hw offload functions after
+	 * obtaining rtnl lock. Make sure it is not changed while reoffload is
+	 * iterating it.
+	 */
+	ASSERT_RTNL();

-			tc_cls_common_offload_init(&cls_flower.common, tp,
-						   f->flags, extack);
-			cls_flower.command = add ?
-				TC_CLSFLOWER_REPLACE : TC_CLSFLOWER_DESTROY;
-			cls_flower.cookie = (unsigned long)f;
-			cls_flower.dissector = &mask->dissector;
-			cls_flower.mask = &mask->key;
-			cls_flower.key = &f->mkey;
-			cls_flower.exts = &f->exts;
-			cls_flower.classid = f->res.classid;
-
-			err = cb(TC_SETUP_CLSFLOWER, &cls_flower, cb_priv);
-			if (err) {
-				if (add && tc_skip_sw(f->flags))
-					return err;
-				continue;
-			}
-
-			tc_cls_offload_cnt_update(block, &f->in_hw_count,
-						  &f->flags, add);
+	while ((f = fl_get_next_hw_filter(tp, f, add))) {
+		cls_flower.rule =
+			flow_rule_alloc(tcf_exts_num_actions(&f->exts));
+		if (!cls_flower.rule) {
+			__fl_put(f);
+			return -ENOMEM;
 		}
+
+		tc_cls_common_offload_init(&cls_flower.common, tp, f->flags,
+					   extack);
+		cls_flower.command = add ?
+			FLOW_CLS_REPLACE : FLOW_CLS_DESTROY;
+		cls_flower.cookie = (unsigned long)f;
+		cls_flower.rule->match.dissector = &f->mask->dissector;
+		cls_flower.rule->match.mask = &f->mask->key;
+		cls_flower.rule->match.key = &f->mkey;
+
+		err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts);
+		if (err) {
+			kfree(cls_flower.rule);
+			if (tc_skip_sw(f->flags)) {
+				NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
+				__fl_put(f);
+				return err;
+			}
+			goto next_flow;
+		}
+
+		cls_flower.classid = f->res.classid;
+
+		err = tc_setup_cb_reoffload(block, tp, add, cb,
+					    TC_SETUP_CLSFLOWER, &cls_flower,
+					    cb_priv, &f->flags,
+					    &f->in_hw_count);
+		tc_cleanup_flow_action(&cls_flower.rule->action);
+		kfree(cls_flower.rule);
+
+		if (err) {
+			__fl_put(f);
+			return err;
+		}
+next_flow:
+		__fl_put(f);
 	}

 	return 0;
 }

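fl_get_next_hw_filter() above is a resumable cursor: the caller hands back the last filter it received, and iteration continues from that point under tp->lock; when starting from scratch, list_entry() applied to the list head itself synthesizes a "previous" element for list_for_each_entry_continue(). A stripped-down userspace rendition of that cursor idiom, with the list plumbing re-implemented locally (names are illustrative):

#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct item {
	int val;
	struct list_head node;
};

/* Return the entry after @cur, or the first entry when @cur is NULL;
 * return NULL once the iteration wraps back around to the list head.
 */
static struct item *next_item(struct list_head *head, struct item *cur)
{
	struct list_head *pos = cur ? cur->node.next : head->next;

	return pos == head ? NULL : container_of(pos, struct item, node);
}

Restarting from a cursor instead of iterating in one long critical section is what lets fl_reoffload() drop tp->lock around the driver callbacks, which may sleep.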
-static void fl_hw_create_tmplt(struct tcf_chain *chain,
-			       struct fl_flow_tmplt *tmplt)
+static void fl_hw_add(struct tcf_proto *tp, void *type_data)
 {
-	struct tc_cls_flower_offload cls_flower = {};
+	struct flow_cls_offload *cls_flower = type_data;
+	struct cls_fl_filter *f =
+		(struct cls_fl_filter *) cls_flower->cookie;
+	struct cls_fl_head *head = fl_head_dereference(tp);
+
+	spin_lock(&tp->lock);
+	list_add(&f->hw_list, &head->hw_filters);
+	spin_unlock(&tp->lock);
+}
+
+static void fl_hw_del(struct tcf_proto *tp, void *type_data)
+{
+	struct flow_cls_offload *cls_flower = type_data;
+	struct cls_fl_filter *f =
+		(struct cls_fl_filter *) cls_flower->cookie;
+
+	spin_lock(&tp->lock);
+	if (!list_empty(&f->hw_list))
+		list_del_init(&f->hw_list);
+	spin_unlock(&tp->lock);
+}
+
+static int fl_hw_create_tmplt(struct tcf_chain *chain,
+			      struct fl_flow_tmplt *tmplt)
+{
+	struct flow_cls_offload cls_flower = {};
 	struct tcf_block *block = chain->block;
-	struct tcf_exts dummy_exts = { 0, };
+
+	cls_flower.rule = flow_rule_alloc(0);
+	if (!cls_flower.rule)
+		return -ENOMEM;

 	cls_flower.common.chain_index = chain->index;
-	cls_flower.command = TC_CLSFLOWER_TMPLT_CREATE;
+	cls_flower.command = FLOW_CLS_TMPLT_CREATE;
 	cls_flower.cookie = (unsigned long) tmplt;
-	cls_flower.dissector = &tmplt->dissector;
-	cls_flower.mask = &tmplt->mask;
-	cls_flower.key = &tmplt->dummy_key;
-	cls_flower.exts = &dummy_exts;
+	cls_flower.rule->match.dissector = &tmplt->dissector;
+	cls_flower.rule->match.mask = &tmplt->mask;
+	cls_flower.rule->match.key = &tmplt->dummy_key;

 	/* We don't care if driver (any of them) fails to handle this
 	 * call. It serves just as a hint for it.
 	 */
-	tc_setup_cb_call(block, NULL, TC_SETUP_CLSFLOWER,
-			 &cls_flower, false);
+	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
+	kfree(cls_flower.rule);
+
+	return 0;
 }

 static void fl_hw_destroy_tmplt(struct tcf_chain *chain,
 				struct fl_flow_tmplt *tmplt)
 {
-	struct tc_cls_flower_offload cls_flower = {};
+	struct flow_cls_offload cls_flower = {};
 	struct tcf_block *block = chain->block;

 	cls_flower.common.chain_index = chain->index;
-	cls_flower.command = TC_CLSFLOWER_TMPLT_DESTROY;
+	cls_flower.command = FLOW_CLS_TMPLT_DESTROY;
 	cls_flower.cookie = (unsigned long) tmplt;

-	tc_setup_cb_call(block, NULL, TC_SETUP_CLSFLOWER,
-			 &cls_flower, false);
+	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
 }

 static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain,
@@ -1432,8 +2375,8 @@
 	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
 	if (!tb)
 		return ERR_PTR(-ENOBUFS);
-	err = nla_parse_nested(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS],
-			       fl_policy, NULL);
+	err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
+					  tca[TCA_OPTIONS], fl_policy, NULL);
 	if (err)
 		goto errout_tb;

@@ -1446,12 +2389,14 @@
 	err = fl_set_key(net, tb, &tmplt->dummy_key, &tmplt->mask, extack);
 	if (err)
 		goto errout_tmplt;
-	kfree(tb);

 	fl_init_dissector(&tmplt->dissector, &tmplt->mask);

-	fl_hw_create_tmplt(chain, tmplt);
+	err = fl_hw_create_tmplt(chain, tmplt);
+	if (err)
+		goto errout_tmplt;

+	kfree(tb);
 	return tmplt;

 errout_tmplt:
@@ -1488,35 +2433,156 @@
 	return 0;
 }

+static int fl_dump_key_port_range(struct sk_buff *skb, struct fl_flow_key *key,
+				  struct fl_flow_key *mask)
+{
+	if (fl_dump_key_val(skb, &key->tp_range.tp_min.dst,
+			    TCA_FLOWER_KEY_PORT_DST_MIN,
+			    &mask->tp_range.tp_min.dst, TCA_FLOWER_UNSPEC,
+			    sizeof(key->tp_range.tp_min.dst)) ||
+	    fl_dump_key_val(skb, &key->tp_range.tp_max.dst,
+			    TCA_FLOWER_KEY_PORT_DST_MAX,
+			    &mask->tp_range.tp_max.dst, TCA_FLOWER_UNSPEC,
+			    sizeof(key->tp_range.tp_max.dst)) ||
+	    fl_dump_key_val(skb, &key->tp_range.tp_min.src,
+			    TCA_FLOWER_KEY_PORT_SRC_MIN,
+			    &mask->tp_range.tp_min.src, TCA_FLOWER_UNSPEC,
+			    sizeof(key->tp_range.tp_min.src)) ||
+	    fl_dump_key_val(skb, &key->tp_range.tp_max.src,
+			    TCA_FLOWER_KEY_PORT_SRC_MAX,
+			    &mask->tp_range.tp_max.src, TCA_FLOWER_UNSPEC,
+			    sizeof(key->tp_range.tp_max.src)))
+		return -1;
+
+	return 0;
+}
+
+static int fl_dump_key_mpls_opt_lse(struct sk_buff *skb,
+				    struct flow_dissector_key_mpls *mpls_key,
+				    struct flow_dissector_key_mpls *mpls_mask,
+				    u8 lse_index)
+{
+	struct flow_dissector_mpls_lse *lse_mask = &mpls_mask->ls[lse_index];
+	struct flow_dissector_mpls_lse *lse_key = &mpls_key->ls[lse_index];
+	int err;
+
+	err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH,
+			 lse_index + 1);
+	if (err)
+		return err;
+
+	if (lse_mask->mpls_ttl) {
+		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL,
+				 lse_key->mpls_ttl);
+		if (err)
+			return err;
+	}
+	if (lse_mask->mpls_bos) {
+		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS,
+				 lse_key->mpls_bos);
+		if (err)
+			return err;
+	}
+	if (lse_mask->mpls_tc) {
+		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TC,
+				 lse_key->mpls_tc);
+		if (err)
+			return err;
+	}
+	if (lse_mask->mpls_label) {
+		err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL,
+				  lse_key->mpls_label);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+static int fl_dump_key_mpls_opts(struct sk_buff *skb,
+				 struct flow_dissector_key_mpls *mpls_key,
+				 struct flow_dissector_key_mpls *mpls_mask)
+{
+	struct nlattr *opts;
+	struct nlattr *lse;
+	u8 lse_index;
+	int err;
+
+	opts = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS);
+	if (!opts)
+		return -EMSGSIZE;
+
+	for (lse_index = 0; lse_index < FLOW_DIS_MPLS_MAX; lse_index++) {
+		if (!(mpls_mask->used_lses & 1 << lse_index))
+			continue;
+
+		lse = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS_LSE);
+		if (!lse) {
+			err = -EMSGSIZE;
+			goto err_opts;
+		}
+
+		err = fl_dump_key_mpls_opt_lse(skb, mpls_key, mpls_mask,
+					       lse_index);
+		if (err)
+			goto err_opts_lse;
+		nla_nest_end(skb, lse);
+	}
+	nla_nest_end(skb, opts);
+
+	return 0;
+
+err_opts_lse:
+	nla_nest_cancel(skb, lse);
+err_opts:
+	nla_nest_cancel(skb, opts);
+
+	return err;
+}
+
 static int fl_dump_key_mpls(struct sk_buff *skb,
 			    struct flow_dissector_key_mpls *mpls_key,
 			    struct flow_dissector_key_mpls *mpls_mask)
 {
+	struct flow_dissector_mpls_lse *lse_mask;
+	struct flow_dissector_mpls_lse *lse_key;
 	int err;

-	if (!memchr_inv(mpls_mask, 0, sizeof(*mpls_mask)))
+	if (!mpls_mask->used_lses)
 		return 0;
-	if (mpls_mask->mpls_ttl) {
+
+	lse_mask = &mpls_mask->ls[0];
+	lse_key = &mpls_key->ls[0];
+
+	/* For backward compatibility, don't use the MPLS nested attributes if
+	 * the rule can be expressed using the old attributes.
+	 */
+	if (mpls_mask->used_lses & ~1 ||
+	    (!lse_mask->mpls_ttl && !lse_mask->mpls_bos &&
+	     !lse_mask->mpls_tc && !lse_mask->mpls_label))
+		return fl_dump_key_mpls_opts(skb, mpls_key, mpls_mask);
+
+	if (lse_mask->mpls_ttl) {
 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TTL,
-				 mpls_key->mpls_ttl);
+				 lse_key->mpls_ttl);
 		if (err)
 			return err;
 	}
-	if (mpls_mask->mpls_tc) {
+	if (lse_mask->mpls_tc) {
 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TC,
-				 mpls_key->mpls_tc);
+				 lse_key->mpls_tc);
 		if (err)
 			return err;
 	}
-	if (mpls_mask->mpls_label) {
+	if (lse_mask->mpls_label) {
 		err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_LABEL,
-				  mpls_key->mpls_label);
+				  lse_key->mpls_label);
 		if (err)
 			return err;
 	}
-	if (mpls_mask->mpls_bos) {
+	if (lse_mask->mpls_bos) {
 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_BOS,
-				 mpls_key->mpls_bos);
+				 lse_key->mpls_bos);
 		if (err)
 			return err;
 	}
@@ -1609,7 +2675,7 @@
 	struct nlattr *nest;
 	int opt_off = 0;

-	nest = nla_nest_start(skb, TCA_FLOWER_KEY_ENC_OPTS_GENEVE);
+	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GENEVE);
 	if (!nest)
 		goto nla_put_failure;

@@ -1636,6 +2702,95 @@
 	return -EMSGSIZE;
 }

+static int fl_dump_key_vxlan_opt(struct sk_buff *skb,
+				 struct flow_dissector_key_enc_opts *enc_opts)
+{
+	struct vxlan_metadata *md;
+	struct nlattr *nest;
+
+	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_VXLAN);
+	if (!nest)
+		goto nla_put_failure;
+
+	md = (struct vxlan_metadata *)&enc_opts->data[0];
+	if (nla_put_u32(skb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP, md->gbp))
+		goto nla_put_failure;
+
+	nla_nest_end(skb, nest);
+	return 0;
+
+nla_put_failure:
+	nla_nest_cancel(skb, nest);
+	return -EMSGSIZE;
+}
+
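fl_dump_key_vxlan_opt() shows the netlink shape all of the option dumpers here share: open a nest, emit the fields, and on any failure cancel the entire nest so a half-written attribute never reaches userspace. A hedged template of that pattern (kernel netlink API; the two attribute constants are hypothetical placeholders, not real TCA_* values):

static int dump_opt_template(struct sk_buff *skb, u32 value)
{
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, SOME_NESTED_ATTR);
	if (!nest)
		return -EMSGSIZE;

	if (nla_put_u32(skb, SOME_FIELD_ATTR, value))
		goto nla_put_failure;

	nla_nest_end(skb, nest);	/* patches the nest's length field */
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);	/* trims the skb back to the nest start */
	return -EMSGSIZE;
}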
+static int fl_dump_key_erspan_opt(struct sk_buff *skb,
+				  struct flow_dissector_key_enc_opts *enc_opts)
+{
+	struct erspan_metadata *md;
+	struct nlattr *nest;
+
+	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_ERSPAN);
+	if (!nest)
+		goto nla_put_failure;
+
+	md = (struct erspan_metadata *)&enc_opts->data[0];
+	if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER, md->version))
+		goto nla_put_failure;
+
+	if (md->version == 1 &&
+	    nla_put_be32(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX, md->u.index))
+		goto nla_put_failure;
+
+	if (md->version == 2 &&
+	    (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR,
+			md->u.md2.dir) ||
+	     nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID,
+			get_hwid(&md->u.md2))))
+		goto nla_put_failure;
+
+	nla_nest_end(skb, nest);
+	return 0;
+
+nla_put_failure:
+	nla_nest_cancel(skb, nest);
+	return -EMSGSIZE;
+}
+
+static int fl_dump_key_ct(struct sk_buff *skb,
+			  struct flow_dissector_key_ct *key,
+			  struct flow_dissector_key_ct *mask)
+{
+	if (IS_ENABLED(CONFIG_NF_CONNTRACK) &&
+	    fl_dump_key_val(skb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
+			    &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
+			    sizeof(key->ct_state)))
+		goto nla_put_failure;
+
+	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
+	    fl_dump_key_val(skb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
+			    &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
+			    sizeof(key->ct_zone)))
+		goto nla_put_failure;
+
+	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
+	    fl_dump_key_val(skb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
+			    &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
+			    sizeof(key->ct_mark)))
+		goto nla_put_failure;
+
+	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
+	    fl_dump_key_val(skb, &key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
+			    &mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
+			    sizeof(key->ct_labels)))
+		goto nla_put_failure;
+
+	return 0;
+
+nla_put_failure:
+	return -EMSGSIZE;
+}
+
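fl_dump_key_ct() leans on IS_ENABLED() so each conntrack field is dumped only when the corresponding Kconfig option is built in, while the code still compiles in every configuration: the macro folds to a constant 0 or 1 and the dead branch is eliminated. A simplified, runnable stand-in for the shape (MY_IS_ENABLED is a mock of the kernel macro, not the real one):

#include <stdio.h>

#define MY_IS_ENABLED(cfg)	(cfg)	/* kernel version inspects CONFIG_* */
#define CFG_CT_MARK		1
#define CFG_CT_LABELS		0

int main(void)
{
	if (MY_IS_ENABLED(CFG_CT_MARK))
		puts("dump ct_mark");
	if (MY_IS_ENABLED(CFG_CT_LABELS))	/* constant-folded away */
		puts("dump ct_labels");
	return 0;
}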
 static int fl_dump_key_options(struct sk_buff *skb, int enc_opt_type,
 			       struct flow_dissector_key_enc_opts *enc_opts)
 {
@@ -1645,13 +2800,23 @@
 	if (!enc_opts->len)
 		return 0;

-	nest = nla_nest_start(skb, enc_opt_type);
+	nest = nla_nest_start_noflag(skb, enc_opt_type);
 	if (!nest)
 		goto nla_put_failure;

 	switch (enc_opts->dst_opt_type) {
 	case TUNNEL_GENEVE_OPT:
 		err = fl_dump_key_geneve_opt(skb, enc_opts);
+		if (err)
+			goto nla_put_failure;
+		break;
+	case TUNNEL_VXLAN_OPT:
+		err = fl_dump_key_vxlan_opt(skb, enc_opts);
+		if (err)
+			goto nla_put_failure;
+		break;
+	case TUNNEL_ERSPAN_OPT:
+		err = fl_dump_key_erspan_opt(skb, enc_opts);
 		if (err)
 			goto nla_put_failure;
 		break;
@@ -1682,10 +2847,10 @@
 static int fl_dump_key(struct sk_buff *skb, struct net *net,
 		       struct fl_flow_key *key, struct fl_flow_key *mask)
 {
-	if (mask->indev_ifindex) {
+	if (mask->meta.ingress_ifindex) {
 		struct net_device *dev;

-		dev = __dev_get_by_index(net, key->indev_ifindex);
+		dev = __dev_get_by_index(net, key->meta.ingress_ifindex);
 		if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
 			goto nla_put_failure;
 	}
@@ -1717,13 +2882,13 @@
 		goto nla_put_failure;

 	if (mask->basic.n_proto) {
-		if (mask->cvlan.vlan_tpid) {
+		if (mask->cvlan.vlan_eth_type) {
 			if (nla_put_be16(skb, TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
 					 key->basic.n_proto))
 				goto nla_put_failure;
-		} else if (mask->vlan.vlan_tpid) {
+		} else if (mask->vlan.vlan_eth_type) {
 			if (nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
-					 key->basic.n_proto))
+					 key->vlan.vlan_eth_type))
 				goto nla_put_failure;
 		}
 	}
@@ -1824,6 +2989,12 @@
 			    sizeof(key->arp.tha))))
 		goto nla_put_failure;

+	if ((key->basic.ip_proto == IPPROTO_TCP ||
+	     key->basic.ip_proto == IPPROTO_UDP ||
+	     key->basic.ip_proto == IPPROTO_SCTP) &&
+	    fl_dump_key_port_range(skb, key, mask))
+		goto nla_put_failure;
+
 	if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
 	    (fl_dump_key_val(skb, &key->enc_ipv4.src,
 			     TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src,
@@ -1863,7 +3034,15 @@
 	    fl_dump_key_enc_opt(skb, &key->enc_opts, &mask->enc_opts))
 		goto nla_put_failure;

+	if (fl_dump_key_ct(skb, &key->ct, &mask->ct))
+		goto nla_put_failure;
+
 	if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags))
+		goto nla_put_failure;
+
+	if (fl_dump_key_val(skb, &key->hash.hash, TCA_FLOWER_KEY_HASH,
+			    &mask->hash.hash, TCA_FLOWER_KEY_HASH_MASK,
+			    sizeof(key->hash.hash)))
 		goto nla_put_failure;

 	return 0;
@@ -1873,35 +3052,44 @@
 }

 static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
-		   struct sk_buff *skb, struct tcmsg *t)
+		   struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
 {
 	struct cls_fl_filter *f = fh;
 	struct nlattr *nest;
 	struct fl_flow_key *key, *mask;
+	bool skip_hw;

 	if (!f)
 		return skb->len;

 	t->tcm_handle = f->handle;

-	nest = nla_nest_start(skb, TCA_OPTIONS);
+	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
 	if (!nest)
 		goto nla_put_failure;

+	spin_lock(&tp->lock);
+
 	if (f->res.classid &&
 	    nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
-		goto nla_put_failure;
+		goto nla_put_failure_locked;

 	key = &f->key;
 	mask = &f->mask->key;
+	skip_hw = tc_skip_hw(f->flags);

 	if (fl_dump_key(skb, net, key, mask))
-		goto nla_put_failure;
-
-	if (!tc_skip_hw(f->flags))
-		fl_hw_update_stats(tp, f);
+		goto nla_put_failure_locked;

 	if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
+		goto nla_put_failure_locked;
+
+	spin_unlock(&tp->lock);
+
+	if (!skip_hw)
+		fl_hw_update_stats(tp, f, rtnl_held);
+
+	if (nla_put_u32(skb, TCA_FLOWER_IN_HW_COUNT, f->in_hw_count))
 		goto nla_put_failure;

 	if (tcf_exts_dump(skb, &f->exts))
@@ -1914,6 +3102,50 @@

 	return skb->len;

+nla_put_failure_locked:
+	spin_unlock(&tp->lock);
+nla_put_failure:
+	nla_nest_cancel(skb, nest);
+	return -1;
+}
+
+static int fl_terse_dump(struct net *net, struct tcf_proto *tp, void *fh,
+			 struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
+{
+	struct cls_fl_filter *f = fh;
+	struct nlattr *nest;
+	bool skip_hw;
+
+	if (!f)
+		return skb->len;
+
+	t->tcm_handle = f->handle;
+
+	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
+	if (!nest)
+		goto nla_put_failure;
+
+	spin_lock(&tp->lock);
+
+	skip_hw = tc_skip_hw(f->flags);
+
+	if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
+		goto nla_put_failure_locked;
+
+	spin_unlock(&tp->lock);
+
+	if (!skip_hw)
+		fl_hw_update_stats(tp, f, rtnl_held);
+
+	if (tcf_exts_terse_dump(skb, &f->exts))
+		goto nla_put_failure;
+
+	nla_nest_end(skb, nest);
+
+	return skb->len;
+
+nla_put_failure_locked:
+	spin_unlock(&tp->lock);
 nla_put_failure:
 	nla_nest_cancel(skb, nest);
 	return -1;
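Both dump paths follow one locking discipline: the fields that fl_change() updates under tp->lock (classid, flags) are read and emitted while holding the lock, and the lock is dropped before fl_hw_update_stats(), which may sleep while querying drivers. Reduced to its skeleton, the shape shared by fl_dump() and fl_terse_dump() looks like this (a sketch, not a complete function):

static int dump_shape(struct tcf_proto *tp, struct cls_fl_filter *f,
		      struct sk_buff *skb, bool rtnl_held)
{
	bool skip_hw;

	spin_lock(&tp->lock);
	skip_hw = tc_skip_hw(f->flags);
	if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
		goto unlock_fail;
	spin_unlock(&tp->lock);

	if (!skip_hw)
		fl_hw_update_stats(tp, f, rtnl_held);	/* may sleep */
	return 0;

unlock_fail:
	spin_unlock(&tp->lock);
	return -1;
}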
@@ -1925,7 +3157,7 @@
 	struct fl_flow_key *key, *mask;
 	struct nlattr *nest;

-	nest = nla_nest_start(skb, TCA_OPTIONS);
+	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
 	if (!nest)
 		goto nla_put_failure;

@@ -1957,22 +3189,39 @@
 	}
 }

+static bool fl_delete_empty(struct tcf_proto *tp)
+{
+	struct cls_fl_head *head = fl_head_dereference(tp);
+
+	spin_lock(&tp->lock);
+	tp->deleting = idr_is_empty(&head->handle_idr);
+	spin_unlock(&tp->lock);
+
+	return tp->deleting;
+}
+
 static struct tcf_proto_ops cls_fl_ops __read_mostly = {
 	.kind		= "flower",
 	.classify	= fl_classify,
 	.init		= fl_init,
 	.destroy	= fl_destroy,
 	.get		= fl_get,
+	.put		= fl_put,
 	.change		= fl_change,
 	.delete		= fl_delete,
+	.delete_empty	= fl_delete_empty,
 	.walk		= fl_walk,
 	.reoffload	= fl_reoffload,
+	.hw_add		= fl_hw_add,
+	.hw_del		= fl_hw_del,
 	.dump		= fl_dump,
+	.terse_dump	= fl_terse_dump,
 	.bind_class	= fl_bind_class,
 	.tmplt_create	= fl_tmplt_create,
 	.tmplt_destroy	= fl_tmplt_destroy,
 	.tmplt_dump	= fl_tmplt_dump,
 	.owner		= THIS_MODULE,
+	.flags		= TCF_PROTO_OPS_DOIT_UNLOCKED,
 };

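With TCF_PROTO_OPS_DOIT_UNLOCKED set in .flags, cls_api is allowed to invoke change, delete, get/put, walk and dump without taking RTNL, which is why every path above synchronizes through tp->lock and reference counts instead. Registering such an ops table keeps the usual module boilerplate; a sketch of the init/exit pair it needs (function names here are illustrative, the file's real cls_fl_init() follows below):

static int __init example_cls_init(void)
{
	/* makes the "flower" classifier kind resolvable by tc */
	return register_tcf_proto_ops(&cls_fl_ops);
}

static void __exit example_cls_exit(void)
{
	unregister_tcf_proto_ops(&cls_fl_ops);
}

module_init(example_cls_init);
module_exit(example_cls_exit);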
 static int __init cls_fl_init(void)