2023-12-09 b22da3d8526a935aa31e086e63f60ff3246cb61c
kernel/net/bridge/br_vlan.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 #include <linux/kernel.h>
 #include <linux/netdevice.h>
 #include <linux/rtnetlink.h>
@@ -6,6 +7,8 @@
 
 #include "br_private.h"
 #include "br_private_tunnel.h"
+
+static void nbp_vlan_set_vlan_dev_state(struct net_bridge_port *p, u16 vid);
 
 static inline int br_vlan_cmp(struct rhashtable_compare_arg *arg,
 			      const void *ptr)
@@ -21,7 +24,6 @@
 	.key_offset = offsetof(struct net_bridge_vlan, vid),
 	.key_len = sizeof(u16),
 	.nelem_hint = 3,
-	.locks_mul = 1,
 	.max_size = VLAN_N_VID,
 	.obj_cmpfn = br_vlan_cmp,
 	.automatic_shrinking = true,
@@ -32,13 +34,15 @@
 	return rhashtable_lookup_fast(tbl, &vid, br_vlan_rht_params);
 }
 
-static bool __vlan_add_pvid(struct net_bridge_vlan_group *vg, u16 vid)
+static bool __vlan_add_pvid(struct net_bridge_vlan_group *vg,
+			    const struct net_bridge_vlan *v)
 {
-	if (vg->pvid == vid)
+	if (vg->pvid == v->vid)
 		return false;
 
 	smp_wmb();
-	vg->pvid = vid;
+	br_vlan_set_pvid_state(vg, v->state);
+	vg->pvid = v->vid;
 
 	return true;
 }
@@ -67,7 +71,7 @@
 		vg = nbp_vlan_group(v->port);
 
 	if (flags & BRIDGE_VLAN_INFO_PVID)
-		ret = __vlan_add_pvid(vg, v->vid);
+		ret = __vlan_add_pvid(vg, v);
 	else
 		ret = __vlan_delete_pvid(vg, v->vid);
 
@@ -80,16 +84,18 @@
 }
 
 static int __vlan_vid_add(struct net_device *dev, struct net_bridge *br,
-			  u16 vid, u16 flags)
+			  struct net_bridge_vlan *v, u16 flags,
+			  struct netlink_ext_ack *extack)
 {
 	int err;
 
 	/* Try switchdev op first. In case it is not supported, fallback to
 	 * 8021q add.
 	 */
-	err = br_switchdev_port_vlan_add(dev, vid, flags);
+	err = br_switchdev_port_vlan_add(dev, v->vid, flags, extack);
 	if (err == -EOPNOTSUPP)
-		return vlan_vid_add(dev, br->vlan_proto, vid);
+		return vlan_vid_add(dev, br->vlan_proto, v->vid);
+	v->priv_flags |= BR_VLFLAG_ADDED_BY_SWITCHDEV;
 	return err;
 }
 
@@ -121,25 +127,25 @@
 }
 
 static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
-			  u16 vid)
+			  const struct net_bridge_vlan *v)
 {
 	int err;
 
 	/* Try switchdev op first. In case it is not supported, fallback to
 	 * 8021q del.
 	 */
-	err = br_switchdev_port_vlan_del(dev, vid);
-	if (err == -EOPNOTSUPP) {
-		vlan_vid_del(dev, br->vlan_proto, vid);
-		return 0;
-	}
-	return err;
+	err = br_switchdev_port_vlan_del(dev, v->vid);
+	if (!(v->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV))
+		vlan_vid_del(dev, br->vlan_proto, v->vid);
+	return err == -EOPNOTSUPP ? 0 : err;
 }
 
-/* Returns a master vlan, if it didn't exist it gets created. In all cases a
+/* Returns a master vlan, if it didn't exist it gets created. In all cases
  * a reference is taken to the master vlan before returning.
  */
-static struct net_bridge_vlan *br_vlan_get_master(struct net_bridge *br, u16 vid)
+static struct net_bridge_vlan *
+br_vlan_get_master(struct net_bridge *br, u16 vid,
+		   struct netlink_ext_ack *extack)
 {
 	struct net_bridge_vlan_group *vg;
 	struct net_bridge_vlan *masterv;
@@ -150,7 +156,7 @@
 		bool changed;
 
 		/* missing global ctx, create it now */
-		if (br_vlan_add(br, vid, 0, &changed))
+		if (br_vlan_add(br, vid, 0, &changed, extack))
 			return NULL;
 		masterv = br_vlan_find(vg, vid);
 		if (WARN_ON(!masterv))
@@ -190,6 +196,19 @@
 	}
 }
 
+static void nbp_vlan_rcu_free(struct rcu_head *rcu)
+{
+	struct net_bridge_vlan *v;
+
+	v = container_of(rcu, struct net_bridge_vlan, rcu);
+	WARN_ON(br_vlan_is_master(v));
+	/* if we had per-port stats configured then free them here */
+	if (v->priv_flags & BR_VLFLAG_PER_PORT_STATS)
+		free_percpu(v->stats);
+	v->stats = NULL;
+	kfree(v);
+}
+
 /* This is the shared VLAN add function which works for both ports and bridge
  * devices. There are four possible calls to this function in terms of the
  * vlan entry type:
@@ -201,7 +220,8 @@
  * 4. same as 3 but with both master and brentry flags set so the entry
  *    will be used for filtering in both the port and the bridge
  */
-static int __vlan_add(struct net_bridge_vlan *v, u16 flags)
+static int __vlan_add(struct net_bridge_vlan *v, u16 flags,
+		      struct netlink_ext_ack *extack)
 {
 	struct net_bridge_vlan *masterv = NULL;
 	struct net_bridge_port *p = NULL;
@@ -226,7 +246,7 @@
 		 * This ensures tagged traffic enters the bridge when
 		 * promiscuous mode is disabled by br_manage_promisc().
 		 */
-		err = __vlan_vid_add(dev, br, v->vid, flags);
+		err = __vlan_vid_add(dev, br, v, flags, extack);
 		if (err)
 			goto out;
 
@@ -236,20 +256,33 @@
 
 			err = br_vlan_add(br, v->vid,
 					  flags | BRIDGE_VLAN_INFO_BRENTRY,
-					  &changed);
+					  &changed, extack);
 			if (err)
 				goto out_filt;
+
+			if (changed)
+				br_vlan_notify(br, NULL, v->vid, 0,
+					       RTM_NEWVLAN);
 		}
 
-		masterv = br_vlan_get_master(br, v->vid);
+		masterv = br_vlan_get_master(br, v->vid, extack);
 		if (!masterv) {
 			err = -ENOMEM;
 			goto out_filt;
 		}
 		v->brvlan = masterv;
-		v->stats = masterv->stats;
+		if (br_opt_get(br, BROPT_VLAN_STATS_PER_PORT)) {
+			v->stats = netdev_alloc_pcpu_stats(struct br_vlan_stats);
+			if (!v->stats) {
+				err = -ENOMEM;
+				goto out_filt;
+			}
+			v->priv_flags |= BR_VLFLAG_PER_PORT_STATS;
+		} else {
+			v->stats = masterv->stats;
+		}
 	} else {
-		err = br_switchdev_port_vlan_add(dev, v->vid, flags);
+		err = br_switchdev_port_vlan_add(dev, v->vid, flags, extack);
 		if (err && err != -EOPNOTSUPP)
 			goto out;
 	}
@@ -264,6 +297,9 @@
 		vg->num_vlans++;
 	}
 
+	/* set the state before publishing */
+	v->state = BR_STATE_FORWARDING;
+
 	err = rhashtable_lookup_insert_fast(&vg->vlan_hash, &v->vnode,
 					    br_vlan_rht_params);
 	if (err)
@@ -271,6 +307,9 @@
 
 	__vlan_add_list(v);
 	__vlan_add_flags(v, flags);
+
+	if (p)
+		nbp_vlan_set_vlan_dev_state(p, v->vid);
 out:
 	return err;
 
@@ -282,8 +321,12 @@
 
 out_filt:
 	if (p) {
-		__vlan_vid_del(dev, br, v->vid);
+		__vlan_vid_del(dev, br, v);
 		if (masterv) {
+			if (v->stats && masterv->stats != v->stats)
+				free_percpu(v->stats);
+			v->stats = NULL;
+
 			br_vlan_put_master(masterv);
 			v->brvlan = NULL;
 		}
@@ -311,7 +354,7 @@
 
 	__vlan_delete_pvid(vg, v->vid);
 	if (p) {
-		err = __vlan_vid_del(p->dev, p->br, v->vid);
+		err = __vlan_vid_del(p->dev, p->br, v);
 		if (err)
 			goto out;
 	} else {
@@ -331,7 +374,8 @@
 		rhashtable_remove_fast(&vg->vlan_hash, &v->vnode,
 				       br_vlan_rht_params);
 		__vlan_del_list(v);
-		kfree_rcu(v, rcu);
+		nbp_vlan_set_vlan_dev_state(p, v->vid);
+		call_rcu(&v->rcu, nbp_vlan_rcu_free);
 	}
 
 	br_vlan_put_master(masterv);
@@ -347,13 +391,31 @@
 	kfree(vg);
 }
 
-static void __vlan_flush(struct net_bridge_vlan_group *vg)
+static void __vlan_flush(const struct net_bridge *br,
+			 const struct net_bridge_port *p,
+			 struct net_bridge_vlan_group *vg)
 {
 	struct net_bridge_vlan *vlan, *tmp;
+	u16 v_start = 0, v_end = 0;
 
 	__vlan_delete_pvid(vg, vg->pvid);
-	list_for_each_entry_safe(vlan, tmp, &vg->vlan_list, vlist)
+	list_for_each_entry_safe(vlan, tmp, &vg->vlan_list, vlist) {
+		/* take care of disjoint ranges */
+		if (!v_start) {
+			v_start = vlan->vid;
+		} else if (vlan->vid - v_end != 1) {
+			/* found range end, notify and start next one */
+			br_vlan_notify(br, p, v_start, v_end, RTM_DELVLAN);
+			v_start = vlan->vid;
+		}
+		v_end = vlan->vid;
+
 		__vlan_del(vlan);
+	}
+
+	/* notify about the last/whole vlan range */
+	if (v_start)
+		br_vlan_notify(br, p, v_start, v_end, RTM_DELVLAN);
 }
 
 struct sk_buff *br_handle_vlan(struct net_bridge *br,
@@ -388,7 +450,7 @@
 			return NULL;
 		}
 	}
-	if (br->vlan_stats_enabled) {
+	if (br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
 		stats = this_cpu_ptr(v->stats);
 		u64_stats_update_begin(&stats->syncp);
 		stats->tx_bytes += skb->len;
@@ -397,7 +459,7 @@
 	}
 
 	if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
-		skb->vlan_tci = 0;
+		__vlan_hwaccel_clear_tag(skb);
 
 	if (p && (p->flags & BR_VLAN_TUNNEL) &&
 	    br_handle_egress_vlan_tunnel(skb, v)) {
@@ -411,7 +473,8 @@
 /* Called under RCU */
 static bool __allowed_ingress(const struct net_bridge *br,
 			      struct net_bridge_vlan_group *vg,
-			      struct sk_buff *skb, u16 *vid)
+			      struct sk_buff *skb, u16 *vid,
+			      u8 *state)
 {
 	struct br_vlan_stats *stats;
 	struct net_bridge_vlan *v;
@@ -470,21 +533,33 @@
 			__vlan_hwaccel_put_tag(skb, br->vlan_proto, pvid);
 		else
 			/* Priority-tagged Frame.
-			 * At this point, We know that skb->vlan_tci had
-			 * VLAN_TAG_PRESENT bit and its VID field was 0x000.
+			 * At this point, we know that skb->vlan_tci VID
+			 * field was 0.
			 * We update only VID field and preserve PCP field.
			 */
 			skb->vlan_tci |= pvid;
 
 		/* if stats are disabled we can avoid the lookup */
-		if (!br->vlan_stats_enabled)
+		if (!br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
+			if (*state == BR_STATE_FORWARDING) {
+				*state = br_vlan_get_pvid_state(vg);
+				if (!br_vlan_state_allowed(*state, true))
+					goto drop;
+			}
 			return true;
+		}
 	}
 	v = br_vlan_find(vg, *vid);
 	if (!v || !br_vlan_should_use(v))
 		goto drop;
 
-	if (br->vlan_stats_enabled) {
+	if (*state == BR_STATE_FORWARDING) {
+		*state = br_vlan_get_state(v);
+		if (!br_vlan_state_allowed(*state, true))
+			goto drop;
+	}
+
+	if (br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
 		stats = this_cpu_ptr(v->stats);
 		u64_stats_update_begin(&stats->syncp);
 		stats->rx_bytes += skb->len;
@@ -501,17 +576,17 @@
 
 bool br_allowed_ingress(const struct net_bridge *br,
 			struct net_bridge_vlan_group *vg, struct sk_buff *skb,
-			u16 *vid)
+			u16 *vid, u8 *state)
 {
 	/* If VLAN filtering is disabled on the bridge, all packets are
 	 * permitted.
 	 */
-	if (!br->vlan_enabled) {
+	if (!br_opt_get(br, BROPT_VLAN_ENABLED)) {
 		BR_INPUT_SKB_CB(skb)->vlan_filtered = false;
 		return true;
 	}
 
-	return __allowed_ingress(br, vg, skb, vid);
+	return __allowed_ingress(br, vg, skb, vid, state);
 }
 
 /* Called under RCU. */
@@ -527,7 +602,8 @@
 
 	br_vlan_get_tag(skb, &vid);
 	v = br_vlan_find(vg, vid);
-	if (v && br_vlan_should_use(v))
+	if (v && br_vlan_should_use(v) &&
+	    br_vlan_state_allowed(br_vlan_get_state(v), false))
 		return true;
 
 	return false;
@@ -538,9 +614,10 @@
 {
 	struct net_bridge_vlan_group *vg;
 	struct net_bridge *br = p->br;
+	struct net_bridge_vlan *v;
 
 	/* If filtering was disabled at input, let it pass. */
-	if (!br->vlan_enabled)
+	if (!br_opt_get(br, BROPT_VLAN_ENABLED))
 		return true;
 
 	vg = nbp_vlan_group_rcu(p);
@@ -552,13 +629,15 @@
 
 	if (!*vid) {
 		*vid = br_get_pvid(vg);
-		if (!*vid)
+		if (!*vid ||
+		    !br_vlan_state_allowed(br_vlan_get_pvid_state(vg), true))
 			return false;
 
 		return true;
 	}
 
-	if (br_vlan_find(vg, *vid))
+	v = br_vlan_find(vg, *vid);
+	if (v && br_vlan_state_allowed(br_vlan_get_state(v), true))
 		return true;
 
 	return false;
@@ -567,11 +646,12 @@
 static int br_vlan_add_existing(struct net_bridge *br,
 				struct net_bridge_vlan_group *vg,
 				struct net_bridge_vlan *vlan,
-				u16 flags, bool *changed)
+				u16 flags, bool *changed,
+				struct netlink_ext_ack *extack)
 {
 	int err;
 
-	err = br_switchdev_port_vlan_add(br->dev, vlan->vid, flags);
+	err = br_switchdev_port_vlan_add(br->dev, vlan->vid, flags, extack);
 	if (err && err != -EOPNOTSUPP)
 		return err;
 
@@ -610,7 +690,8 @@
  * Must be called with vid in range from 1 to 4094 inclusive.
  * changed must be true only if the vlan was created or updated
  */
-int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags, bool *changed)
+int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags, bool *changed,
+		struct netlink_ext_ack *extack)
 {
 	struct net_bridge_vlan_group *vg;
 	struct net_bridge_vlan *vlan;
@@ -622,7 +703,8 @@
 	vg = br_vlan_group(br);
 	vlan = br_vlan_find(vg, vid);
 	if (vlan)
-		return br_vlan_add_existing(br, vg, vlan, flags, changed);
+		return br_vlan_add_existing(br, vg, vlan, flags, changed,
+					    extack);
 
 	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
 	if (!vlan)
@@ -639,7 +721,7 @@
 	vlan->br = br;
 	if (flags & BRIDGE_VLAN_INFO_BRENTRY)
 		refcount_set(&vlan->refcnt, 1);
-	ret = __vlan_add(vlan, flags);
+	ret = __vlan_add(vlan, flags, extack);
 	if (ret) {
 		free_percpu(vlan->stats);
 		kfree(vlan);
@@ -679,13 +761,8 @@
 
 	ASSERT_RTNL();
 
-	/* delete auto-added default pvid local fdb before flushing vlans
-	 * otherwise it will be leaked on bridge device init failure
-	 */
-	br_fdb_delete_by_port(br, NULL, 0, 1);
-
 	vg = br_vlan_group(br);
-	__vlan_flush(vg);
+	__vlan_flush(br, NULL, vg);
 	RCU_INIT_POINTER(br->vlgrp, NULL);
 	synchronize_rcu();
 	__vlan_group_free(vg);
@@ -702,11 +779,12 @@
 /* Must be protected by RTNL. */
 static void recalculate_group_addr(struct net_bridge *br)
 {
-	if (br->group_addr_set)
+	if (br_opt_get(br, BROPT_GROUP_ADDR_SET))
 		return;
 
 	spin_lock_bh(&br->lock);
-	if (!br->vlan_enabled || br->vlan_proto == htons(ETH_P_8021Q)) {
+	if (!br_opt_get(br, BROPT_VLAN_ENABLED) ||
+	    br->vlan_proto == htons(ETH_P_8021Q)) {
 		/* Bridge Group Address */
 		br->group_addr[5] = 0x00;
 	} else { /* vlan_enabled && ETH_P_8021AD */
@@ -719,7 +797,8 @@
 /* Must be protected by RTNL. */
 void br_recalculate_fwd_mask(struct net_bridge *br)
 {
-	if (!br->vlan_enabled || br->vlan_proto == htons(ETH_P_8021Q))
+	if (!br_opt_get(br, BROPT_VLAN_ENABLED) ||
+	    br->vlan_proto == htons(ETH_P_8021Q))
 		br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT;
 	else /* vlan_enabled && ETH_P_8021AD */
 		br->group_fwd_mask_required = BR_GROUPFWD_8021AD &
@@ -736,14 +815,14 @@
 	};
 	int err;
 
-	if (br->vlan_enabled == val)
+	if (br_opt_get(br, BROPT_VLAN_ENABLED) == !!val)
 		return 0;
 
 	err = switchdev_port_attr_set(br->dev, &attr);
 	if (err && err != -EOPNOTSUPP)
 		return err;
 
-	br->vlan_enabled = val;
+	br_opt_toggle(br, BROPT_VLAN_ENABLED, !!val);
 	br_manage_promisc(br);
 	recalculate_group_addr(br);
 	br_recalculate_fwd_mask(br);
@@ -760,32 +839,53 @@
 {
 	struct net_bridge *br = netdev_priv(dev);
 
-	return !!br->vlan_enabled;
+	return br_opt_get(br, BROPT_VLAN_ENABLED);
 }
 EXPORT_SYMBOL_GPL(br_vlan_enabled);
 
+int br_vlan_get_proto(const struct net_device *dev, u16 *p_proto)
+{
+	struct net_bridge *br = netdev_priv(dev);
+
+	*p_proto = ntohs(br->vlan_proto);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(br_vlan_get_proto);
+
 int __br_vlan_set_proto(struct net_bridge *br, __be16 proto)
 {
+	struct switchdev_attr attr = {
+		.orig_dev = br->dev,
+		.id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_PROTOCOL,
+		.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
+		.u.vlan_protocol = ntohs(proto),
+	};
 	int err = 0;
 	struct net_bridge_port *p;
 	struct net_bridge_vlan *vlan;
 	struct net_bridge_vlan_group *vg;
-	__be16 oldproto;
+	__be16 oldproto = br->vlan_proto;
 
 	if (br->vlan_proto == proto)
 		return 0;
+
+	err = switchdev_port_attr_set(br->dev, &attr);
+	if (err && err != -EOPNOTSUPP)
+		return err;
 
 	/* Add VLANs for the new proto to the device filter. */
 	list_for_each_entry(p, &br->port_list, list) {
 		vg = nbp_vlan_group(p);
 		list_for_each_entry(vlan, &vg->vlan_list, vlist) {
+			if (vlan->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV)
+				continue;
 			err = vlan_vid_add(p->dev, proto, vlan->vid);
 			if (err)
 				goto err_filt;
 		}
 	}
 
-	oldproto = br->vlan_proto;
 	br->vlan_proto = proto;
 
 	recalculate_group_addr(br);
@@ -794,20 +894,32 @@
 	/* Delete VLANs for the old proto from the device filter. */
 	list_for_each_entry(p, &br->port_list, list) {
 		vg = nbp_vlan_group(p);
-		list_for_each_entry(vlan, &vg->vlan_list, vlist)
+		list_for_each_entry(vlan, &vg->vlan_list, vlist) {
+			if (vlan->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV)
+				continue;
 			vlan_vid_del(p->dev, oldproto, vlan->vid);
+		}
 	}
 
 	return 0;
 
 err_filt:
-	list_for_each_entry_continue_reverse(vlan, &vg->vlan_list, vlist)
+	attr.u.vlan_protocol = ntohs(oldproto);
+	switchdev_port_attr_set(br->dev, &attr);
+
+	list_for_each_entry_continue_reverse(vlan, &vg->vlan_list, vlist) {
+		if (vlan->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV)
+			continue;
 		vlan_vid_del(p->dev, proto, vlan->vid);
+	}
 
 	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
 		vg = nbp_vlan_group(p);
-		list_for_each_entry(vlan, &vg->vlan_list, vlist)
+		list_for_each_entry(vlan, &vg->vlan_list, vlist) {
+			if (vlan->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV)
+				continue;
 			vlan_vid_del(p->dev, proto, vlan->vid);
+		}
 	}
 
 	return err;
@@ -826,7 +938,31 @@
 	switch (val) {
 	case 0:
 	case 1:
-		br->vlan_stats_enabled = val;
+		br_opt_toggle(br, BROPT_VLAN_STATS_ENABLED, !!val);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int br_vlan_set_stats_per_port(struct net_bridge *br, unsigned long val)
+{
+	struct net_bridge_port *p;
+
+	/* allow to change the option if there are no port vlans configured */
+	list_for_each_entry(p, &br->port_list, list) {
+		struct net_bridge_vlan_group *vg = nbp_vlan_group(p);
+
+		if (vg->num_vlans)
+			return -EBUSY;
+	}
+
+	switch (val) {
+	case 0:
+	case 1:
+		br_opt_toggle(br, BROPT_VLAN_STATS_PER_PORT, !!val);
 		break;
 	default:
 		return -EINVAL;
@@ -858,18 +994,22 @@
 	/* Disable default_pvid on all ports where it is still
 	 * configured.
 	 */
-	if (vlan_default_pvid(br_vlan_group(br), pvid))
-		br_vlan_delete(br, pvid);
+	if (vlan_default_pvid(br_vlan_group(br), pvid)) {
+		if (!br_vlan_delete(br, pvid))
+			br_vlan_notify(br, NULL, pvid, 0, RTM_DELVLAN);
+	}
 
 	list_for_each_entry(p, &br->port_list, list) {
-		if (vlan_default_pvid(nbp_vlan_group(p), pvid))
-			nbp_vlan_delete(p, pvid);
+		if (vlan_default_pvid(nbp_vlan_group(p), pvid) &&
+		    !nbp_vlan_delete(p, pvid))
+			br_vlan_notify(br, p, pvid, 0, RTM_DELVLAN);
 	}
 
 	br->default_pvid = 0;
 }
 
-int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid)
+int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid,
+			       struct netlink_ext_ack *extack)
 {
 	const struct net_bridge_vlan *pvent;
 	struct net_bridge_vlan_group *vg;
@@ -884,8 +1024,7 @@
 		return 0;
 	}
 
-	changed = kcalloc(BITS_TO_LONGS(BR_MAX_PORTS), sizeof(unsigned long),
-			  GFP_KERNEL);
+	changed = bitmap_zalloc(BR_MAX_PORTS, GFP_KERNEL);
 	if (!changed)
 		return -ENOMEM;
 
@@ -902,10 +1041,13 @@
 				  BRIDGE_VLAN_INFO_PVID |
 				  BRIDGE_VLAN_INFO_UNTAGGED |
 				  BRIDGE_VLAN_INFO_BRENTRY,
-				  &vlchange);
+				  &vlchange, extack);
 		if (err)
 			goto out;
-		br_vlan_delete(br, old_pvid);
+
+		if (br_vlan_delete(br, old_pvid))
+			br_vlan_notify(br, NULL, old_pvid, 0, RTM_DELVLAN);
+		br_vlan_notify(br, NULL, pvid, 0, RTM_NEWVLAN);
 		set_bit(0, changed);
 	}
 
@@ -922,17 +1064,19 @@
 		err = nbp_vlan_add(p, pvid,
 				   BRIDGE_VLAN_INFO_PVID |
 				   BRIDGE_VLAN_INFO_UNTAGGED,
-				   &vlchange);
+				   &vlchange, extack);
 		if (err)
 			goto err_port;
-		nbp_vlan_delete(p, old_pvid);
+		if (nbp_vlan_delete(p, old_pvid))
+			br_vlan_notify(br, p, old_pvid, 0, RTM_DELVLAN);
+		br_vlan_notify(p->br, p, pvid, 0, RTM_NEWVLAN);
 		set_bit(p->port_no, changed);
 	}
 
 	br->default_pvid = pvid;
 
 out:
-	kfree(changed);
+	bitmap_free(changed);
 	return err;
 
 err_port:
@@ -940,22 +1084,28 @@
 		if (!test_bit(p->port_no, changed))
 			continue;
 
-		if (old_pvid)
+		if (old_pvid) {
 			nbp_vlan_add(p, old_pvid,
 				     BRIDGE_VLAN_INFO_PVID |
 				     BRIDGE_VLAN_INFO_UNTAGGED,
-				     &vlchange);
+				     &vlchange, NULL);
+			br_vlan_notify(p->br, p, old_pvid, 0, RTM_NEWVLAN);
+		}
 		nbp_vlan_delete(p, pvid);
+		br_vlan_notify(br, p, pvid, 0, RTM_DELVLAN);
 	}
 
 	if (test_bit(0, changed)) {
-		if (old_pvid)
+		if (old_pvid) {
 			br_vlan_add(br, old_pvid,
 				    BRIDGE_VLAN_INFO_PVID |
 				    BRIDGE_VLAN_INFO_UNTAGGED |
 				    BRIDGE_VLAN_INFO_BRENTRY,
-				    &vlchange);
+				    &vlchange, NULL);
+			br_vlan_notify(br, NULL, old_pvid, 0, RTM_NEWVLAN);
+		}
 		br_vlan_delete(br, pvid);
+		br_vlan_notify(br, NULL, pvid, 0, RTM_DELVLAN);
 	}
 	goto out;
 }
@@ -972,12 +1122,12 @@
 		goto out;
 
 	/* Only allow default pvid change when filtering is disabled */
-	if (br->vlan_enabled) {
+	if (br_opt_get(br, BROPT_VLAN_ENABLED)) {
 		pr_info_once("Please disable vlan filtering to change default_pvid\n");
 		err = -EPERM;
 		goto out;
 	}
-	err = __br_vlan_set_default_pvid(br, pvid);
+	err = __br_vlan_set_default_pvid(br, pvid, NULL);
 out:
 	return err;
 }
@@ -986,7 +1136,6 @@
 {
 	struct net_bridge_vlan_group *vg;
 	int ret = -ENOMEM;
-	bool changed;
 
 	vg = kzalloc(sizeof(*vg), GFP_KERNEL);
 	if (!vg)
@@ -1001,17 +1150,10 @@
 	br->vlan_proto = htons(ETH_P_8021Q);
 	br->default_pvid = 1;
 	rcu_assign_pointer(br->vlgrp, vg);
-	ret = br_vlan_add(br, 1,
-			  BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED |
-			  BRIDGE_VLAN_INFO_BRENTRY, &changed);
-	if (ret)
-		goto err_vlan_add;
 
 out:
 	return ret;
 
-err_vlan_add:
-	vlan_tunnel_deinit(vg);
 err_tunnel_init:
 	rhashtable_destroy(&vg->vlan_hash);
 err_rhtbl:
@@ -1020,13 +1162,13 @@
 	goto out;
 }
 
-int nbp_vlan_init(struct net_bridge_port *p)
+int nbp_vlan_init(struct net_bridge_port *p, struct netlink_ext_ack *extack)
 {
 	struct switchdev_attr attr = {
 		.orig_dev = p->br->dev,
 		.id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
 		.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
-		.u.vlan_filtering = p->br->vlan_enabled,
+		.u.vlan_filtering = br_opt_get(p->br, BROPT_VLAN_ENABLED),
 	};
 	struct net_bridge_vlan_group *vg;
 	int ret = -ENOMEM;
@@ -1053,9 +1195,10 @@
 		ret = nbp_vlan_add(p, p->br->default_pvid,
 				   BRIDGE_VLAN_INFO_PVID |
 				   BRIDGE_VLAN_INFO_UNTAGGED,
-				   &changed);
+				   &changed, extack);
 		if (ret)
 			goto err_vlan_add;
+		br_vlan_notify(p->br, p, p->br->default_pvid, 0, RTM_NEWVLAN);
 	}
 out:
 	return ret;
@@ -1078,7 +1221,7 @@
  * changed must be true only if the vlan was created or updated
  */
 int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags,
-		 bool *changed)
+		 bool *changed, struct netlink_ext_ack *extack)
 {
 	struct net_bridge_vlan *vlan;
 	int ret;
@@ -1089,7 +1232,7 @@
 	vlan = br_vlan_find(nbp_vlan_group(port), vid);
 	if (vlan) {
 		/* Pass the flags to the hardware bridge */
-		ret = br_switchdev_port_vlan_add(port->dev, vid, flags);
+		ret = br_switchdev_port_vlan_add(port->dev, vid, flags, extack);
 		if (ret && ret != -EOPNOTSUPP)
 			return ret;
 		*changed = __vlan_add_flags(vlan, flags);
@@ -1103,7 +1246,7 @@
 
 	vlan->vid = vid;
 	vlan->port = port;
-	ret = __vlan_add(vlan, flags);
+	ret = __vlan_add(vlan, flags, extack);
 	if (ret)
 		kfree(vlan);
 	else
@@ -1137,7 +1280,7 @@
 	ASSERT_RTNL();
 
 	vg = nbp_vlan_group(port);
-	__vlan_flush(vg);
+	__vlan_flush(port->br, port, vg);
 	RCU_INIT_POINTER(port->vlgrp, NULL);
 	synchronize_rcu();
 	__vlan_group_free(vg);
@@ -1173,9 +1316,13 @@
 int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid)
 {
 	struct net_bridge_vlan_group *vg;
+	struct net_bridge_port *p;
 
 	ASSERT_RTNL();
-	if (netif_is_bridge_master(dev))
+	p = br_port_get_check_rtnl(dev);
+	if (p)
+		vg = nbp_vlan_group(p);
+	else if (netif_is_bridge_master(dev))
 		vg = br_vlan_group(netdev_priv(dev));
 	else
 		return -EINVAL;
@@ -1184,6 +1331,24 @@
 	return 0;
 }
 EXPORT_SYMBOL_GPL(br_vlan_get_pvid);
+
+int br_vlan_get_pvid_rcu(const struct net_device *dev, u16 *p_pvid)
+{
+	struct net_bridge_vlan_group *vg;
+	struct net_bridge_port *p;
+
+	p = br_port_get_check_rcu(dev);
+	if (p)
+		vg = nbp_vlan_group_rcu(p);
+	else if (netif_is_bridge_master(dev))
+		vg = br_vlan_group_rcu(netdev_priv(dev));
+	else
+		return -EINVAL;
+
+	*p_pvid = br_get_pvid(vg);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(br_vlan_get_pvid_rcu);
 
 int br_vlan_get_info(const struct net_device *dev, u16 vid,
 		     struct bridge_vlan_info *p_vinfo)
@@ -1207,6 +1372,726 @@
 
 	p_vinfo->vid = vid;
 	p_vinfo->flags = v->flags;
+	if (vid == br_get_pvid(vg))
+		p_vinfo->flags |= BRIDGE_VLAN_INFO_PVID;
 	return 0;
 }
 EXPORT_SYMBOL_GPL(br_vlan_get_info);
+
+static int br_vlan_is_bind_vlan_dev(const struct net_device *dev)
+{
+	return is_vlan_dev(dev) &&
+		!!(vlan_dev_priv(dev)->flags & VLAN_FLAG_BRIDGE_BINDING);
+}
+
+static int br_vlan_is_bind_vlan_dev_fn(struct net_device *dev,
+					__always_unused struct netdev_nested_priv *priv)
+{
+	return br_vlan_is_bind_vlan_dev(dev);
+}
+
+static bool br_vlan_has_upper_bind_vlan_dev(struct net_device *dev)
+{
+	int found;
+
+	rcu_read_lock();
+	found = netdev_walk_all_upper_dev_rcu(dev, br_vlan_is_bind_vlan_dev_fn,
+					      NULL);
+	rcu_read_unlock();
+
+	return !!found;
+}
+
+struct br_vlan_bind_walk_data {
+	u16 vid;
+	struct net_device *result;
+};
+
+static int br_vlan_match_bind_vlan_dev_fn(struct net_device *dev,
+					  struct netdev_nested_priv *priv)
+{
+	struct br_vlan_bind_walk_data *data = priv->data;
+	int found = 0;
+
+	if (br_vlan_is_bind_vlan_dev(dev) &&
+	    vlan_dev_priv(dev)->vlan_id == data->vid) {
+		data->result = dev;
+		found = 1;
+	}
+
+	return found;
+}
+
+static struct net_device *
+br_vlan_get_upper_bind_vlan_dev(struct net_device *dev, u16 vid)
+{
+	struct br_vlan_bind_walk_data data = {
+		.vid = vid,
+	};
+	struct netdev_nested_priv priv = {
+		.data = (void *)&data,
+	};
+
+	rcu_read_lock();
+	netdev_walk_all_upper_dev_rcu(dev, br_vlan_match_bind_vlan_dev_fn,
+				      &priv);
+	rcu_read_unlock();
+
+	return data.result;
+}
+
+static bool br_vlan_is_dev_up(const struct net_device *dev)
+{
+	return !!(dev->flags & IFF_UP) && netif_oper_up(dev);
+}
+
+static void br_vlan_set_vlan_dev_state(const struct net_bridge *br,
+				       struct net_device *vlan_dev)
+{
+	u16 vid = vlan_dev_priv(vlan_dev)->vlan_id;
+	struct net_bridge_vlan_group *vg;
+	struct net_bridge_port *p;
+	bool has_carrier = false;
+
+	if (!netif_carrier_ok(br->dev)) {
+		netif_carrier_off(vlan_dev);
+		return;
+	}
+
+	list_for_each_entry(p, &br->port_list, list) {
+		vg = nbp_vlan_group(p);
+		if (br_vlan_find(vg, vid) && br_vlan_is_dev_up(p->dev)) {
+			has_carrier = true;
+			break;
+		}
+	}
+
+	if (has_carrier)
+		netif_carrier_on(vlan_dev);
+	else
+		netif_carrier_off(vlan_dev);
+}
+
+static void br_vlan_set_all_vlan_dev_state(struct net_bridge_port *p)
+{
+	struct net_bridge_vlan_group *vg = nbp_vlan_group(p);
+	struct net_bridge_vlan *vlan;
+	struct net_device *vlan_dev;
+
+	list_for_each_entry(vlan, &vg->vlan_list, vlist) {
+		vlan_dev = br_vlan_get_upper_bind_vlan_dev(p->br->dev,
+							   vlan->vid);
+		if (vlan_dev) {
+			if (br_vlan_is_dev_up(p->dev)) {
+				if (netif_carrier_ok(p->br->dev))
+					netif_carrier_on(vlan_dev);
+			} else {
+				br_vlan_set_vlan_dev_state(p->br, vlan_dev);
+			}
+		}
+	}
+}
+
+static void br_vlan_upper_change(struct net_device *dev,
+				 struct net_device *upper_dev,
+				 bool linking)
+{
+	struct net_bridge *br = netdev_priv(dev);
+
+	if (!br_vlan_is_bind_vlan_dev(upper_dev))
+		return;
+
+	if (linking) {
+		br_vlan_set_vlan_dev_state(br, upper_dev);
+		br_opt_toggle(br, BROPT_VLAN_BRIDGE_BINDING, true);
+	} else {
+		br_opt_toggle(br, BROPT_VLAN_BRIDGE_BINDING,
+			      br_vlan_has_upper_bind_vlan_dev(dev));
+	}
+}
+
+struct br_vlan_link_state_walk_data {
+	struct net_bridge *br;
+};
+
+static int br_vlan_link_state_change_fn(struct net_device *vlan_dev,
+					struct netdev_nested_priv *priv)
+{
+	struct br_vlan_link_state_walk_data *data = priv->data;
+
+	if (br_vlan_is_bind_vlan_dev(vlan_dev))
+		br_vlan_set_vlan_dev_state(data->br, vlan_dev);
+
+	return 0;
+}
+
+static void br_vlan_link_state_change(struct net_device *dev,
+				      struct net_bridge *br)
+{
+	struct br_vlan_link_state_walk_data data = {
+		.br = br
+	};
+	struct netdev_nested_priv priv = {
+		.data = (void *)&data,
+	};
+
+	rcu_read_lock();
+	netdev_walk_all_upper_dev_rcu(dev, br_vlan_link_state_change_fn,
+				      &priv);
+	rcu_read_unlock();
+}
+
+/* Must be protected by RTNL. */
+static void nbp_vlan_set_vlan_dev_state(struct net_bridge_port *p, u16 vid)
+{
+	struct net_device *vlan_dev;
+
+	if (!br_opt_get(p->br, BROPT_VLAN_BRIDGE_BINDING))
+		return;
+
+	vlan_dev = br_vlan_get_upper_bind_vlan_dev(p->br->dev, vid);
+	if (vlan_dev)
+		br_vlan_set_vlan_dev_state(p->br, vlan_dev);
+}
+
+/* Must be protected by RTNL. */
+int br_vlan_bridge_event(struct net_device *dev, unsigned long event, void *ptr)
+{
+	struct netdev_notifier_changeupper_info *info;
+	struct net_bridge *br = netdev_priv(dev);
+	int vlcmd = 0, ret = 0;
+	bool changed = false;
+
+	switch (event) {
+	case NETDEV_REGISTER:
+		ret = br_vlan_add(br, br->default_pvid,
+				  BRIDGE_VLAN_INFO_PVID |
+				  BRIDGE_VLAN_INFO_UNTAGGED |
+				  BRIDGE_VLAN_INFO_BRENTRY, &changed, NULL);
+		vlcmd = RTM_NEWVLAN;
+		break;
+	case NETDEV_UNREGISTER:
+		changed = !br_vlan_delete(br, br->default_pvid);
+		vlcmd = RTM_DELVLAN;
+		break;
+	case NETDEV_CHANGEUPPER:
+		info = ptr;
+		br_vlan_upper_change(dev, info->upper_dev, info->linking);
+		break;
+
+	case NETDEV_CHANGE:
+	case NETDEV_UP:
+		if (!br_opt_get(br, BROPT_VLAN_BRIDGE_BINDING))
+			break;
+		br_vlan_link_state_change(dev, br);
+		break;
+	}
+	if (changed)
+		br_vlan_notify(br, NULL, br->default_pvid, 0, vlcmd);
+
+	return ret;
+}
+
+/* Must be protected by RTNL. */
+void br_vlan_port_event(struct net_bridge_port *p, unsigned long event)
+{
+	if (!br_opt_get(p->br, BROPT_VLAN_BRIDGE_BINDING))
+		return;
+
+	switch (event) {
+	case NETDEV_CHANGE:
+	case NETDEV_DOWN:
+	case NETDEV_UP:
+		br_vlan_set_all_vlan_dev_state(p);
+		break;
+	}
+}
+
+static bool br_vlan_stats_fill(struct sk_buff *skb,
+			       const struct net_bridge_vlan *v)
+{
+	struct br_vlan_stats stats;
+	struct nlattr *nest;
+
+	nest = nla_nest_start(skb, BRIDGE_VLANDB_ENTRY_STATS);
+	if (!nest)
+		return false;
+
+	br_vlan_get_stats(v, &stats);
+	if (nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_RX_BYTES, stats.rx_bytes,
+			      BRIDGE_VLANDB_STATS_PAD) ||
+	    nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_RX_PACKETS,
+			      stats.rx_packets, BRIDGE_VLANDB_STATS_PAD) ||
+	    nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_TX_BYTES, stats.tx_bytes,
+			      BRIDGE_VLANDB_STATS_PAD) ||
+	    nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_TX_PACKETS,
+			      stats.tx_packets, BRIDGE_VLANDB_STATS_PAD))
+		goto out_err;
+
+	nla_nest_end(skb, nest);
+
+	return true;
+
+out_err:
+	nla_nest_cancel(skb, nest);
+	return false;
+}
+
+/* v_opts is used to dump the options which must be equal in the whole range */
+static bool br_vlan_fill_vids(struct sk_buff *skb, u16 vid, u16 vid_range,
+			      const struct net_bridge_vlan *v_opts,
+			      u16 flags,
+			      bool dump_stats)
+{
+	struct bridge_vlan_info info;
+	struct nlattr *nest;
+
+	nest = nla_nest_start(skb, BRIDGE_VLANDB_ENTRY);
+	if (!nest)
+		return false;
+
+	memset(&info, 0, sizeof(info));
+	info.vid = vid;
+	if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
+		info.flags |= BRIDGE_VLAN_INFO_UNTAGGED;
+	if (flags & BRIDGE_VLAN_INFO_PVID)
+		info.flags |= BRIDGE_VLAN_INFO_PVID;
+
+	if (nla_put(skb, BRIDGE_VLANDB_ENTRY_INFO, sizeof(info), &info))
+		goto out_err;
+
+	if (vid_range && vid < vid_range &&
+	    !(flags & BRIDGE_VLAN_INFO_PVID) &&
+	    nla_put_u16(skb, BRIDGE_VLANDB_ENTRY_RANGE, vid_range))
+		goto out_err;
+
+	if (v_opts) {
+		if (!br_vlan_opts_fill(skb, v_opts))
+			goto out_err;
+
+		if (dump_stats && !br_vlan_stats_fill(skb, v_opts))
+			goto out_err;
+	}
+
+	nla_nest_end(skb, nest);
+
+	return true;
+
+out_err:
+	nla_nest_cancel(skb, nest);
+	return false;
+}
+
+static size_t rtnl_vlan_nlmsg_size(void)
+{
+	return NLMSG_ALIGN(sizeof(struct br_vlan_msg))
+		+ nla_total_size(0) /* BRIDGE_VLANDB_ENTRY */
+		+ nla_total_size(sizeof(u16)) /* BRIDGE_VLANDB_ENTRY_RANGE */
+		+ nla_total_size(sizeof(struct bridge_vlan_info)) /* BRIDGE_VLANDB_ENTRY_INFO */
+		+ br_vlan_opts_nl_size(); /* bridge vlan options */
+}
+
+void br_vlan_notify(const struct net_bridge *br,
+		    const struct net_bridge_port *p,
+		    u16 vid, u16 vid_range,
+		    int cmd)
+{
+	struct net_bridge_vlan_group *vg;
+	struct net_bridge_vlan *v = NULL;
+	struct br_vlan_msg *bvm;
+	struct nlmsghdr *nlh;
+	struct sk_buff *skb;
+	int err = -ENOBUFS;
+	struct net *net;
+	u16 flags = 0;
+	int ifindex;
+
+	/* right now notifications are done only with rtnl held */
+	ASSERT_RTNL();
+
+	if (p) {
+		ifindex = p->dev->ifindex;
+		vg = nbp_vlan_group(p);
+		net = dev_net(p->dev);
+	} else {
+		ifindex = br->dev->ifindex;
+		vg = br_vlan_group(br);
+		net = dev_net(br->dev);
+	}
+
+	skb = nlmsg_new(rtnl_vlan_nlmsg_size(), GFP_KERNEL);
+	if (!skb)
+		goto out_err;
+
+	err = -EMSGSIZE;
+	nlh = nlmsg_put(skb, 0, 0, cmd, sizeof(*bvm), 0);
+	if (!nlh)
+		goto out_err;
+	bvm = nlmsg_data(nlh);
+	memset(bvm, 0, sizeof(*bvm));
+	bvm->family = AF_BRIDGE;
+	bvm->ifindex = ifindex;
+
+	switch (cmd) {
+	case RTM_NEWVLAN:
+		/* need to find the vlan due to flags/options */
+		v = br_vlan_find(vg, vid);
+		if (!v || !br_vlan_should_use(v))
+			goto out_kfree;
+
+		flags = v->flags;
+		if (br_get_pvid(vg) == v->vid)
+			flags |= BRIDGE_VLAN_INFO_PVID;
+		break;
+	case RTM_DELVLAN:
+		break;
+	default:
+		goto out_kfree;
+	}
+
+	if (!br_vlan_fill_vids(skb, vid, vid_range, v, flags, false))
+		goto out_err;
+
+	nlmsg_end(skb, nlh);
+	rtnl_notify(skb, net, 0, RTNLGRP_BRVLAN, NULL, GFP_KERNEL);
+	return;
+
+out_err:
+	rtnl_set_sk_err(net, RTNLGRP_BRVLAN, err);
+out_kfree:
+	kfree_skb(skb);
+}
+
+/* check if v_curr can enter a range ending in range_end */
+bool br_vlan_can_enter_range(const struct net_bridge_vlan *v_curr,
+			     const struct net_bridge_vlan *range_end)
+{
+	return v_curr->vid - range_end->vid == 1 &&
+	       range_end->flags == v_curr->flags &&
+	       br_vlan_opts_eq_range(v_curr, range_end);
+}
+
+static int br_vlan_dump_dev(const struct net_device *dev,
+			    struct sk_buff *skb,
+			    struct netlink_callback *cb,
+			    u32 dump_flags)
+{
+	struct net_bridge_vlan *v, *range_start = NULL, *range_end = NULL;
+	bool dump_stats = !!(dump_flags & BRIDGE_VLANDB_DUMPF_STATS);
+	struct net_bridge_vlan_group *vg;
+	int idx = 0, s_idx = cb->args[1];
+	struct nlmsghdr *nlh = NULL;
+	struct net_bridge_port *p;
+	struct br_vlan_msg *bvm;
+	struct net_bridge *br;
+	int err = 0;
+	u16 pvid;
+
+	if (!netif_is_bridge_master(dev) && !netif_is_bridge_port(dev))
+		return -EINVAL;
+
+	if (netif_is_bridge_master(dev)) {
+		br = netdev_priv(dev);
+		vg = br_vlan_group_rcu(br);
+		p = NULL;
+	} else {
+		p = br_port_get_rcu(dev);
+		if (WARN_ON(!p))
+			return -EINVAL;
+		vg = nbp_vlan_group_rcu(p);
+		br = p->br;
+	}
+
+	if (!vg)
+		return 0;
+
+	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
+			RTM_NEWVLAN, sizeof(*bvm), NLM_F_MULTI);
+	if (!nlh)
+		return -EMSGSIZE;
+	bvm = nlmsg_data(nlh);
+	memset(bvm, 0, sizeof(*bvm));
+	bvm->family = PF_BRIDGE;
+	bvm->ifindex = dev->ifindex;
+	pvid = br_get_pvid(vg);
+
+	/* idx must stay at range's beginning until it is filled in */
+	list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
+		if (!br_vlan_should_use(v))
+			continue;
+		if (idx < s_idx) {
+			idx++;
+			continue;
+		}
+
+		if (!range_start) {
+			range_start = v;
+			range_end = v;
+			continue;
+		}
+
+		if (dump_stats || v->vid == pvid ||
+		    !br_vlan_can_enter_range(v, range_end)) {
+			u16 vlan_flags = br_vlan_flags(range_start, pvid);
+
+			if (!br_vlan_fill_vids(skb, range_start->vid,
+					       range_end->vid, range_start,
+					       vlan_flags, dump_stats)) {
+				err = -EMSGSIZE;
+				break;
+			}
+			/* advance number of filled vlans */
+			idx += range_end->vid - range_start->vid + 1;
+
+			range_start = v;
+		}
+		range_end = v;
+	}
+
+	/* err will be 0 and range_start will be set in 3 cases here:
+	 * - first vlan (range_start == range_end)
+	 * - last vlan (range_start == range_end, not in range)
+	 * - last vlan range (range_start != range_end, in range)
+	 */
+	if (!err && range_start &&
+	    !br_vlan_fill_vids(skb, range_start->vid, range_end->vid,
+			       range_start, br_vlan_flags(range_start, pvid),
+			       dump_stats))
+		err = -EMSGSIZE;
+
+	cb->args[1] = err ? idx : 0;
+
+	nlmsg_end(skb, nlh);
+
+	return err;
+}
+
+static const struct nla_policy br_vlan_db_dump_pol[BRIDGE_VLANDB_DUMP_MAX + 1] = {
+	[BRIDGE_VLANDB_DUMP_FLAGS] = { .type = NLA_U32 },
+};
+
+static int br_vlan_rtm_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+	struct nlattr *dtb[BRIDGE_VLANDB_DUMP_MAX + 1];
+	int idx = 0, err = 0, s_idx = cb->args[0];
+	struct net *net = sock_net(skb->sk);
+	struct br_vlan_msg *bvm;
+	struct net_device *dev;
+	u32 dump_flags = 0;
+
+	err = nlmsg_parse(cb->nlh, sizeof(*bvm), dtb, BRIDGE_VLANDB_DUMP_MAX,
+			  br_vlan_db_dump_pol, cb->extack);
+	if (err < 0)
+		return err;
+
+	bvm = nlmsg_data(cb->nlh);
+	if (dtb[BRIDGE_VLANDB_DUMP_FLAGS])
+		dump_flags = nla_get_u32(dtb[BRIDGE_VLANDB_DUMP_FLAGS]);
+
+	rcu_read_lock();
+	if (bvm->ifindex) {
+		dev = dev_get_by_index_rcu(net, bvm->ifindex);
+		if (!dev) {
+			err = -ENODEV;
+			goto out_err;
+		}
+		err = br_vlan_dump_dev(dev, skb, cb, dump_flags);
+		/* if the dump completed without an error we return 0 here */
+		if (err != -EMSGSIZE)
+			goto out_err;
+	} else {
+		for_each_netdev_rcu(net, dev) {
+			if (idx < s_idx)
+				goto skip;
+
+			err = br_vlan_dump_dev(dev, skb, cb, dump_flags);
+			if (err == -EMSGSIZE)
+				break;
+skip:
+			idx++;
+		}
+	}
+	cb->args[0] = idx;
+	rcu_read_unlock();
+
+	return skb->len;
+
+out_err:
+	rcu_read_unlock();
+
+	return err;
+}
+
+static const struct nla_policy br_vlan_db_policy[BRIDGE_VLANDB_ENTRY_MAX + 1] = {
+	[BRIDGE_VLANDB_ENTRY_INFO] =
+		NLA_POLICY_EXACT_LEN(sizeof(struct bridge_vlan_info)),
+	[BRIDGE_VLANDB_ENTRY_RANGE] = { .type = NLA_U16 },
+	[BRIDGE_VLANDB_ENTRY_STATE] = { .type = NLA_U8 },
+	[BRIDGE_VLANDB_ENTRY_TUNNEL_INFO] = { .type = NLA_NESTED },
+};
+
+static int br_vlan_rtm_process_one(struct net_device *dev,
+				   const struct nlattr *attr,
+				   int cmd, struct netlink_ext_ack *extack)
+{
+	struct bridge_vlan_info *vinfo, vrange_end, *vinfo_last = NULL;
+	struct nlattr *tb[BRIDGE_VLANDB_ENTRY_MAX + 1];
+	bool changed = false, skip_processing = false;
+	struct net_bridge_vlan_group *vg;
+	struct net_bridge_port *p = NULL;
+	int err = 0, cmdmap = 0;
+	struct net_bridge *br;
+
+	if (netif_is_bridge_master(dev)) {
+		br = netdev_priv(dev);
+		vg = br_vlan_group(br);
+	} else {
+		p = br_port_get_rtnl(dev);
+		if (WARN_ON(!p))
+			return -ENODEV;
+		br = p->br;
+		vg = nbp_vlan_group(p);
+	}
+
+	if (WARN_ON(!vg))
+		return -ENODEV;
+
+	err = nla_parse_nested(tb, BRIDGE_VLANDB_ENTRY_MAX, attr,
+			       br_vlan_db_policy, extack);
+	if (err)
+		return err;
+
+	if (!tb[BRIDGE_VLANDB_ENTRY_INFO]) {
+		NL_SET_ERR_MSG_MOD(extack, "Missing vlan entry info");
+		return -EINVAL;
+	}
+	memset(&vrange_end, 0, sizeof(vrange_end));
+
+	vinfo = nla_data(tb[BRIDGE_VLANDB_ENTRY_INFO]);
+	if (vinfo->flags & (BRIDGE_VLAN_INFO_RANGE_BEGIN |
+			    BRIDGE_VLAN_INFO_RANGE_END)) {
+		NL_SET_ERR_MSG_MOD(extack, "Old-style vlan ranges are not allowed when using RTM vlan calls");
+		return -EINVAL;
+	}
+	if (!br_vlan_valid_id(vinfo->vid, extack))
+		return -EINVAL;
+
+	if (tb[BRIDGE_VLANDB_ENTRY_RANGE]) {
+		vrange_end.vid = nla_get_u16(tb[BRIDGE_VLANDB_ENTRY_RANGE]);
+		/* validate user-provided flags without RANGE_BEGIN */
+		vrange_end.flags = BRIDGE_VLAN_INFO_RANGE_END | vinfo->flags;
+		vinfo->flags |= BRIDGE_VLAN_INFO_RANGE_BEGIN;
+
+		/* vinfo_last is the range start, vinfo the range end */
+		vinfo_last = vinfo;
+		vinfo = &vrange_end;
+
+		if (!br_vlan_valid_id(vinfo->vid, extack) ||
+		    !br_vlan_valid_range(vinfo, vinfo_last, extack))
+			return -EINVAL;
+	}
+
+	switch (cmd) {
+	case RTM_NEWVLAN:
+		cmdmap = RTM_SETLINK;
+		skip_processing = !!(vinfo->flags & BRIDGE_VLAN_INFO_ONLY_OPTS);
+		break;
+	case RTM_DELVLAN:
+		cmdmap = RTM_DELLINK;
+		break;
+	}
+
+	if (!skip_processing) {
+		struct bridge_vlan_info *tmp_last = vinfo_last;
+
+		/* br_process_vlan_info may overwrite vinfo_last */
+		err = br_process_vlan_info(br, p, cmdmap, vinfo, &tmp_last,
+					   &changed, extack);
+
+		/* notify first if anything changed */
+		if (changed)
+			br_ifinfo_notify(cmdmap, br, p);
+
+		if (err)
+			return err;
+	}
+
+	/* deal with options */
+	if (cmd == RTM_NEWVLAN) {
+		struct net_bridge_vlan *range_start, *range_end;
+
+		if (vinfo_last) {
+			range_start = br_vlan_find(vg, vinfo_last->vid);
+			range_end = br_vlan_find(vg, vinfo->vid);
+		} else {
+			range_start = br_vlan_find(vg, vinfo->vid);
+			range_end = range_start;
+		}
+
+		err = br_vlan_process_options(br, p, range_start, range_end,
+					      tb, extack);
+	}
+
+	return err;
+}
+
+static int br_vlan_rtm_process(struct sk_buff *skb, struct nlmsghdr *nlh,
+			       struct netlink_ext_ack *extack)
+{
+	struct net *net = sock_net(skb->sk);
+	struct br_vlan_msg *bvm;
+	struct net_device *dev;
+	struct nlattr *attr;
+	int err, vlans = 0;
+	int rem;
+
+	/* this should validate the header and check for remaining bytes */
+	err = nlmsg_parse(nlh, sizeof(*bvm), NULL, BRIDGE_VLANDB_MAX, NULL,
+			  extack);
+	if (err < 0)
+		return err;
+
+	bvm = nlmsg_data(nlh);
+	dev = __dev_get_by_index(net, bvm->ifindex);
+	if (!dev)
+		return -ENODEV;
+
+	if (!netif_is_bridge_master(dev) && !netif_is_bridge_port(dev)) {
+		NL_SET_ERR_MSG_MOD(extack, "The device is not a valid bridge or bridge port");
+		return -EINVAL;
+	}
+
+	nlmsg_for_each_attr(attr, nlh, sizeof(*bvm), rem) {
+		if (nla_type(attr) != BRIDGE_VLANDB_ENTRY)
+			continue;
+
+		vlans++;
+		err = br_vlan_rtm_process_one(dev, attr, nlh->nlmsg_type,
+					      extack);
+		if (err)
+			break;
+	}
+	if (!vlans) {
+		NL_SET_ERR_MSG_MOD(extack, "No vlans found to process");
+		err = -EINVAL;
+	}
+
+	return err;
+}
+
+void br_vlan_rtnl_init(void)
+{
+	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_GETVLAN, NULL,
+			     br_vlan_rtm_dump, 0);
+	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_NEWVLAN,
+			     br_vlan_rtm_process, NULL, 0);
+	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_DELVLAN,
+			     br_vlan_rtm_process, NULL, 0);
+}
+
+void br_vlan_rtnl_uninit(void)
+{
+	rtnl_unregister(PF_BRIDGE, RTM_GETVLAN);
+	rtnl_unregister(PF_BRIDGE, RTM_NEWVLAN);
+	rtnl_unregister(PF_BRIDGE, RTM_DELVLAN);
+}