2023-12-06 08f87f769b595151be1afeff53e144f543faa614
kernel/net/switchdev/switchdev.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * net/switchdev/switchdev.c - Switch device API
  * Copyright (c) 2014-2015 Jiri Pirko <jiri@resnulli.us>
  * Copyright (c) 2014-2015 Scott Feldman <sfeldma@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
  */
 
 #include <linux/kernel.h>
@@ -23,78 +19,6 @@
 #include <linux/rtnetlink.h>
 #include <net/switchdev.h>
 
-/**
- * switchdev_trans_item_enqueue - Enqueue data item to transaction queue
- *
- * @trans: transaction
- * @data: pointer to data being queued
- * @destructor: data destructor
- * @tritem: transaction item being queued
- *
- * Enqeueue data item to transaction queue. tritem is typically placed in
- * cointainter pointed at by data pointer. Destructor is called on
- * transaction abort and after successful commit phase in case
- * the caller did not dequeue the item before.
- */
-void switchdev_trans_item_enqueue(struct switchdev_trans *trans,
-				  void *data, void (*destructor)(void const *),
-				  struct switchdev_trans_item *tritem)
-{
-	tritem->data = data;
-	tritem->destructor = destructor;
-	list_add_tail(&tritem->list, &trans->item_list);
-}
-EXPORT_SYMBOL_GPL(switchdev_trans_item_enqueue);
-
-static struct switchdev_trans_item *
-__switchdev_trans_item_dequeue(struct switchdev_trans *trans)
-{
-	struct switchdev_trans_item *tritem;
-
-	if (list_empty(&trans->item_list))
-		return NULL;
-	tritem = list_first_entry(&trans->item_list,
-				  struct switchdev_trans_item, list);
-	list_del(&tritem->list);
-	return tritem;
-}
-
-/**
- * switchdev_trans_item_dequeue - Dequeue data item from transaction queue
- *
- * @trans: transaction
- */
-void *switchdev_trans_item_dequeue(struct switchdev_trans *trans)
-{
-	struct switchdev_trans_item *tritem;
-
-	tritem = __switchdev_trans_item_dequeue(trans);
-	BUG_ON(!tritem);
-	return tritem->data;
-}
-EXPORT_SYMBOL_GPL(switchdev_trans_item_dequeue);
-
-static void switchdev_trans_init(struct switchdev_trans *trans)
-{
-	INIT_LIST_HEAD(&trans->item_list);
-}
-
-static void switchdev_trans_items_destroy(struct switchdev_trans *trans)
-{
-	struct switchdev_trans_item *tritem;
-
-	while ((tritem = __switchdev_trans_item_dequeue(trans)))
-		tritem->destructor(tritem->data);
-}
-
-static void switchdev_trans_items_warn_destroy(struct net_device *dev,
-					       struct switchdev_trans *trans)
-{
-	WARN(!list_empty(&trans->item_list), "%s: transaction item queue is not empty.\n",
-	     dev->name);
-	switchdev_trans_items_destroy(trans);
-}
-
 static LIST_HEAD(deferred);
 static DEFINE_SPINLOCK(deferred_lock);
 
@@ -105,7 +29,7 @@
 	struct list_head list;
 	struct net_device *dev;
 	switchdev_deferred_func_t *func;
-	unsigned long data[0];
+	unsigned long data[];
 };
 
 static struct switchdev_deferred_item *switchdev_deferred_dequeue(void)
@@ -174,81 +98,32 @@
 	return 0;
 }
 
-/**
- * switchdev_port_attr_get - Get port attribute
- *
- * @dev: port device
- * @attr: attribute to get
- */
-int switchdev_port_attr_get(struct net_device *dev, struct switchdev_attr *attr)
+static int switchdev_port_attr_notify(enum switchdev_notifier_type nt,
+				      struct net_device *dev,
+				      const struct switchdev_attr *attr,
+				      struct switchdev_trans *trans)
 {
-	const struct switchdev_ops *ops = dev->switchdev_ops;
-	struct net_device *lower_dev;
-	struct list_head *iter;
-	struct switchdev_attr first = {
-		.id = SWITCHDEV_ATTR_ID_UNDEFINED
+	int err;
+	int rc;
+
+	struct switchdev_notifier_port_attr_info attr_info = {
+		.attr = attr,
+		.trans = trans,
+		.handled = false,
 	};
-	int err = -EOPNOTSUPP;
 
-	if (ops && ops->switchdev_port_attr_get)
-		return ops->switchdev_port_attr_get(dev, attr);
-
-	if (attr->flags & SWITCHDEV_F_NO_RECURSE)
+	rc = call_switchdev_blocking_notifiers(nt, dev,
+					       &attr_info.info, NULL);
+	err = notifier_to_errno(rc);
+	if (err) {
+		WARN_ON(!attr_info.handled);
 		return err;
-
-	/* Switch device port(s) may be stacked under
-	 * bond/team/vlan dev, so recurse down to get attr on
-	 * each port. Return -ENODATA if attr values don't
-	 * compare across ports.
-	 */
-
-	netdev_for_each_lower_dev(dev, lower_dev, iter) {
-		err = switchdev_port_attr_get(lower_dev, attr);
-		if (err)
-			break;
-		if (first.id == SWITCHDEV_ATTR_ID_UNDEFINED)
-			first = *attr;
-		else if (memcmp(&first, attr, sizeof(*attr)))
-			return -ENODATA;
 	}
 
-	return err;
-}
-EXPORT_SYMBOL_GPL(switchdev_port_attr_get);
+	if (!attr_info.handled)
+		return -EOPNOTSUPP;
 
-static int __switchdev_port_attr_set(struct net_device *dev,
-				     const struct switchdev_attr *attr,
-				     struct switchdev_trans *trans)
-{
-	const struct switchdev_ops *ops = dev->switchdev_ops;
-	struct net_device *lower_dev;
-	struct list_head *iter;
-	int err = -EOPNOTSUPP;
-
-	if (ops && ops->switchdev_port_attr_set) {
-		err = ops->switchdev_port_attr_set(dev, attr, trans);
-		goto done;
-	}
-
-	if (attr->flags & SWITCHDEV_F_NO_RECURSE)
-		goto done;
-
-	/* Switch device port(s) may be stacked under
-	 * bond/team/vlan dev, so recurse down to set attr on
-	 * each port.
-	 */
-
-	netdev_for_each_lower_dev(dev, lower_dev, iter) {
-		err = __switchdev_port_attr_set(lower_dev, attr, trans);
-		if (err)
-			break;
-	}
-
-done:
-	if (err == -EOPNOTSUPP && attr->flags & SWITCHDEV_F_SKIP_EOPNOTSUPP)
-		err = 0;
-
-	return err;
+	return 0;
 }
 
 static int switchdev_port_attr_set_now(struct net_device *dev,
@@ -256,8 +131,6 @@
 {
 	struct switchdev_trans trans;
 	int err;
-
-	switchdev_trans_init(&trans);
 
 	/* Phase I: prepare for attr set. Driver/device should fail
 	 * here if there are going to be issues in the commit phase,
@@ -267,18 +140,10 @@
 	 */
 
 	trans.ph_prepare = true;
-	err = __switchdev_port_attr_set(dev, attr, &trans);
-	if (err) {
-		/* Prepare phase failed: abort the transaction. Any
-		 * resources reserved in the prepare phase are
-		 * released.
-		 */
-
-		if (err != -EOPNOTSUPP)
-			switchdev_trans_items_destroy(&trans);
-
+	err = switchdev_port_attr_notify(SWITCHDEV_PORT_ATTR_SET, dev, attr,
+					 &trans);
+	if (err)
 		return err;
-	}
 
 	/* Phase II: commit attr set. This cannot fail as a fault
 	 * of driver/device. If it does, it's a bug in the driver/device
@@ -286,10 +151,10 @@
 	 */
 
 	trans.ph_prepare = false;
-	err = __switchdev_port_attr_set(dev, attr, &trans);
+	err = switchdev_port_attr_notify(SWITCHDEV_PORT_ATTR_SET, dev, attr,
+					 &trans);
 	WARN(err, "%s: Commit of attribute (id=%d) failed.\n",
 	     dev->name, attr->id);
-	switchdev_trans_items_warn_destroy(dev, &trans);
 
 	return err;
 }
@@ -353,41 +218,40 @@
 	return 0;
 }
 
-static int __switchdev_port_obj_add(struct net_device *dev,
-				    const struct switchdev_obj *obj,
-				    struct switchdev_trans *trans)
+static int switchdev_port_obj_notify(enum switchdev_notifier_type nt,
+				     struct net_device *dev,
+				     const struct switchdev_obj *obj,
+				     struct switchdev_trans *trans,
+				     struct netlink_ext_ack *extack)
 {
-	const struct switchdev_ops *ops = dev->switchdev_ops;
-	struct net_device *lower_dev;
-	struct list_head *iter;
-	int err = -EOPNOTSUPP;
+	int rc;
+	int err;
 
-	if (ops && ops->switchdev_port_obj_add)
-		return ops->switchdev_port_obj_add(dev, obj, trans);
+	struct switchdev_notifier_port_obj_info obj_info = {
+		.obj = obj,
+		.trans = trans,
+		.handled = false,
+	};
 
-	/* Switch device port(s) may be stacked under
-	 * bond/team/vlan dev, so recurse down to add object on
-	 * each port.
-	 */
-
-	netdev_for_each_lower_dev(dev, lower_dev, iter) {
-		err = __switchdev_port_obj_add(lower_dev, obj, trans);
-		if (err)
-			break;
+	rc = call_switchdev_blocking_notifiers(nt, dev, &obj_info.info, extack);
+	err = notifier_to_errno(rc);
+	if (err) {
+		WARN_ON(!obj_info.handled);
+		return err;
 	}
-
-	return err;
+	if (!obj_info.handled)
+		return -EOPNOTSUPP;
+	return 0;
 }
 
 static int switchdev_port_obj_add_now(struct net_device *dev,
-				      const struct switchdev_obj *obj)
+				      const struct switchdev_obj *obj,
+				      struct netlink_ext_ack *extack)
 {
 	struct switchdev_trans trans;
 	int err;
 
 	ASSERT_RTNL();
-
-	switchdev_trans_init(&trans);
 
 	/* Phase I: prepare for obj add. Driver/device should fail
 	 * here if there are going to be issues in the commit phase,
@@ -397,18 +261,10 @@
 	 */
 
 	trans.ph_prepare = true;
-	err = __switchdev_port_obj_add(dev, obj, &trans);
-	if (err) {
-		/* Prepare phase failed: abort the transaction. Any
-		 * resources reserved in the prepare phase are
-		 * released.
-		 */
-
-		if (err != -EOPNOTSUPP)
-			switchdev_trans_items_destroy(&trans);
-
+	err = switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
+					dev, obj, &trans, extack);
+	if (err)
 		return err;
-	}
 
 	/* Phase II: commit obj add. This cannot fail as a fault
 	 * of driver/device. If it does, it's a bug in the driver/device
@@ -416,9 +272,9 @@
 	 */
 
 	trans.ph_prepare = false;
-	err = __switchdev_port_obj_add(dev, obj, &trans);
+	err = switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
+					dev, obj, &trans, extack);
 	WARN(err, "%s: Commit of object (id=%d) failed.\n", dev->name, obj->id);
-	switchdev_trans_items_warn_destroy(dev, &trans);
 
 	return err;
 }
@@ -429,7 +285,7 @@
 	const struct switchdev_obj *obj = data;
 	int err;
 
-	err = switchdev_port_obj_add_now(dev, obj);
+	err = switchdev_port_obj_add_now(dev, obj, NULL);
 	if (err && err != -EOPNOTSUPP)
 		netdev_err(dev, "failed (err=%d) to add object (id=%d)\n",
 			   err, obj->id);
@@ -448,8 +304,8 @@
  * switchdev_port_obj_add - Add port object
  *
  * @dev: port device
- * @id: object ID
  * @obj: object to add
+ * @extack: netlink extended ack
  *
  * Use a 2-phase prepare-commit transaction model to ensure
  * system is not left in a partially updated state due to
@@ -459,38 +315,21 @@
  * in case SWITCHDEV_F_DEFER flag is not set.
  */
 int switchdev_port_obj_add(struct net_device *dev,
-			   const struct switchdev_obj *obj)
+			   const struct switchdev_obj *obj,
+			   struct netlink_ext_ack *extack)
 {
 	if (obj->flags & SWITCHDEV_F_DEFER)
 		return switchdev_port_obj_add_defer(dev, obj);
 	ASSERT_RTNL();
-	return switchdev_port_obj_add_now(dev, obj);
+	return switchdev_port_obj_add_now(dev, obj, extack);
 }
 EXPORT_SYMBOL_GPL(switchdev_port_obj_add);
 
 static int switchdev_port_obj_del_now(struct net_device *dev,
 				      const struct switchdev_obj *obj)
 {
-	const struct switchdev_ops *ops = dev->switchdev_ops;
-	struct net_device *lower_dev;
-	struct list_head *iter;
-	int err = -EOPNOTSUPP;
-
-	if (ops && ops->switchdev_port_obj_del)
-		return ops->switchdev_port_obj_del(dev, obj);
-
-	/* Switch device port(s) may be stacked under
-	 * bond/team/vlan dev, so recurse down to delete object on
-	 * each port.
-	 */
-
-	netdev_for_each_lower_dev(dev, lower_dev, iter) {
-		err = switchdev_port_obj_del_now(lower_dev, obj);
-		if (err)
-			break;
-	}
-
-	return err;
+	return switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_DEL,
+					 dev, obj, NULL, NULL);
 }
 
 static void switchdev_port_obj_del_deferred(struct net_device *dev,
@@ -518,7 +357,6 @@
  * switchdev_port_obj_del - Delete port object
  *
  * @dev: port device
- * @id: object ID
  * @obj: object to delete
 *
 * rtnl_lock must be held and must not be in atomic section,
@@ -535,6 +373,7 @@
 EXPORT_SYMBOL_GPL(switchdev_port_obj_del);
 
 static ATOMIC_NOTIFIER_HEAD(switchdev_notif_chain);
+static BLOCKING_NOTIFIER_HEAD(switchdev_blocking_notif_chain);
 
 /**
  * register_switchdev_notifier - Register notifier
@@ -565,33 +404,211 @@
  * @val: value passed unmodified to notifier function
  * @dev: port device
  * @info: notifier information data
- *
+ * @extack: netlink extended ack
  * Call all network notifier blocks.
  */
 int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
-			     struct switchdev_notifier_info *info)
+			     struct switchdev_notifier_info *info,
+			     struct netlink_ext_ack *extack)
 {
 	info->dev = dev;
+	info->extack = extack;
 	return atomic_notifier_call_chain(&switchdev_notif_chain, val, info);
 }
 EXPORT_SYMBOL_GPL(call_switchdev_notifiers);
 
-bool switchdev_port_same_parent_id(struct net_device *a,
-				   struct net_device *b)
+int register_switchdev_blocking_notifier(struct notifier_block *nb)
 {
-	struct switchdev_attr a_attr = {
-		.orig_dev = a,
-		.id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
-	};
-	struct switchdev_attr b_attr = {
-		.orig_dev = b,
-		.id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
-	};
+	struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain;
 
-	if (switchdev_port_attr_get(a, &a_attr) ||
-	    switchdev_port_attr_get(b, &b_attr))
-		return false;
-
-	return netdev_phys_item_id_same(&a_attr.u.ppid, &b_attr.u.ppid);
+	return blocking_notifier_chain_register(chain, nb);
 }
-EXPORT_SYMBOL_GPL(switchdev_port_same_parent_id);
+EXPORT_SYMBOL_GPL(register_switchdev_blocking_notifier);
+
+int unregister_switchdev_blocking_notifier(struct notifier_block *nb)
+{
+	struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain;
+
+	return blocking_notifier_chain_unregister(chain, nb);
+}
+EXPORT_SYMBOL_GPL(unregister_switchdev_blocking_notifier);
+
+int call_switchdev_blocking_notifiers(unsigned long val, struct net_device *dev,
+				      struct switchdev_notifier_info *info,
+				      struct netlink_ext_ack *extack)
+{
+	info->dev = dev;
+	info->extack = extack;
+	return blocking_notifier_call_chain(&switchdev_blocking_notif_chain,
+					    val, info);
+}
+EXPORT_SYMBOL_GPL(call_switchdev_blocking_notifiers);
+
+static int __switchdev_handle_port_obj_add(struct net_device *dev,
+			struct switchdev_notifier_port_obj_info *port_obj_info,
+			bool (*check_cb)(const struct net_device *dev),
+			int (*add_cb)(struct net_device *dev,
+				      const struct switchdev_obj *obj,
+				      struct switchdev_trans *trans,
+				      struct netlink_ext_ack *extack))
+{
+	struct netlink_ext_ack *extack;
+	struct net_device *lower_dev;
+	struct list_head *iter;
+	int err = -EOPNOTSUPP;
+
+	extack = switchdev_notifier_info_to_extack(&port_obj_info->info);
+
+	if (check_cb(dev)) {
+		err = add_cb(dev, port_obj_info->obj, port_obj_info->trans,
+			     extack);
+		if (err != -EOPNOTSUPP)
+			port_obj_info->handled = true;
+		return err;
+	}
+
+	/* Switch ports might be stacked under e.g. a LAG. Ignore the
+	 * unsupported devices, another driver might be able to handle them. But
+	 * propagate to the callers any hard errors.
+	 *
+	 * If the driver does its own bookkeeping of stacked ports, it's not
+	 * necessary to go through this helper.
+	 */
+	netdev_for_each_lower_dev(dev, lower_dev, iter) {
+		if (netif_is_bridge_master(lower_dev))
+			continue;
+
+		err = __switchdev_handle_port_obj_add(lower_dev, port_obj_info,
+						      check_cb, add_cb);
+		if (err && err != -EOPNOTSUPP)
+			return err;
+	}
+
+	return err;
+}
+
+int switchdev_handle_port_obj_add(struct net_device *dev,
+			struct switchdev_notifier_port_obj_info *port_obj_info,
+			bool (*check_cb)(const struct net_device *dev),
+			int (*add_cb)(struct net_device *dev,
+				      const struct switchdev_obj *obj,
+				      struct switchdev_trans *trans,
+				      struct netlink_ext_ack *extack))
+{
+	int err;
+
+	err = __switchdev_handle_port_obj_add(dev, port_obj_info, check_cb,
+					      add_cb);
+	if (err == -EOPNOTSUPP)
+		err = 0;
+	return err;
+}
+EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_add);
+
+static int __switchdev_handle_port_obj_del(struct net_device *dev,
+			struct switchdev_notifier_port_obj_info *port_obj_info,
+			bool (*check_cb)(const struct net_device *dev),
+			int (*del_cb)(struct net_device *dev,
+				      const struct switchdev_obj *obj))
+{
+	struct net_device *lower_dev;
+	struct list_head *iter;
+	int err = -EOPNOTSUPP;
+
+	if (check_cb(dev)) {
+		err = del_cb(dev, port_obj_info->obj);
+		if (err != -EOPNOTSUPP)
+			port_obj_info->handled = true;
+		return err;
+	}
+
+	/* Switch ports might be stacked under e.g. a LAG. Ignore the
+	 * unsupported devices, another driver might be able to handle them. But
+	 * propagate to the callers any hard errors.
+	 *
+	 * If the driver does its own bookkeeping of stacked ports, it's not
+	 * necessary to go through this helper.
+	 */
+	netdev_for_each_lower_dev(dev, lower_dev, iter) {
+		if (netif_is_bridge_master(lower_dev))
+			continue;
+
+		err = __switchdev_handle_port_obj_del(lower_dev, port_obj_info,
+						      check_cb, del_cb);
+		if (err && err != -EOPNOTSUPP)
+			return err;
+	}
+
+	return err;
+}
+
+int switchdev_handle_port_obj_del(struct net_device *dev,
+			struct switchdev_notifier_port_obj_info *port_obj_info,
+			bool (*check_cb)(const struct net_device *dev),
+			int (*del_cb)(struct net_device *dev,
+				      const struct switchdev_obj *obj))
+{
+	int err;
+
+	err = __switchdev_handle_port_obj_del(dev, port_obj_info, check_cb,
+					      del_cb);
+	if (err == -EOPNOTSUPP)
+		err = 0;
+	return err;
+}
+EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_del);
+
+static int __switchdev_handle_port_attr_set(struct net_device *dev,
+			struct switchdev_notifier_port_attr_info *port_attr_info,
+			bool (*check_cb)(const struct net_device *dev),
+			int (*set_cb)(struct net_device *dev,
+				      const struct switchdev_attr *attr,
+				      struct switchdev_trans *trans))
+{
+	struct net_device *lower_dev;
+	struct list_head *iter;
+	int err = -EOPNOTSUPP;
+
+	if (check_cb(dev)) {
+		err = set_cb(dev, port_attr_info->attr, port_attr_info->trans);
+		if (err != -EOPNOTSUPP)
+			port_attr_info->handled = true;
+		return err;
+	}
+
+	/* Switch ports might be stacked under e.g. a LAG. Ignore the
+	 * unsupported devices, another driver might be able to handle them. But
+	 * propagate to the callers any hard errors.
+	 *
+	 * If the driver does its own bookkeeping of stacked ports, it's not
+	 * necessary to go through this helper.
+	 */
+	netdev_for_each_lower_dev(dev, lower_dev, iter) {
+		if (netif_is_bridge_master(lower_dev))
+			continue;
+
+		err = __switchdev_handle_port_attr_set(lower_dev, port_attr_info,
+						       check_cb, set_cb);
+		if (err && err != -EOPNOTSUPP)
+			return err;
+	}
+
+	return err;
+}
+
+int switchdev_handle_port_attr_set(struct net_device *dev,
+			struct switchdev_notifier_port_attr_info *port_attr_info,
+			bool (*check_cb)(const struct net_device *dev),
+			int (*set_cb)(struct net_device *dev,
+				      const struct switchdev_attr *attr,
+				      struct switchdev_trans *trans))
+{
+	int err;
+
+	err = __switchdev_handle_port_attr_set(dev, port_attr_info, check_cb,
+					       set_cb);
+	if (err == -EOPNOTSUPP)
+		err = 0;
+	return err;
+}
+EXPORT_SYMBOL_GPL(switchdev_handle_port_attr_set);
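
For reference, below is a minimal, illustrative sketch (not part of the commit) of how a port driver might consume the new blocking notifier chain and the switchdev_handle_port_obj_add() helper introduced above, in place of the removed dev->switchdev_ops hooks. All my_* identifiers are hypothetical; the overall shape follows what in-tree drivers do with these helpers.

/* Illustrative sketch only: every my_* symbol is hypothetical driver code. */
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <net/switchdev.h>

static const struct net_device_ops my_port_netdev_ops;

/* check_cb: claim only netdevs that belong to this driver. */
static bool my_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &my_port_netdev_ops;
}

/* Hypothetical hardware hook: program a VLAN range on the port. */
static int my_port_vlan_add(struct net_device *dev,
			    const struct switchdev_obj_port_vlan *vlan,
			    struct netlink_ext_ack *extack)
{
	netdev_dbg(dev, "adding VLANs %u..%u\n",
		   vlan->vid_begin, vlan->vid_end);
	return 0;
}

/* add_cb passed to switchdev_handle_port_obj_add(). */
static int my_port_obj_add(struct net_device *dev,
			   const struct switchdev_obj *obj,
			   struct switchdev_trans *trans,
			   struct netlink_ext_ack *extack)
{
	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		if (switchdev_trans_ph_prepare(trans))
			return 0; /* validate/reserve here, fail early */
		return my_port_vlan_add(dev, SWITCHDEV_OBJ_PORT_VLAN(obj),
					extack);
	default:
		return -EOPNOTSUPP;
	}
}

/* Blocking notifier callback: dispatch switchdev object events. */
static int my_switchdev_blocking_event(struct notifier_block *nb,
				       unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err;

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
		err = switchdev_handle_port_obj_add(dev, ptr,
						    my_port_dev_check,
						    my_port_obj_add);
		return notifier_from_errno(err);
	default:
		return NOTIFY_DONE;
	}
}

static struct notifier_block my_switchdev_blocking_nb = {
	.notifier_call = my_switchdev_blocking_event,
};

static int __init my_driver_init(void)
{
	/* Replaces the former dev->switchdev_ops hooks removed above. */
	return register_switchdev_blocking_notifier(&my_switchdev_blocking_nb);
}

The two-phase model kept by this patch means the same callback runs twice per request: once with trans->ph_prepare set, where the driver should validate the request and reserve resources so it can fail cleanly, and once to commit, where a failure only trips the WARN() in switchdev_port_attr_set_now()/switchdev_port_obj_add_now(). SWITCHDEV_PORT_OBJ_DEL and SWITCHDEV_PORT_ATTR_SET can be handled the same way via switchdev_handle_port_obj_del() and switchdev_handle_port_attr_set().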