| .. | .. |
|---|
| 1 | +// SPDX-License-Identifier: GPL-2.0-or-later |
|---|
| 1 | 2 | /* |
|---|
| 2 | 3 | * net/dsa/slave.c - Slave device handling |
|---|
| 3 | 4 | * Copyright (c) 2008-2009 Marvell Semiconductor |
|---|
| 4 | | - * |
|---|
| 5 | | - * This program is free software; you can redistribute it and/or modify |
|---|
| 6 | | - * it under the terms of the GNU General Public License as published by |
|---|
| 7 | | - * the Free Software Foundation; either version 2 of the License, or |
|---|
| 8 | | - * (at your option) any later version. |
|---|
| 9 | 5 | */ |
|---|
| 10 | 6 | |
|---|
| 11 | 7 | #include <linux/list.h> |
|---|
| .. | .. |
|---|
| 25 | 21 | #include <linux/ptp_classify.h> |
|---|
| 26 | 22 | |
|---|
| 27 | 23 | #include "dsa_priv.h" |
|---|
| 28 | | - |
|---|
| 29 | | -static bool dsa_slave_dev_check(struct net_device *dev); |
|---|
| 30 | 24 | |
|---|
| 31 | 25 | /* slave mii_bus handling ***************************************************/ |
|---|
| 32 | 26 | static int dsa_slave_phy_read(struct mii_bus *bus, int addr, int reg) |
|---|
| .. | .. |
|---|
| 94 | 88 | goto clear_allmulti; |
|---|
| 95 | 89 | } |
|---|
| 96 | 90 | |
|---|
| 97 | | - err = dsa_port_enable(dp, dev->phydev); |
|---|
| 91 | + err = dsa_port_enable_rt(dp, dev->phydev); |
|---|
| 98 | 92 | if (err) |
|---|
| 99 | 93 | goto clear_promisc; |
|---|
| 100 | | - |
|---|
| 101 | | - phylink_start(dp->pl); |
|---|
| 102 | 94 | |
|---|
| 103 | 95 | return 0; |
|---|
| 104 | 96 | |
|---|
| .. | .. |
|---|
| 120 | 112 | struct net_device *master = dsa_slave_to_master(dev); |
|---|
| 121 | 113 | struct dsa_port *dp = dsa_slave_to_port(dev); |
|---|
| 122 | 114 | |
|---|
| 123 | | - phylink_stop(dp->pl); |
|---|
| 124 | | - |
|---|
| 125 | | - dsa_port_disable(dp, dev->phydev); |
|---|
| 115 | + dsa_port_disable_rt(dp); |
|---|
| 126 | 116 | |
|---|
| 127 | 117 | dev_mc_unsync(master, dev); |
|---|
| 128 | 118 | dev_uc_unsync(master, dev); |
|---|
| .. | .. |
|---|
| 295 | 285 | case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME: |
|---|
| 296 | 286 | ret = dsa_port_ageing_time(dp, attr->u.ageing_time, trans); |
|---|
| 297 | 287 | break; |
|---|
| 288 | + case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS: |
|---|
| 289 | + ret = dsa_port_pre_bridge_flags(dp, attr->u.brport_flags, |
|---|
| 290 | + trans); |
|---|
| 291 | + break; |
|---|
| 292 | + case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: |
|---|
| 293 | + ret = dsa_port_bridge_flags(dp, attr->u.brport_flags, trans); |
|---|
| 294 | + break; |
|---|
| 295 | + case SWITCHDEV_ATTR_ID_BRIDGE_MROUTER: |
|---|
| 296 | + ret = dsa_port_mrouter(dp->cpu_dp, attr->u.mrouter, trans); |
|---|
| 297 | + break; |
|---|
| 298 | 298 | default: |
|---|
| 299 | 299 | ret = -EOPNOTSUPP; |
|---|
| 300 | 300 | break; |
|---|
| .. | .. |
|---|
| 303 | 303 | return ret; |
|---|
| 304 | 304 | } |
|---|
| 305 | 305 | |
|---|
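
The bridge-flags and mrouter attribute handlers added above, like the VLAN object handlers that follow, take a `struct switchdev_trans` and are driven in two phases: a prepare pass where drivers only validate and reserve resources, and a commit pass where the hardware is actually programmed. `dsa_slave_vlan_rx_add_vid()` later in this diff drives both phases by hand; a minimal sketch of that pattern, reusing the `dsa_port_vlan_add()` signature shown in this file (the helper name is illustrative only):

```c
/* Sketch only: drive a switchdev transaction through both phases,
 * modelled on dsa_slave_vlan_rx_add_vid() further down in this diff.
 */
static int example_vlan_add_both_phases(struct dsa_port *dp,
					struct switchdev_obj_port_vlan *vlan)
{
	struct switchdev_trans trans;
	int err;

	trans.ph_prepare = true;	/* phase 1: validate, no hw writes */
	err = dsa_port_vlan_add(dp, vlan, &trans);
	if (err)
		return err;

	trans.ph_prepare = false;	/* phase 2: commit to hardware */
	return dsa_port_vlan_add(dp, vlan, &trans);
}
```
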
| 306 | +/* Must be called under rcu_read_lock() */ |
|---|
| 307 | +static int |
|---|
| 308 | +dsa_slave_vlan_check_for_8021q_uppers(struct net_device *slave, |
|---|
| 309 | + const struct switchdev_obj_port_vlan *vlan) |
|---|
| 310 | +{ |
|---|
| 311 | + struct net_device *upper_dev; |
|---|
| 312 | + struct list_head *iter; |
|---|
| 313 | + |
|---|
| 314 | + netdev_for_each_upper_dev_rcu(slave, upper_dev, iter) { |
|---|
| 315 | + u16 vid; |
|---|
| 316 | + |
|---|
| 317 | + if (!is_vlan_dev(upper_dev)) |
|---|
| 318 | + continue; |
|---|
| 319 | + |
|---|
| 320 | + vid = vlan_dev_vlan_id(upper_dev); |
|---|
| 321 | + if (vid >= vlan->vid_begin && vid <= vlan->vid_end) |
|---|
| 322 | + return -EBUSY; |
|---|
| 323 | + } |
|---|
| 324 | + |
|---|
| 325 | + return 0; |
|---|
| 326 | +} |
|---|
| 327 | + |
|---|
| 328 | +static int dsa_slave_vlan_add(struct net_device *dev, |
|---|
| 329 | + const struct switchdev_obj *obj, |
|---|
| 330 | + struct switchdev_trans *trans) |
|---|
| 331 | +{ |
|---|
| 332 | + struct net_device *master = dsa_slave_to_master(dev); |
|---|
| 333 | + struct dsa_port *dp = dsa_slave_to_port(dev); |
|---|
| 334 | + struct switchdev_obj_port_vlan vlan; |
|---|
| 335 | + int vid, err; |
|---|
| 336 | + |
|---|
| 337 | + if (obj->orig_dev != dev) |
|---|
| 338 | + return -EOPNOTSUPP; |
|---|
| 339 | + |
|---|
| 340 | + if (dsa_port_skip_vlan_configuration(dp)) |
|---|
| 341 | + return 0; |
|---|
| 342 | + |
|---|
| 343 | + vlan = *SWITCHDEV_OBJ_PORT_VLAN(obj); |
|---|
| 344 | + |
|---|
| 345 | + /* Deny adding a bridge VLAN when there is already an 802.1Q upper with |
|---|
| 346 | + * the same VID. |
|---|
| 347 | + */ |
|---|
| 348 | + if (trans->ph_prepare && br_vlan_enabled(dp->bridge_dev)) { |
|---|
| 349 | + rcu_read_lock(); |
|---|
| 350 | + err = dsa_slave_vlan_check_for_8021q_uppers(dev, &vlan); |
|---|
| 351 | + rcu_read_unlock(); |
|---|
| 352 | + if (err) |
|---|
| 353 | + return err; |
|---|
| 354 | + } |
|---|
| 355 | + |
|---|
| 356 | + err = dsa_port_vlan_add(dp, &vlan, trans); |
|---|
| 357 | + if (err) |
|---|
| 358 | + return err; |
|---|
| 359 | + |
|---|
| 360 | + /* We need the dedicated CPU port to be a member of the VLAN as well. |
|---|
| 361 | + * Even though drivers often handle CPU membership in special ways, |
|---|
| 362 | + * it doesn't make sense to program a PVID, so clear this flag. |
|---|
| 363 | + */ |
|---|
| 364 | + vlan.flags &= ~BRIDGE_VLAN_INFO_PVID; |
|---|
| 365 | + |
|---|
| 366 | + err = dsa_port_vlan_add(dp->cpu_dp, &vlan, trans); |
|---|
| 367 | + if (err) |
|---|
| 368 | + return err; |
|---|
| 369 | + |
|---|
| 370 | + for (vid = vlan.vid_begin; vid <= vlan.vid_end; vid++) { |
|---|
| 371 | + err = vlan_vid_add(master, htons(ETH_P_8021Q), vid); |
|---|
| 372 | + if (err) |
|---|
| 373 | + return err; |
|---|
| 374 | + } |
|---|
| 375 | + |
|---|
| 376 | + return 0; |
|---|
| 377 | +} |
|---|
| 378 | + |
|---|
| 306 | 379 | static int dsa_slave_port_obj_add(struct net_device *dev, |
|---|
| 307 | 380 | const struct switchdev_obj *obj, |
|---|
| 308 | | - struct switchdev_trans *trans) |
|---|
| 381 | + struct switchdev_trans *trans, |
|---|
| 382 | + struct netlink_ext_ack *extack) |
|---|
| 309 | 383 | { |
|---|
| 310 | 384 | struct dsa_port *dp = dsa_slave_to_port(dev); |
|---|
| 311 | 385 | int err; |
|---|
| .. | .. |
|---|
| 317 | 391 | |
|---|
| 318 | 392 | switch (obj->id) { |
|---|
| 319 | 393 | case SWITCHDEV_OBJ_ID_PORT_MDB: |
|---|
| 394 | + if (obj->orig_dev != dev) |
|---|
| 395 | + return -EOPNOTSUPP; |
|---|
| 320 | 396 | err = dsa_port_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj), trans); |
|---|
| 321 | 397 | break; |
|---|
| 322 | 398 | case SWITCHDEV_OBJ_ID_HOST_MDB: |
|---|
| .. | .. |
|---|
| 327 | 403 | trans); |
|---|
| 328 | 404 | break; |
|---|
| 329 | 405 | case SWITCHDEV_OBJ_ID_PORT_VLAN: |
|---|
| 330 | | - err = dsa_port_vlan_add(dp, SWITCHDEV_OBJ_PORT_VLAN(obj), |
|---|
| 331 | | - trans); |
|---|
| 406 | + err = dsa_slave_vlan_add(dev, obj, trans); |
|---|
| 332 | 407 | break; |
|---|
| 333 | 408 | default: |
|---|
| 334 | 409 | err = -EOPNOTSUPP; |
|---|
| .. | .. |
|---|
| 336 | 411 | } |
|---|
| 337 | 412 | |
|---|
| 338 | 413 | return err; |
|---|
| 414 | +} |
|---|
| 415 | + |
|---|
| 416 | +static int dsa_slave_vlan_del(struct net_device *dev, |
|---|
| 417 | + const struct switchdev_obj *obj) |
|---|
| 418 | +{ |
|---|
| 419 | + struct net_device *master = dsa_slave_to_master(dev); |
|---|
| 420 | + struct dsa_port *dp = dsa_slave_to_port(dev); |
|---|
| 421 | + struct switchdev_obj_port_vlan *vlan; |
|---|
| 422 | + int vid, err; |
|---|
| 423 | + |
|---|
| 424 | + if (obj->orig_dev != dev) |
|---|
| 425 | + return -EOPNOTSUPP; |
|---|
| 426 | + |
|---|
| 427 | + if (dsa_port_skip_vlan_configuration(dp)) |
|---|
| 428 | + return 0; |
|---|
| 429 | + |
|---|
| 430 | + vlan = SWITCHDEV_OBJ_PORT_VLAN(obj); |
|---|
| 431 | + |
|---|
| 432 | + /* Do not deprogram the CPU port as it may be shared with other user |
|---|
| 433 | + * ports which can be members of this VLAN as well. |
|---|
| 434 | + */ |
|---|
| 435 | + err = dsa_port_vlan_del(dp, vlan); |
|---|
| 436 | + if (err) |
|---|
| 437 | + return err; |
|---|
| 438 | + |
|---|
| 439 | + for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) |
|---|
| 440 | + vlan_vid_del(master, htons(ETH_P_8021Q), vid); |
|---|
| 441 | + |
|---|
| 442 | + return 0; |
|---|
| 339 | 443 | } |
|---|
| 340 | 444 | |
|---|
| 341 | 445 | static int dsa_slave_port_obj_del(struct net_device *dev, |
|---|
| .. | .. |
|---|
| 346 | 450 | |
|---|
| 347 | 451 | switch (obj->id) { |
|---|
| 348 | 452 | case SWITCHDEV_OBJ_ID_PORT_MDB: |
|---|
| 453 | + if (obj->orig_dev != dev) |
|---|
| 454 | + return -EOPNOTSUPP; |
|---|
| 349 | 455 | err = dsa_port_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj)); |
|---|
| 350 | 456 | break; |
|---|
| 351 | 457 | case SWITCHDEV_OBJ_ID_HOST_MDB: |
|---|
| .. | .. |
|---|
| 355 | 461 | err = dsa_port_mdb_del(dp->cpu_dp, SWITCHDEV_OBJ_PORT_MDB(obj)); |
|---|
| 356 | 462 | break; |
|---|
| 357 | 463 | case SWITCHDEV_OBJ_ID_PORT_VLAN: |
|---|
| 358 | | - err = dsa_port_vlan_del(dp, SWITCHDEV_OBJ_PORT_VLAN(obj)); |
|---|
| 464 | + err = dsa_slave_vlan_del(dev, obj); |
|---|
| 359 | 465 | break; |
|---|
| 360 | 466 | default: |
|---|
| 361 | 467 | err = -EOPNOTSUPP; |
|---|
| .. | .. |
|---|
| 365 | 471 | return err; |
|---|
| 366 | 472 | } |
|---|
| 367 | 473 | |
|---|
| 368 | | -static int dsa_slave_port_attr_get(struct net_device *dev, |
|---|
| 369 | | - struct switchdev_attr *attr) |
|---|
| 474 | +static int dsa_slave_get_port_parent_id(struct net_device *dev, |
|---|
| 475 | + struct netdev_phys_item_id *ppid) |
|---|
| 370 | 476 | { |
|---|
| 371 | 477 | struct dsa_port *dp = dsa_slave_to_port(dev); |
|---|
| 372 | 478 | struct dsa_switch *ds = dp->ds; |
|---|
| 373 | 479 | struct dsa_switch_tree *dst = ds->dst; |
|---|
| 374 | 480 | |
|---|
| 375 | | - switch (attr->id) { |
|---|
| 376 | | - case SWITCHDEV_ATTR_ID_PORT_PARENT_ID: |
|---|
| 377 | | - attr->u.ppid.id_len = sizeof(dst->index); |
|---|
| 378 | | - memcpy(&attr->u.ppid.id, &dst->index, attr->u.ppid.id_len); |
|---|
| 379 | | - break; |
|---|
| 380 | | - case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS_SUPPORT: |
|---|
| 381 | | - attr->u.brport_flags_support = 0; |
|---|
| 382 | | - break; |
|---|
| 383 | | - default: |
|---|
| 481 | + /* For non-legacy ports, devlink is used and it takes |
|---|
| 482 | + * care of the name generation. This ndo implementation |
|---|
| 483 | + * should be removed with legacy support. |
|---|
| 484 | + */ |
|---|
| 485 | + if (dp->ds->devlink) |
|---|
| 384 | 486 | return -EOPNOTSUPP; |
|---|
| 385 | | - } |
|---|
| 487 | + |
|---|
| 488 | + ppid->id_len = sizeof(dst->index); |
|---|
| 489 | + memcpy(&ppid->id, &dst->index, ppid->id_len); |
|---|
| 386 | 490 | |
|---|
| 387 | 491 | return 0; |
|---|
| 388 | 492 | } |
|---|
| .. | .. |
|---|
| 393 | 497 | #ifdef CONFIG_NET_POLL_CONTROLLER |
|---|
| 394 | 498 | struct dsa_slave_priv *p = netdev_priv(dev); |
|---|
| 395 | 499 | |
|---|
| 396 | | - if (p->netpoll) |
|---|
| 397 | | - netpoll_send_skb(p->netpoll, skb); |
|---|
| 500 | + return netpoll_send_skb(p->netpoll, skb); |
|---|
| 398 | 501 | #else |
|---|
| 399 | 502 | BUG(); |
|---|
| 400 | | -#endif |
|---|
| 401 | 503 | return NETDEV_TX_OK; |
|---|
| 504 | +#endif |
|---|
| 402 | 505 | } |
|---|
| 403 | 506 | |
|---|
| 404 | 507 | static void dsa_skb_tx_timestamp(struct dsa_slave_priv *p, |
|---|
| .. | .. |
|---|
| 419 | 522 | if (!clone) |
|---|
| 420 | 523 | return; |
|---|
| 421 | 524 | |
|---|
| 525 | + DSA_SKB_CB(skb)->clone = clone; |
|---|
| 526 | + |
|---|
| 422 | 527 | if (ds->ops->port_txtstamp(ds, p->dp->index, clone, type)) |
|---|
| 423 | 528 | return; |
|---|
| 424 | 529 | |
|---|
| 425 | 530 | kfree_skb(clone); |
|---|
| 531 | +} |
|---|
| 532 | + |
|---|
| 533 | +netdev_tx_t dsa_enqueue_skb(struct sk_buff *skb, struct net_device *dev) |
|---|
| 534 | +{ |
|---|
| 535 | + /* SKB for netpoll still need to be mangled with the protocol-specific |
|---|
| 536 | + * tag to be successfully transmitted |
|---|
| 537 | + */ |
|---|
| 538 | + if (unlikely(netpoll_tx_running(dev))) |
|---|
| 539 | + return dsa_slave_netpoll_send_skb(dev, skb); |
|---|
| 540 | + |
|---|
| 541 | + /* Queue the SKB for transmission on the parent interface, but |
|---|
| 542 | + * do not modify its EtherType |
|---|
| 543 | + */ |
|---|
| 544 | + skb->dev = dsa_slave_to_master(dev); |
|---|
| 545 | + dev_queue_xmit(skb); |
|---|
| 546 | + |
|---|
| 547 | + return NETDEV_TX_OK; |
|---|
| 548 | +} |
|---|
| 549 | +EXPORT_SYMBOL_GPL(dsa_enqueue_skb); |
|---|
| 550 | + |
|---|
| 551 | +static int dsa_realloc_skb(struct sk_buff *skb, struct net_device *dev) |
|---|
| 552 | +{ |
|---|
| 553 | + int needed_headroom = dev->needed_headroom; |
|---|
| 554 | + int needed_tailroom = dev->needed_tailroom; |
|---|
| 555 | + |
|---|
| 556 | + /* For tail taggers, we need to pad short frames ourselves, to ensure |
|---|
| 557 | + * that the tail tag does not fail at its role of being at the end of |
|---|
| 558 | + * the packet, once the master interface pads the frame. Account for |
|---|
| 559 | + * that pad length here, and pad later. |
|---|
| 560 | + */ |
|---|
| 561 | + if (unlikely(needed_tailroom && skb->len < ETH_ZLEN)) |
|---|
| 562 | + needed_tailroom += ETH_ZLEN - skb->len; |
|---|
| 563 | + /* skb_headroom() returns unsigned int... */ |
|---|
| 564 | + needed_headroom = max_t(int, needed_headroom - skb_headroom(skb), 0); |
|---|
| 565 | + needed_tailroom = max_t(int, needed_tailroom - skb_tailroom(skb), 0); |
|---|
| 566 | + |
|---|
| 567 | + if (likely(!needed_headroom && !needed_tailroom && !skb_cloned(skb))) |
|---|
| 568 | + /* No reallocation needed, yay! */ |
|---|
| 569 | + return 0; |
|---|
| 570 | + |
|---|
| 571 | + return pskb_expand_head(skb, needed_headroom, needed_tailroom, |
|---|
| 572 | + GFP_ATOMIC); |
|---|
| 426 | 573 | } |
|---|
| 427 | 574 | |
|---|
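
The comment in `dsa_realloc_skb()` above explains why frames must be padded before a tail tag is appended; spelling the arithmetic out (the 4-byte tag size below is an illustrative assumption, the real value is the tagger's `overhead`):

```c
/* Worked example for the tail-tag padding rule above, assuming a
 * 4-byte tail tag and a 40-byte frame:
 *
 *   pad in DSA first:    40B data + 20B pad = 60B (ETH_ZLEN),
 *                        then + 4B tag      -> tag is still last
 *   let the master pad:  40B data + 4B tag + 16B pad
 *                                            -> tag buried mid-frame
 *
 * This is why needed_tailroom is grown by ETH_ZLEN - skb->len in
 * dsa_realloc_skb(), so that eth_skb_pad() in dsa_slave_xmit() pads
 * the frame before the tagger appends its tag.
 */
```
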
| 428 | 575 | static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev) |
|---|
| .. | .. |
|---|
| 437 | 584 | s->tx_bytes += skb->len; |
|---|
| 438 | 585 | u64_stats_update_end(&s->syncp); |
|---|
| 439 | 586 | |
|---|
| 587 | + DSA_SKB_CB(skb)->clone = NULL; |
|---|
| 588 | + |
|---|
| 440 | 589 | /* Identify PTP protocol packets, clone them, and pass them to the |
|---|
| 441 | 590 | * switch driver |
|---|
| 442 | 591 | */ |
|---|
| 443 | 592 | dsa_skb_tx_timestamp(p, skb); |
|---|
| 593 | + |
|---|
| 594 | + if (dsa_realloc_skb(skb, dev)) { |
|---|
| 595 | + dev_kfree_skb_any(skb); |
|---|
| 596 | + return NETDEV_TX_OK; |
|---|
| 597 | + } |
|---|
| 598 | + |
|---|
| 599 | + /* needed_tailroom should still be 'warm' in the cache line from |
|---|
| 600 | + * dsa_realloc_skb(), which has also ensured that padding is safe. |
|---|
| 601 | + */ |
|---|
| 602 | + if (dev->needed_tailroom) |
|---|
| 603 | + eth_skb_pad(skb); |
|---|
| 444 | 604 | |
|---|
| 445 | 605 | /* Transmit function may have to reallocate the original SKB, |
|---|
| 446 | 606 | * in which case it must have freed it. Only free it here on error. |
|---|
| .. | .. |
|---|
| 451 | 611 | return NETDEV_TX_OK; |
|---|
| 452 | 612 | } |
|---|
| 453 | 613 | |
|---|
| 454 | | - /* SKB for netpoll still need to be mangled with the protocol-specific |
|---|
| 455 | | - * tag to be successfully transmitted |
|---|
| 456 | | - */ |
|---|
| 457 | | - if (unlikely(netpoll_tx_running(dev))) |
|---|
| 458 | | - return dsa_slave_netpoll_send_skb(dev, nskb); |
|---|
| 459 | | - |
|---|
| 460 | | - /* Queue the SKB for transmission on the parent interface, but |
|---|
| 461 | | - * do not modify its EtherType |
|---|
| 462 | | - */ |
|---|
| 463 | | - nskb->dev = dsa_slave_to_master(dev); |
|---|
| 464 | | - dev_queue_xmit(nskb); |
|---|
| 465 | | - |
|---|
| 466 | | - return NETDEV_TX_OK; |
|---|
| 614 | + return dsa_enqueue_skb(nskb, dev); |
|---|
| 467 | 615 | } |
|---|
| 468 | 616 | |
|---|
| 469 | 617 | /* ethtool operations *******************************************************/ |
|---|
| .. | .. |
|---|
| 693 | 841 | return phylink_ethtool_ksettings_set(dp->pl, cmd); |
|---|
| 694 | 842 | } |
|---|
| 695 | 843 | |
|---|
| 844 | +static void dsa_slave_get_pauseparam(struct net_device *dev, |
|---|
| 845 | + struct ethtool_pauseparam *pause) |
|---|
| 846 | +{ |
|---|
| 847 | + struct dsa_port *dp = dsa_slave_to_port(dev); |
|---|
| 848 | + |
|---|
| 849 | + phylink_ethtool_get_pauseparam(dp->pl, pause); |
|---|
| 850 | +} |
|---|
| 851 | + |
|---|
| 852 | +static int dsa_slave_set_pauseparam(struct net_device *dev, |
|---|
| 853 | + struct ethtool_pauseparam *pause) |
|---|
| 854 | +{ |
|---|
| 855 | + struct dsa_port *dp = dsa_slave_to_port(dev); |
|---|
| 856 | + |
|---|
| 857 | + return phylink_ethtool_set_pauseparam(dp->pl, pause); |
|---|
| 858 | +} |
|---|
| 859 | + |
|---|
| 696 | 860 | #ifdef CONFIG_NET_POLL_CONTROLLER |
|---|
| 697 | 861 | static int dsa_slave_netpoll_setup(struct net_device *dev, |
|---|
| 698 | 862 | struct netpoll_info *ni) |
|---|
| .. | .. |
|---|
| 727 | 891 | |
|---|
| 728 | 892 | p->netpoll = NULL; |
|---|
| 729 | 893 | |
|---|
| 730 | | - __netpoll_free_async(netpoll); |
|---|
| 894 | + __netpoll_free(netpoll); |
|---|
| 731 | 895 | } |
|---|
| 732 | 896 | |
|---|
| 733 | 897 | static void dsa_slave_poll_controller(struct net_device *dev) |
|---|
| .. | .. |
|---|
| 739 | 903 | char *name, size_t len) |
|---|
| 740 | 904 | { |
|---|
| 741 | 905 | struct dsa_port *dp = dsa_slave_to_port(dev); |
|---|
| 906 | + |
|---|
| 907 | + /* For non-legacy ports, devlink is used and it takes |
|---|
| 908 | + * care of the name generation. This ndo implementation |
|---|
| 909 | + * should be removed with legacy support. |
|---|
| 910 | + */ |
|---|
| 911 | + if (dp->ds->devlink) |
|---|
| 912 | + return -EOPNOTSUPP; |
|---|
| 742 | 913 | |
|---|
| 743 | 914 | if (snprintf(name, len, "p%d", dp->index) >= len) |
|---|
| 744 | 915 | return -EINVAL; |
|---|
| .. | .. |
|---|
| 759 | 930 | return NULL; |
|---|
| 760 | 931 | } |
|---|
| 761 | 932 | |
|---|
| 933 | +static int |
|---|
| 934 | +dsa_slave_add_cls_matchall_mirred(struct net_device *dev, |
|---|
| 935 | + struct tc_cls_matchall_offload *cls, |
|---|
| 936 | + bool ingress) |
|---|
| 937 | +{ |
|---|
| 938 | + struct dsa_port *dp = dsa_slave_to_port(dev); |
|---|
| 939 | + struct dsa_slave_priv *p = netdev_priv(dev); |
|---|
| 940 | + struct dsa_mall_mirror_tc_entry *mirror; |
|---|
| 941 | + struct dsa_mall_tc_entry *mall_tc_entry; |
|---|
| 942 | + struct dsa_switch *ds = dp->ds; |
|---|
| 943 | + struct flow_action_entry *act; |
|---|
| 944 | + struct dsa_port *to_dp; |
|---|
| 945 | + int err; |
|---|
| 946 | + |
|---|
| 947 | + if (!ds->ops->port_mirror_add) |
|---|
| 948 | + return -EOPNOTSUPP; |
|---|
| 949 | + |
|---|
| 950 | + if (!flow_action_basic_hw_stats_check(&cls->rule->action, |
|---|
| 951 | + cls->common.extack)) |
|---|
| 952 | + return -EOPNOTSUPP; |
|---|
| 953 | + |
|---|
| 954 | + act = &cls->rule->action.entries[0]; |
|---|
| 955 | + |
|---|
| 956 | + if (!act->dev) |
|---|
| 957 | + return -EINVAL; |
|---|
| 958 | + |
|---|
| 959 | + if (!dsa_slave_dev_check(act->dev)) |
|---|
| 960 | + return -EOPNOTSUPP; |
|---|
| 961 | + |
|---|
| 962 | + mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL); |
|---|
| 963 | + if (!mall_tc_entry) |
|---|
| 964 | + return -ENOMEM; |
|---|
| 965 | + |
|---|
| 966 | + mall_tc_entry->cookie = cls->cookie; |
|---|
| 967 | + mall_tc_entry->type = DSA_PORT_MALL_MIRROR; |
|---|
| 968 | + mirror = &mall_tc_entry->mirror; |
|---|
| 969 | + |
|---|
| 970 | + to_dp = dsa_slave_to_port(act->dev); |
|---|
| 971 | + |
|---|
| 972 | + mirror->to_local_port = to_dp->index; |
|---|
| 973 | + mirror->ingress = ingress; |
|---|
| 974 | + |
|---|
| 975 | + err = ds->ops->port_mirror_add(ds, dp->index, mirror, ingress); |
|---|
| 976 | + if (err) { |
|---|
| 977 | + kfree(mall_tc_entry); |
|---|
| 978 | + return err; |
|---|
| 979 | + } |
|---|
| 980 | + |
|---|
| 981 | + list_add_tail(&mall_tc_entry->list, &p->mall_tc_list); |
|---|
| 982 | + |
|---|
| 983 | + return err; |
|---|
| 984 | +} |
|---|
| 985 | + |
|---|
| 986 | +static int |
|---|
| 987 | +dsa_slave_add_cls_matchall_police(struct net_device *dev, |
|---|
| 988 | + struct tc_cls_matchall_offload *cls, |
|---|
| 989 | + bool ingress) |
|---|
| 990 | +{ |
|---|
| 991 | + struct netlink_ext_ack *extack = cls->common.extack; |
|---|
| 992 | + struct dsa_port *dp = dsa_slave_to_port(dev); |
|---|
| 993 | + struct dsa_slave_priv *p = netdev_priv(dev); |
|---|
| 994 | + struct dsa_mall_policer_tc_entry *policer; |
|---|
| 995 | + struct dsa_mall_tc_entry *mall_tc_entry; |
|---|
| 996 | + struct dsa_switch *ds = dp->ds; |
|---|
| 997 | + struct flow_action_entry *act; |
|---|
| 998 | + int err; |
|---|
| 999 | + |
|---|
| 1000 | + if (!ds->ops->port_policer_add) { |
|---|
| 1001 | + NL_SET_ERR_MSG_MOD(extack, |
|---|
| 1002 | + "Policing offload not implemented"); |
|---|
| 1003 | + return -EOPNOTSUPP; |
|---|
| 1004 | + } |
|---|
| 1005 | + |
|---|
| 1006 | + if (!ingress) { |
|---|
| 1007 | + NL_SET_ERR_MSG_MOD(extack, |
|---|
| 1008 | + "Only supported on ingress qdisc"); |
|---|
| 1009 | + return -EOPNOTSUPP; |
|---|
| 1010 | + } |
|---|
| 1011 | + |
|---|
| 1012 | + if (!flow_action_basic_hw_stats_check(&cls->rule->action, |
|---|
| 1013 | + cls->common.extack)) |
|---|
| 1014 | + return -EOPNOTSUPP; |
|---|
| 1015 | + |
|---|
| 1016 | + list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list) { |
|---|
| 1017 | + if (mall_tc_entry->type == DSA_PORT_MALL_POLICER) { |
|---|
| 1018 | + NL_SET_ERR_MSG_MOD(extack, |
|---|
| 1019 | + "Only one port policer allowed"); |
|---|
| 1020 | + return -EEXIST; |
|---|
| 1021 | + } |
|---|
| 1022 | + } |
|---|
| 1023 | + |
|---|
| 1024 | + act = &cls->rule->action.entries[0]; |
|---|
| 1025 | + |
|---|
| 1026 | + mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL); |
|---|
| 1027 | + if (!mall_tc_entry) |
|---|
| 1028 | + return -ENOMEM; |
|---|
| 1029 | + |
|---|
| 1030 | + mall_tc_entry->cookie = cls->cookie; |
|---|
| 1031 | + mall_tc_entry->type = DSA_PORT_MALL_POLICER; |
|---|
| 1032 | + policer = &mall_tc_entry->policer; |
|---|
| 1033 | + policer->rate_bytes_per_sec = act->police.rate_bytes_ps; |
|---|
| 1034 | + policer->burst = act->police.burst; |
|---|
| 1035 | + |
|---|
| 1036 | + err = ds->ops->port_policer_add(ds, dp->index, policer); |
|---|
| 1037 | + if (err) { |
|---|
| 1038 | + kfree(mall_tc_entry); |
|---|
| 1039 | + return err; |
|---|
| 1040 | + } |
|---|
| 1041 | + |
|---|
| 1042 | + list_add_tail(&mall_tc_entry->list, &p->mall_tc_list); |
|---|
| 1043 | + |
|---|
| 1044 | + return err; |
|---|
| 1045 | +} |
|---|
| 1046 | + |
|---|
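
`dsa_slave_add_cls_matchall_police()` above hands the requested rate/burst pair to the switch driver through the new `port_policer_add` op. A hedged stub of the driver side, with the op shapes and the `rate_bytes_per_sec`/`burst` fields taken from the calls in this diff and the bodies left as placeholders:

```c
/* Sketch only: the shape of a driver implementing the policer ops
 * invoked above. Real drivers translate the rate and burst into their
 * hardware's rate-limiter configuration.
 */
static int example_port_policer_add(struct dsa_switch *ds, int port,
				    struct dsa_mall_policer_tc_entry *policer)
{
	/* policer->rate_bytes_per_sec and policer->burst describe the
	 * requested single-rate policer; program the hardware here.
	 */
	return -EOPNOTSUPP;
}

static void example_port_policer_del(struct dsa_switch *ds, int port)
{
	/* Undo whatever example_port_policer_add() configured. */
}
```
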
| 762 | 1047 | static int dsa_slave_add_cls_matchall(struct net_device *dev, |
|---|
| 763 | 1048 | struct tc_cls_matchall_offload *cls, |
|---|
| 764 | 1049 | bool ingress) |
|---|
| 765 | 1050 | { |
|---|
| 766 | | - struct dsa_port *dp = dsa_slave_to_port(dev); |
|---|
| 767 | | - struct dsa_slave_priv *p = netdev_priv(dev); |
|---|
| 768 | | - struct dsa_mall_tc_entry *mall_tc_entry; |
|---|
| 769 | | - __be16 protocol = cls->common.protocol; |
|---|
| 770 | | - struct dsa_switch *ds = dp->ds; |
|---|
| 771 | | - struct net_device *to_dev; |
|---|
| 772 | | - const struct tc_action *a; |
|---|
| 773 | | - struct dsa_port *to_dp; |
|---|
| 774 | 1051 | int err = -EOPNOTSUPP; |
|---|
| 775 | 1052 | |
|---|
| 776 | | - if (!ds->ops->port_mirror_add) |
|---|
| 777 | | - return err; |
|---|
| 1053 | + if (cls->common.protocol == htons(ETH_P_ALL) && |
|---|
| 1054 | + flow_offload_has_one_action(&cls->rule->action) && |
|---|
| 1055 | + cls->rule->action.entries[0].id == FLOW_ACTION_MIRRED) |
|---|
| 1056 | + err = dsa_slave_add_cls_matchall_mirred(dev, cls, ingress); |
|---|
| 1057 | + else if (flow_offload_has_one_action(&cls->rule->action) && |
|---|
| 1058 | + cls->rule->action.entries[0].id == FLOW_ACTION_POLICE) |
|---|
| 1059 | + err = dsa_slave_add_cls_matchall_police(dev, cls, ingress); |
|---|
| 778 | 1060 | |
|---|
| 779 | | - if (!tcf_exts_has_one_action(cls->exts)) |
|---|
| 780 | | - return err; |
|---|
| 781 | | - |
|---|
| 782 | | - a = tcf_exts_first_action(cls->exts); |
|---|
| 783 | | - |
|---|
| 784 | | - if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) { |
|---|
| 785 | | - struct dsa_mall_mirror_tc_entry *mirror; |
|---|
| 786 | | - |
|---|
| 787 | | - to_dev = tcf_mirred_dev(a); |
|---|
| 788 | | - if (!to_dev) |
|---|
| 789 | | - return -EINVAL; |
|---|
| 790 | | - |
|---|
| 791 | | - if (!dsa_slave_dev_check(to_dev)) |
|---|
| 792 | | - return -EOPNOTSUPP; |
|---|
| 793 | | - |
|---|
| 794 | | - mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL); |
|---|
| 795 | | - if (!mall_tc_entry) |
|---|
| 796 | | - return -ENOMEM; |
|---|
| 797 | | - |
|---|
| 798 | | - mall_tc_entry->cookie = cls->cookie; |
|---|
| 799 | | - mall_tc_entry->type = DSA_PORT_MALL_MIRROR; |
|---|
| 800 | | - mirror = &mall_tc_entry->mirror; |
|---|
| 801 | | - |
|---|
| 802 | | - to_dp = dsa_slave_to_port(to_dev); |
|---|
| 803 | | - |
|---|
| 804 | | - mirror->to_local_port = to_dp->index; |
|---|
| 805 | | - mirror->ingress = ingress; |
|---|
| 806 | | - |
|---|
| 807 | | - err = ds->ops->port_mirror_add(ds, dp->index, mirror, ingress); |
|---|
| 808 | | - if (err) { |
|---|
| 809 | | - kfree(mall_tc_entry); |
|---|
| 810 | | - return err; |
|---|
| 811 | | - } |
|---|
| 812 | | - |
|---|
| 813 | | - list_add_tail(&mall_tc_entry->list, &p->mall_tc_list); |
|---|
| 814 | | - } |
|---|
| 815 | | - |
|---|
| 816 | | - return 0; |
|---|
| 1061 | + return err; |
|---|
| 817 | 1062 | } |
|---|
| 818 | 1063 | |
|---|
| 819 | 1064 | static void dsa_slave_del_cls_matchall(struct net_device *dev, |
|---|
| .. | .. |
|---|
| 823 | 1068 | struct dsa_mall_tc_entry *mall_tc_entry; |
|---|
| 824 | 1069 | struct dsa_switch *ds = dp->ds; |
|---|
| 825 | 1070 | |
|---|
| 826 | | - if (!ds->ops->port_mirror_del) |
|---|
| 827 | | - return; |
|---|
| 828 | | - |
|---|
| 829 | 1071 | mall_tc_entry = dsa_slave_mall_tc_entry_find(dev, cls->cookie); |
|---|
| 830 | 1072 | if (!mall_tc_entry) |
|---|
| 831 | 1073 | return; |
|---|
| .. | .. |
|---|
| 834 | 1076 | |
|---|
| 835 | 1077 | switch (mall_tc_entry->type) { |
|---|
| 836 | 1078 | case DSA_PORT_MALL_MIRROR: |
|---|
| 837 | | - ds->ops->port_mirror_del(ds, dp->index, &mall_tc_entry->mirror); |
|---|
| 1079 | + if (ds->ops->port_mirror_del) |
|---|
| 1080 | + ds->ops->port_mirror_del(ds, dp->index, |
|---|
| 1081 | + &mall_tc_entry->mirror); |
|---|
| 1082 | + break; |
|---|
| 1083 | + case DSA_PORT_MALL_POLICER: |
|---|
| 1084 | + if (ds->ops->port_policer_del) |
|---|
| 1085 | + ds->ops->port_policer_del(ds, dp->index); |
|---|
| 838 | 1086 | break; |
|---|
| 839 | 1087 | default: |
|---|
| 840 | 1088 | WARN_ON(1); |
|---|
| .. | .. |
|---|
| 861 | 1109 | } |
|---|
| 862 | 1110 | } |
|---|
| 863 | 1111 | |
|---|
| 1112 | +static int dsa_slave_add_cls_flower(struct net_device *dev, |
|---|
| 1113 | + struct flow_cls_offload *cls, |
|---|
| 1114 | + bool ingress) |
|---|
| 1115 | +{ |
|---|
| 1116 | + struct dsa_port *dp = dsa_slave_to_port(dev); |
|---|
| 1117 | + struct dsa_switch *ds = dp->ds; |
|---|
| 1118 | + int port = dp->index; |
|---|
| 1119 | + |
|---|
| 1120 | + if (!ds->ops->cls_flower_add) |
|---|
| 1121 | + return -EOPNOTSUPP; |
|---|
| 1122 | + |
|---|
| 1123 | + return ds->ops->cls_flower_add(ds, port, cls, ingress); |
|---|
| 1124 | +} |
|---|
| 1125 | + |
|---|
| 1126 | +static int dsa_slave_del_cls_flower(struct net_device *dev, |
|---|
| 1127 | + struct flow_cls_offload *cls, |
|---|
| 1128 | + bool ingress) |
|---|
| 1129 | +{ |
|---|
| 1130 | + struct dsa_port *dp = dsa_slave_to_port(dev); |
|---|
| 1131 | + struct dsa_switch *ds = dp->ds; |
|---|
| 1132 | + int port = dp->index; |
|---|
| 1133 | + |
|---|
| 1134 | + if (!ds->ops->cls_flower_del) |
|---|
| 1135 | + return -EOPNOTSUPP; |
|---|
| 1136 | + |
|---|
| 1137 | + return ds->ops->cls_flower_del(ds, port, cls, ingress); |
|---|
| 1138 | +} |
|---|
| 1139 | + |
|---|
| 1140 | +static int dsa_slave_stats_cls_flower(struct net_device *dev, |
|---|
| 1141 | + struct flow_cls_offload *cls, |
|---|
| 1142 | + bool ingress) |
|---|
| 1143 | +{ |
|---|
| 1144 | + struct dsa_port *dp = dsa_slave_to_port(dev); |
|---|
| 1145 | + struct dsa_switch *ds = dp->ds; |
|---|
| 1146 | + int port = dp->index; |
|---|
| 1147 | + |
|---|
| 1148 | + if (!ds->ops->cls_flower_stats) |
|---|
| 1149 | + return -EOPNOTSUPP; |
|---|
| 1150 | + |
|---|
| 1151 | + return ds->ops->cls_flower_stats(ds, port, cls, ingress); |
|---|
| 1152 | +} |
|---|
| 1153 | + |
|---|
| 1154 | +static int dsa_slave_setup_tc_cls_flower(struct net_device *dev, |
|---|
| 1155 | + struct flow_cls_offload *cls, |
|---|
| 1156 | + bool ingress) |
|---|
| 1157 | +{ |
|---|
| 1158 | + switch (cls->command) { |
|---|
| 1159 | + case FLOW_CLS_REPLACE: |
|---|
| 1160 | + return dsa_slave_add_cls_flower(dev, cls, ingress); |
|---|
| 1161 | + case FLOW_CLS_DESTROY: |
|---|
| 1162 | + return dsa_slave_del_cls_flower(dev, cls, ingress); |
|---|
| 1163 | + case FLOW_CLS_STATS: |
|---|
| 1164 | + return dsa_slave_stats_cls_flower(dev, cls, ingress); |
|---|
| 1165 | + default: |
|---|
| 1166 | + return -EOPNOTSUPP; |
|---|
| 1167 | + } |
|---|
| 1168 | +} |
|---|
| 1169 | + |
|---|
| 864 | 1170 | static int dsa_slave_setup_tc_block_cb(enum tc_setup_type type, void *type_data, |
|---|
| 865 | 1171 | void *cb_priv, bool ingress) |
|---|
| 866 | 1172 | { |
|---|
| .. | .. |
|---|
| 872 | 1178 | switch (type) { |
|---|
| 873 | 1179 | case TC_SETUP_CLSMATCHALL: |
|---|
| 874 | 1180 | return dsa_slave_setup_tc_cls_matchall(dev, type_data, ingress); |
|---|
| 1181 | + case TC_SETUP_CLSFLOWER: |
|---|
| 1182 | + return dsa_slave_setup_tc_cls_flower(dev, type_data, ingress); |
|---|
| 875 | 1183 | default: |
|---|
| 876 | 1184 | return -EOPNOTSUPP; |
|---|
| 877 | 1185 | } |
|---|
| .. | .. |
|---|
| 889 | 1197 | return dsa_slave_setup_tc_block_cb(type, type_data, cb_priv, false); |
|---|
| 890 | 1198 | } |
|---|
| 891 | 1199 | |
|---|
| 892 | | -static int dsa_slave_setup_tc_block(struct net_device *dev, |
|---|
| 893 | | - struct tc_block_offload *f) |
|---|
| 894 | | -{ |
|---|
| 895 | | - tc_setup_cb_t *cb; |
|---|
| 1200 | +static LIST_HEAD(dsa_slave_block_cb_list); |
|---|
| 896 | 1201 | |
|---|
| 897 | | - if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) |
|---|
| 1202 | +static int dsa_slave_setup_tc_block(struct net_device *dev, |
|---|
| 1203 | + struct flow_block_offload *f) |
|---|
| 1204 | +{ |
|---|
| 1205 | + struct flow_block_cb *block_cb; |
|---|
| 1206 | + flow_setup_cb_t *cb; |
|---|
| 1207 | + |
|---|
| 1208 | + if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) |
|---|
| 898 | 1209 | cb = dsa_slave_setup_tc_block_cb_ig; |
|---|
| 899 | | - else if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS) |
|---|
| 1210 | + else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) |
|---|
| 900 | 1211 | cb = dsa_slave_setup_tc_block_cb_eg; |
|---|
| 901 | 1212 | else |
|---|
| 902 | 1213 | return -EOPNOTSUPP; |
|---|
| 903 | 1214 | |
|---|
| 1215 | + f->driver_block_list = &dsa_slave_block_cb_list; |
|---|
| 1216 | + |
|---|
| 904 | 1217 | switch (f->command) { |
|---|
| 905 | | - case TC_BLOCK_BIND: |
|---|
| 906 | | - return tcf_block_cb_register(f->block, cb, dev, dev, f->extack); |
|---|
| 907 | | - case TC_BLOCK_UNBIND: |
|---|
| 908 | | - tcf_block_cb_unregister(f->block, cb, dev); |
|---|
| 1218 | + case FLOW_BLOCK_BIND: |
|---|
| 1219 | + if (flow_block_cb_is_busy(cb, dev, &dsa_slave_block_cb_list)) |
|---|
| 1220 | + return -EBUSY; |
|---|
| 1221 | + |
|---|
| 1222 | + block_cb = flow_block_cb_alloc(cb, dev, dev, NULL); |
|---|
| 1223 | + if (IS_ERR(block_cb)) |
|---|
| 1224 | + return PTR_ERR(block_cb); |
|---|
| 1225 | + |
|---|
| 1226 | + flow_block_cb_add(block_cb, f); |
|---|
| 1227 | + list_add_tail(&block_cb->driver_list, &dsa_slave_block_cb_list); |
|---|
| 1228 | + return 0; |
|---|
| 1229 | + case FLOW_BLOCK_UNBIND: |
|---|
| 1230 | + block_cb = flow_block_cb_lookup(f->block, cb, dev); |
|---|
| 1231 | + if (!block_cb) |
|---|
| 1232 | + return -ENOENT; |
|---|
| 1233 | + |
|---|
| 1234 | + flow_block_cb_remove(block_cb, f); |
|---|
| 1235 | + list_del(&block_cb->driver_list); |
|---|
| 909 | 1236 | return 0; |
|---|
| 910 | 1237 | default: |
|---|
| 911 | 1238 | return -EOPNOTSUPP; |
|---|
| .. | .. |
|---|
| 915 | 1242 | static int dsa_slave_setup_tc(struct net_device *dev, enum tc_setup_type type, |
|---|
| 916 | 1243 | void *type_data) |
|---|
| 917 | 1244 | { |
|---|
| 918 | | - switch (type) { |
|---|
| 919 | | - case TC_SETUP_BLOCK: |
|---|
| 1245 | + struct dsa_port *dp = dsa_slave_to_port(dev); |
|---|
| 1246 | + struct dsa_switch *ds = dp->ds; |
|---|
| 1247 | + |
|---|
| 1248 | + if (type == TC_SETUP_BLOCK) |
|---|
| 920 | 1249 | return dsa_slave_setup_tc_block(dev, type_data); |
|---|
| 921 | | - default: |
|---|
| 1250 | + |
|---|
| 1251 | + if (!ds->ops->port_setup_tc) |
|---|
| 922 | 1252 | return -EOPNOTSUPP; |
|---|
| 923 | | - } |
|---|
| 1253 | + |
|---|
| 1254 | + return ds->ops->port_setup_tc(ds, dp->index, type, type_data); |
|---|
| 924 | 1255 | } |
|---|
| 925 | 1256 | |
|---|
| 926 | 1257 | static void dsa_slave_get_stats64(struct net_device *dev, |
|---|
| 927 | 1258 | struct rtnl_link_stats64 *stats) |
|---|
| 928 | 1259 | { |
|---|
| 929 | 1260 | struct dsa_slave_priv *p = netdev_priv(dev); |
|---|
| 930 | | - struct pcpu_sw_netstats *s; |
|---|
| 931 | | - unsigned int start; |
|---|
| 932 | | - int i; |
|---|
| 933 | 1261 | |
|---|
| 934 | 1262 | netdev_stats_to_stats64(stats, &dev->stats); |
|---|
| 935 | | - for_each_possible_cpu(i) { |
|---|
| 936 | | - u64 tx_packets, tx_bytes, rx_packets, rx_bytes; |
|---|
| 937 | | - |
|---|
| 938 | | - s = per_cpu_ptr(p->stats64, i); |
|---|
| 939 | | - do { |
|---|
| 940 | | - start = u64_stats_fetch_begin_irq(&s->syncp); |
|---|
| 941 | | - tx_packets = s->tx_packets; |
|---|
| 942 | | - tx_bytes = s->tx_bytes; |
|---|
| 943 | | - rx_packets = s->rx_packets; |
|---|
| 944 | | - rx_bytes = s->rx_bytes; |
|---|
| 945 | | - } while (u64_stats_fetch_retry_irq(&s->syncp, start)); |
|---|
| 946 | | - |
|---|
| 947 | | - stats->tx_packets += tx_packets; |
|---|
| 948 | | - stats->tx_bytes += tx_bytes; |
|---|
| 949 | | - stats->rx_packets += rx_packets; |
|---|
| 950 | | - stats->rx_bytes += rx_bytes; |
|---|
| 951 | | - } |
|---|
| 1263 | + dev_fetch_sw_netstats(stats, p->stats64); |
|---|
| 952 | 1264 | } |
|---|
| 953 | 1265 | |
|---|
| 954 | 1266 | static int dsa_slave_get_rxnfc(struct net_device *dev, |
|---|
| .. | .. |
|---|
| 987 | 1299 | return ds->ops->get_ts_info(ds, p->dp->index, ts); |
|---|
| 988 | 1300 | } |
|---|
| 989 | 1301 | |
|---|
| 1302 | +static int dsa_slave_vlan_rx_add_vid(struct net_device *dev, __be16 proto, |
|---|
| 1303 | + u16 vid) |
|---|
| 1304 | +{ |
|---|
| 1305 | + struct net_device *master = dsa_slave_to_master(dev); |
|---|
| 1306 | + struct dsa_port *dp = dsa_slave_to_port(dev); |
|---|
| 1307 | + struct switchdev_obj_port_vlan vlan = { |
|---|
| 1308 | + .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN, |
|---|
| 1309 | + .vid_begin = vid, |
|---|
| 1310 | + .vid_end = vid, |
|---|
| 1311 | + /* This API only allows programming tagged, non-PVID VIDs */ |
|---|
| 1312 | + .flags = 0, |
|---|
| 1313 | + }; |
|---|
| 1314 | + struct switchdev_trans trans; |
|---|
| 1315 | + int ret; |
|---|
| 1316 | + |
|---|
| 1317 | + /* User port... */ |
|---|
| 1318 | + trans.ph_prepare = true; |
|---|
| 1319 | + ret = dsa_port_vlan_add(dp, &vlan, &trans); |
|---|
| 1320 | + if (ret) |
|---|
| 1321 | + return ret; |
|---|
| 1322 | + |
|---|
| 1323 | + trans.ph_prepare = false; |
|---|
| 1324 | + ret = dsa_port_vlan_add(dp, &vlan, &trans); |
|---|
| 1325 | + if (ret) |
|---|
| 1326 | + return ret; |
|---|
| 1327 | + |
|---|
| 1328 | + /* And CPU port... */ |
|---|
| 1329 | + trans.ph_prepare = true; |
|---|
| 1330 | + ret = dsa_port_vlan_add(dp->cpu_dp, &vlan, &trans); |
|---|
| 1331 | + if (ret) |
|---|
| 1332 | + return ret; |
|---|
| 1333 | + |
|---|
| 1334 | + trans.ph_prepare = false; |
|---|
| 1335 | + ret = dsa_port_vlan_add(dp->cpu_dp, &vlan, &trans); |
|---|
| 1336 | + if (ret) |
|---|
| 1337 | + return ret; |
|---|
| 1338 | + |
|---|
| 1339 | + return vlan_vid_add(master, proto, vid); |
|---|
| 1340 | +} |
|---|
| 1341 | + |
|---|
| 1342 | +static int dsa_slave_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, |
|---|
| 1343 | + u16 vid) |
|---|
| 1344 | +{ |
|---|
| 1345 | + struct net_device *master = dsa_slave_to_master(dev); |
|---|
| 1346 | + struct dsa_port *dp = dsa_slave_to_port(dev); |
|---|
| 1347 | + struct switchdev_obj_port_vlan vlan = { |
|---|
| 1348 | + .vid_begin = vid, |
|---|
| 1349 | + .vid_end = vid, |
|---|
| 1350 | + /* This API only allows programming tagged, non-PVID VIDs */ |
|---|
| 1351 | + .flags = 0, |
|---|
| 1352 | + }; |
|---|
| 1353 | + int err; |
|---|
| 1354 | + |
|---|
| 1355 | + /* Do not deprogram the CPU port as it may be shared with other user |
|---|
| 1356 | + * ports which can be members of this VLAN as well. |
|---|
| 1357 | + */ |
|---|
| 1358 | + err = dsa_port_vlan_del(dp, &vlan); |
|---|
| 1359 | + if (err) |
|---|
| 1360 | + return err; |
|---|
| 1361 | + |
|---|
| 1362 | + vlan_vid_del(master, proto, vid); |
|---|
| 1363 | + |
|---|
| 1364 | + return 0; |
|---|
| 1365 | +} |
|---|
| 1366 | + |
|---|
| 1367 | +struct dsa_hw_port { |
|---|
| 1368 | + struct list_head list; |
|---|
| 1369 | + struct net_device *dev; |
|---|
| 1370 | + int old_mtu; |
|---|
| 1371 | +}; |
|---|
| 1372 | + |
|---|
| 1373 | +static int dsa_hw_port_list_set_mtu(struct list_head *hw_port_list, int mtu) |
|---|
| 1374 | +{ |
|---|
| 1375 | + const struct dsa_hw_port *p; |
|---|
| 1376 | + int err; |
|---|
| 1377 | + |
|---|
| 1378 | + list_for_each_entry(p, hw_port_list, list) { |
|---|
| 1379 | + if (p->dev->mtu == mtu) |
|---|
| 1380 | + continue; |
|---|
| 1381 | + |
|---|
| 1382 | + err = dev_set_mtu(p->dev, mtu); |
|---|
| 1383 | + if (err) |
|---|
| 1384 | + goto rollback; |
|---|
| 1385 | + } |
|---|
| 1386 | + |
|---|
| 1387 | + return 0; |
|---|
| 1388 | + |
|---|
| 1389 | +rollback: |
|---|
| 1390 | + list_for_each_entry_continue_reverse(p, hw_port_list, list) { |
|---|
| 1391 | + if (p->dev->mtu == p->old_mtu) |
|---|
| 1392 | + continue; |
|---|
| 1393 | + |
|---|
| 1394 | + if (dev_set_mtu(p->dev, p->old_mtu)) |
|---|
| 1395 | + netdev_err(p->dev, "Failed to restore MTU\n"); |
|---|
| 1396 | + } |
|---|
| 1397 | + |
|---|
| 1398 | + return err; |
|---|
| 1399 | +} |
|---|
| 1400 | + |
|---|
| 1401 | +static void dsa_hw_port_list_free(struct list_head *hw_port_list) |
|---|
| 1402 | +{ |
|---|
| 1403 | + struct dsa_hw_port *p, *n; |
|---|
| 1404 | + |
|---|
| 1405 | + list_for_each_entry_safe(p, n, hw_port_list, list) |
|---|
| 1406 | + kfree(p); |
|---|
| 1407 | +} |
|---|
| 1408 | + |
|---|
| 1409 | +/* Make the hardware datapath to/from @dev limited to a common MTU */ |
|---|
| 1410 | +static void dsa_bridge_mtu_normalization(struct dsa_port *dp) |
|---|
| 1411 | +{ |
|---|
| 1412 | + struct list_head hw_port_list; |
|---|
| 1413 | + struct dsa_switch_tree *dst; |
|---|
| 1414 | + int min_mtu = ETH_MAX_MTU; |
|---|
| 1415 | + struct dsa_port *other_dp; |
|---|
| 1416 | + int err; |
|---|
| 1417 | + |
|---|
| 1418 | + if (!dp->ds->mtu_enforcement_ingress) |
|---|
| 1419 | + return; |
|---|
| 1420 | + |
|---|
| 1421 | + if (!dp->bridge_dev) |
|---|
| 1422 | + return; |
|---|
| 1423 | + |
|---|
| 1424 | + INIT_LIST_HEAD(&hw_port_list); |
|---|
| 1425 | + |
|---|
| 1426 | + /* Populate the list of ports that are part of the same bridge |
|---|
| 1427 | + * as the newly added/modified port |
|---|
| 1428 | + */ |
|---|
| 1429 | + list_for_each_entry(dst, &dsa_tree_list, list) { |
|---|
| 1430 | + list_for_each_entry(other_dp, &dst->ports, list) { |
|---|
| 1431 | + struct dsa_hw_port *hw_port; |
|---|
| 1432 | + struct net_device *slave; |
|---|
| 1433 | + |
|---|
| 1434 | + if (other_dp->type != DSA_PORT_TYPE_USER) |
|---|
| 1435 | + continue; |
|---|
| 1436 | + |
|---|
| 1437 | + if (other_dp->bridge_dev != dp->bridge_dev) |
|---|
| 1438 | + continue; |
|---|
| 1439 | + |
|---|
| 1440 | + if (!other_dp->ds->mtu_enforcement_ingress) |
|---|
| 1441 | + continue; |
|---|
| 1442 | + |
|---|
| 1443 | + slave = other_dp->slave; |
|---|
| 1444 | + |
|---|
| 1445 | + if (min_mtu > slave->mtu) |
|---|
| 1446 | + min_mtu = slave->mtu; |
|---|
| 1447 | + |
|---|
| 1448 | + hw_port = kzalloc(sizeof(*hw_port), GFP_KERNEL); |
|---|
| 1449 | + if (!hw_port) |
|---|
| 1450 | + goto out; |
|---|
| 1451 | + |
|---|
| 1452 | + hw_port->dev = slave; |
|---|
| 1453 | + hw_port->old_mtu = slave->mtu; |
|---|
| 1454 | + |
|---|
| 1455 | + list_add(&hw_port->list, &hw_port_list); |
|---|
| 1456 | + } |
|---|
| 1457 | + } |
|---|
| 1458 | + |
|---|
| 1459 | + /* Attempt to configure the entire hardware bridge to the newly added |
|---|
| 1460 | + * interface's MTU first, regardless of whether the intention of the |
|---|
| 1461 | + * user was to raise or lower it. |
|---|
| 1462 | + */ |
|---|
| 1463 | + err = dsa_hw_port_list_set_mtu(&hw_port_list, dp->slave->mtu); |
|---|
| 1464 | + if (!err) |
|---|
| 1465 | + goto out; |
|---|
| 1466 | + |
|---|
| 1467 | + /* Clearly that didn't work out so well, so just set the minimum MTU on |
|---|
| 1468 | + * all hardware bridge ports now. If this fails too, then all ports will |
|---|
| 1469 | + * still have their old MTU rolled back anyway. |
|---|
| 1470 | + */ |
|---|
| 1471 | + dsa_hw_port_list_set_mtu(&hw_port_list, min_mtu); |
|---|
| 1472 | + |
|---|
| 1473 | +out: |
|---|
| 1474 | + dsa_hw_port_list_free(&hw_port_list); |
|---|
| 1475 | +} |
|---|
| 1476 | + |
|---|
| 1477 | +static int dsa_slave_change_mtu(struct net_device *dev, int new_mtu) |
|---|
| 1478 | +{ |
|---|
| 1479 | + struct net_device *master = dsa_slave_to_master(dev); |
|---|
| 1480 | + struct dsa_port *dp = dsa_slave_to_port(dev); |
|---|
| 1481 | + struct dsa_slave_priv *p = netdev_priv(dev); |
|---|
| 1482 | + struct dsa_switch *ds = p->dp->ds; |
|---|
| 1483 | + struct dsa_port *cpu_dp; |
|---|
| 1484 | + int port = p->dp->index; |
|---|
| 1485 | + int largest_mtu = 0; |
|---|
| 1486 | + int new_master_mtu; |
|---|
| 1487 | + int old_master_mtu; |
|---|
| 1488 | + int mtu_limit; |
|---|
| 1489 | + int cpu_mtu; |
|---|
| 1490 | + int err, i; |
|---|
| 1491 | + |
|---|
| 1492 | + if (!ds->ops->port_change_mtu) |
|---|
| 1493 | + return -EOPNOTSUPP; |
|---|
| 1494 | + |
|---|
| 1495 | + for (i = 0; i < ds->num_ports; i++) { |
|---|
| 1496 | + int slave_mtu; |
|---|
| 1497 | + |
|---|
| 1498 | + if (!dsa_is_user_port(ds, i)) |
|---|
| 1499 | + continue; |
|---|
| 1500 | + |
|---|
| 1501 | + /* During probe, this function will be called for each slave |
|---|
| 1502 | + * device, while not all of them have been allocated. That's |
|---|
| 1503 | + * ok, it doesn't change what the maximum is, so ignore it. |
|---|
| 1504 | + */ |
|---|
| 1505 | + if (!dsa_to_port(ds, i)->slave) |
|---|
| 1506 | + continue; |
|---|
| 1507 | + |
|---|
| 1508 | + /* Pretend that we already applied the setting, which we |
|---|
| 1509 | + * actually haven't (still haven't done all integrity checks) |
|---|
| 1510 | + */ |
|---|
| 1511 | + if (i == port) |
|---|
| 1512 | + slave_mtu = new_mtu; |
|---|
| 1513 | + else |
|---|
| 1514 | + slave_mtu = dsa_to_port(ds, i)->slave->mtu; |
|---|
| 1515 | + |
|---|
| 1516 | + if (largest_mtu < slave_mtu) |
|---|
| 1517 | + largest_mtu = slave_mtu; |
|---|
| 1518 | + } |
|---|
| 1519 | + |
|---|
| 1520 | + cpu_dp = dsa_to_port(ds, port)->cpu_dp; |
|---|
| 1521 | + |
|---|
| 1522 | + mtu_limit = min_t(int, master->max_mtu, dev->max_mtu); |
|---|
| 1523 | + old_master_mtu = master->mtu; |
|---|
| 1524 | + new_master_mtu = largest_mtu + cpu_dp->tag_ops->overhead; |
|---|
| 1525 | + if (new_master_mtu > mtu_limit) |
|---|
| 1526 | + return -ERANGE; |
|---|
| 1527 | + |
|---|
| 1528 | + /* If the master MTU isn't over limit, there's no need to check the CPU |
|---|
| 1529 | + * MTU, since that surely isn't either. |
|---|
| 1530 | + */ |
|---|
| 1531 | + cpu_mtu = largest_mtu; |
|---|
| 1532 | + |
|---|
| 1533 | + /* Start applying stuff */ |
|---|
| 1534 | + if (new_master_mtu != old_master_mtu) { |
|---|
| 1535 | + err = dev_set_mtu(master, new_master_mtu); |
|---|
| 1536 | + if (err < 0) |
|---|
| 1537 | + goto out_master_failed; |
|---|
| 1538 | + |
|---|
| 1539 | + /* We only need to propagate the MTU of the CPU port to |
|---|
| 1540 | + * upstream switches. |
|---|
| 1541 | + */ |
|---|
| 1542 | + err = dsa_port_mtu_change(cpu_dp, cpu_mtu, true); |
|---|
| 1543 | + if (err) |
|---|
| 1544 | + goto out_cpu_failed; |
|---|
| 1545 | + } |
|---|
| 1546 | + |
|---|
| 1547 | + err = dsa_port_mtu_change(dp, new_mtu, false); |
|---|
| 1548 | + if (err) |
|---|
| 1549 | + goto out_port_failed; |
|---|
| 1550 | + |
|---|
| 1551 | + dev->mtu = new_mtu; |
|---|
| 1552 | + |
|---|
| 1553 | + dsa_bridge_mtu_normalization(dp); |
|---|
| 1554 | + |
|---|
| 1555 | + return 0; |
|---|
| 1556 | + |
|---|
| 1557 | +out_port_failed: |
|---|
| 1558 | + if (new_master_mtu != old_master_mtu) |
|---|
| 1559 | + dsa_port_mtu_change(cpu_dp, old_master_mtu - |
|---|
| 1560 | + cpu_dp->tag_ops->overhead, |
|---|
| 1561 | + true); |
|---|
| 1562 | +out_cpu_failed: |
|---|
| 1563 | + if (new_master_mtu != old_master_mtu) |
|---|
| 1564 | + dev_set_mtu(master, old_master_mtu); |
|---|
| 1565 | +out_master_failed: |
|---|
| 1566 | + return err; |
|---|
| 1567 | +} |
|---|
| 1568 | + |
|---|
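
`dsa_slave_change_mtu()` above sizes the master interface to carry the largest user-port MTU plus the tagging overhead. A worked example (the 4-byte overhead is an illustrative assumption):

```c
/* Worked example for the master-MTU computation above, assuming a
 * tagger with a 4-byte overhead:
 *
 *   largest user-port MTU requested = 1500
 *   new_master_mtu = 1500 + 4 = 1504
 *
 * The change is rejected with -ERANGE only if 1504 exceeds
 * min(master->max_mtu, dev->max_mtu). Otherwise the master MTU is
 * raised first, then the CPU port is notified, and only then is the
 * user port itself reconfigured, with rollback on each failure path.
 */
```
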
| 990 | 1569 | static const struct ethtool_ops dsa_slave_ethtool_ops = { |
|---|
| 991 | 1570 | .get_drvinfo = dsa_slave_get_drvinfo, |
|---|
| 992 | 1571 | .get_regs_len = dsa_slave_get_regs_len, |
|---|
| .. | .. |
|---|
| 1005 | 1584 | .get_eee = dsa_slave_get_eee, |
|---|
| 1006 | 1585 | .get_link_ksettings = dsa_slave_get_link_ksettings, |
|---|
| 1007 | 1586 | .set_link_ksettings = dsa_slave_set_link_ksettings, |
|---|
| 1587 | + .get_pauseparam = dsa_slave_get_pauseparam, |
|---|
| 1588 | + .set_pauseparam = dsa_slave_set_pauseparam, |
|---|
| 1008 | 1589 | .get_rxnfc = dsa_slave_get_rxnfc, |
|---|
| 1009 | 1590 | .set_rxnfc = dsa_slave_set_rxnfc, |
|---|
| 1010 | 1591 | .get_ts_info = dsa_slave_get_ts_info, |
|---|
| .. | .. |
|---|
| 1014 | 1595 | int dsa_legacy_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], |
|---|
| 1015 | 1596 | struct net_device *dev, |
|---|
| 1016 | 1597 | const unsigned char *addr, u16 vid, |
|---|
| 1017 | | - u16 flags) |
|---|
| 1598 | + u16 flags, |
|---|
| 1599 | + struct netlink_ext_ack *extack) |
|---|
| 1018 | 1600 | { |
|---|
| 1019 | 1601 | struct dsa_port *dp = dsa_slave_to_port(dev); |
|---|
| 1020 | 1602 | |
|---|
| .. | .. |
|---|
| 1028 | 1610 | struct dsa_port *dp = dsa_slave_to_port(dev); |
|---|
| 1029 | 1611 | |
|---|
| 1030 | 1612 | return dsa_port_fdb_del(dp, addr, vid); |
|---|
| 1613 | +} |
|---|
| 1614 | + |
|---|
| 1615 | +static struct devlink_port *dsa_slave_get_devlink_port(struct net_device *dev) |
|---|
| 1616 | +{ |
|---|
| 1617 | + struct dsa_port *dp = dsa_slave_to_port(dev); |
|---|
| 1618 | + |
|---|
| 1619 | + return dp->ds->devlink ? &dp->devlink_port : NULL; |
|---|
| 1031 | 1620 | } |
|---|
| 1032 | 1621 | |
|---|
| 1033 | 1622 | static const struct net_device_ops dsa_slave_netdev_ops = { |
|---|
| .. | .. |
|---|
| 1050 | 1639 | .ndo_get_phys_port_name = dsa_slave_get_phys_port_name, |
|---|
| 1051 | 1640 | .ndo_setup_tc = dsa_slave_setup_tc, |
|---|
| 1052 | 1641 | .ndo_get_stats64 = dsa_slave_get_stats64, |
|---|
| 1053 | | -}; |
|---|
| 1054 | | - |
|---|
| 1055 | | -static const struct switchdev_ops dsa_slave_switchdev_ops = { |
|---|
| 1056 | | - .switchdev_port_attr_get = dsa_slave_port_attr_get, |
|---|
| 1057 | | - .switchdev_port_attr_set = dsa_slave_port_attr_set, |
|---|
| 1058 | | - .switchdev_port_obj_add = dsa_slave_port_obj_add, |
|---|
| 1059 | | - .switchdev_port_obj_del = dsa_slave_port_obj_del, |
|---|
| 1642 | + .ndo_get_port_parent_id = dsa_slave_get_port_parent_id, |
|---|
| 1643 | + .ndo_vlan_rx_add_vid = dsa_slave_vlan_rx_add_vid, |
|---|
| 1644 | + .ndo_vlan_rx_kill_vid = dsa_slave_vlan_rx_kill_vid, |
|---|
| 1645 | + .ndo_get_devlink_port = dsa_slave_get_devlink_port, |
|---|
| 1646 | + .ndo_change_mtu = dsa_slave_change_mtu, |
|---|
| 1060 | 1647 | }; |
|---|
| 1061 | 1648 | |
|---|
| 1062 | 1649 | static struct device_type dsa_type = { |
|---|
| 1063 | 1650 | .name = "dsa", |
|---|
| 1064 | 1651 | }; |
|---|
| 1065 | 1652 | |
|---|
| 1066 | | -static void dsa_slave_phylink_validate(struct net_device *dev, |
|---|
| 1067 | | - unsigned long *supported, |
|---|
| 1068 | | - struct phylink_link_state *state) |
|---|
| 1069 | | -{ |
|---|
| 1070 | | - struct dsa_port *dp = dsa_slave_to_port(dev); |
|---|
| 1071 | | - struct dsa_switch *ds = dp->ds; |
|---|
| 1072 | | - |
|---|
| 1073 | | - if (!ds->ops->phylink_validate) |
|---|
| 1074 | | - return; |
|---|
| 1075 | | - |
|---|
| 1076 | | - ds->ops->phylink_validate(ds, dp->index, supported, state); |
|---|
| 1077 | | -} |
|---|
| 1078 | | - |
|---|
| 1079 | | -static int dsa_slave_phylink_mac_link_state(struct net_device *dev, |
|---|
| 1080 | | - struct phylink_link_state *state) |
|---|
| 1081 | | -{ |
|---|
| 1082 | | - struct dsa_port *dp = dsa_slave_to_port(dev); |
|---|
| 1083 | | - struct dsa_switch *ds = dp->ds; |
|---|
| 1084 | | - |
|---|
| 1085 | | - /* Only called for SGMII and 802.3z */ |
|---|
| 1086 | | - if (!ds->ops->phylink_mac_link_state) |
|---|
| 1087 | | - return -EOPNOTSUPP; |
|---|
| 1088 | | - |
|---|
| 1089 | | - return ds->ops->phylink_mac_link_state(ds, dp->index, state); |
|---|
| 1090 | | -} |
|---|
| 1091 | | - |
|---|
| 1092 | | -static void dsa_slave_phylink_mac_config(struct net_device *dev, |
|---|
| 1093 | | - unsigned int mode, |
|---|
| 1094 | | - const struct phylink_link_state *state) |
|---|
| 1095 | | -{ |
|---|
| 1096 | | - struct dsa_port *dp = dsa_slave_to_port(dev); |
|---|
| 1097 | | - struct dsa_switch *ds = dp->ds; |
|---|
| 1098 | | - |
|---|
| 1099 | | - if (!ds->ops->phylink_mac_config) |
|---|
| 1100 | | - return; |
|---|
| 1101 | | - |
|---|
| 1102 | | - ds->ops->phylink_mac_config(ds, dp->index, mode, state); |
|---|
| 1103 | | -} |
|---|
| 1104 | | - |
|---|
| 1105 | | -static void dsa_slave_phylink_mac_an_restart(struct net_device *dev) |
|---|
| 1106 | | -{ |
|---|
| 1107 | | - struct dsa_port *dp = dsa_slave_to_port(dev); |
|---|
| 1108 | | - struct dsa_switch *ds = dp->ds; |
|---|
| 1109 | | - |
|---|
| 1110 | | - if (!ds->ops->phylink_mac_an_restart) |
|---|
| 1111 | | - return; |
|---|
| 1112 | | - |
|---|
| 1113 | | - ds->ops->phylink_mac_an_restart(ds, dp->index); |
|---|
| 1114 | | -} |
|---|
| 1115 | | - |
|---|
| 1116 | | -static void dsa_slave_phylink_mac_link_down(struct net_device *dev, |
|---|
| 1117 | | - unsigned int mode, |
|---|
| 1118 | | - phy_interface_t interface) |
|---|
| 1119 | | -{ |
|---|
| 1120 | | - struct dsa_port *dp = dsa_slave_to_port(dev); |
|---|
| 1121 | | - struct dsa_switch *ds = dp->ds; |
|---|
| 1122 | | - |
|---|
| 1123 | | - if (!ds->ops->phylink_mac_link_down) { |
|---|
| 1124 | | - if (ds->ops->adjust_link && dev->phydev) |
|---|
| 1125 | | - ds->ops->adjust_link(ds, dp->index, dev->phydev); |
|---|
| 1126 | | - return; |
|---|
| 1127 | | - } |
|---|
| 1128 | | - |
|---|
| 1129 | | - ds->ops->phylink_mac_link_down(ds, dp->index, mode, interface); |
|---|
| 1130 | | -} |
|---|
| 1131 | | - |
|---|
| 1132 | | -static void dsa_slave_phylink_mac_link_up(struct net_device *dev, |
|---|
| 1133 | | - unsigned int mode, |
|---|
| 1134 | | - phy_interface_t interface, |
|---|
| 1135 | | - struct phy_device *phydev) |
|---|
| 1136 | | -{ |
|---|
| 1137 | | - struct dsa_port *dp = dsa_slave_to_port(dev); |
|---|
| 1138 | | - struct dsa_switch *ds = dp->ds; |
|---|
| 1139 | | - |
|---|
| 1140 | | - if (!ds->ops->phylink_mac_link_up) { |
|---|
| 1141 | | - if (ds->ops->adjust_link && dev->phydev) |
|---|
| 1142 | | - ds->ops->adjust_link(ds, dp->index, dev->phydev); |
|---|
| 1143 | | - return; |
|---|
| 1144 | | - } |
|---|
| 1145 | | - |
|---|
| 1146 | | - ds->ops->phylink_mac_link_up(ds, dp->index, mode, interface, phydev); |
|---|
| 1147 | | -} |
|---|
| 1148 | | - |
|---|
| 1149 | | -static const struct phylink_mac_ops dsa_slave_phylink_mac_ops = { |
|---|
| 1150 | | - .validate = dsa_slave_phylink_validate, |
|---|
| 1151 | | - .mac_link_state = dsa_slave_phylink_mac_link_state, |
|---|
| 1152 | | - .mac_config = dsa_slave_phylink_mac_config, |
|---|
| 1153 | | - .mac_an_restart = dsa_slave_phylink_mac_an_restart, |
|---|
| 1154 | | - .mac_link_down = dsa_slave_phylink_mac_link_down, |
|---|
| 1155 | | - .mac_link_up = dsa_slave_phylink_mac_link_up, |
|---|
| 1156 | | -}; |
|---|
| 1157 | | - |
|---|
| 1158 | 1653 | void dsa_port_phylink_mac_change(struct dsa_switch *ds, int port, bool up) |
|---|
| 1159 | 1654 | { |
|---|
| 1160 | 1655 | const struct dsa_port *dp = dsa_to_port(ds, port); |
|---|
| 1161 | 1656 | |
|---|
| 1162 | | - phylink_mac_change(dp->pl, up); |
|---|
| 1657 | + if (dp->pl) |
|---|
| 1658 | + phylink_mac_change(dp->pl, up); |
|---|
| 1163 | 1659 | } |
|---|
| 1164 | 1660 | EXPORT_SYMBOL_GPL(dsa_port_phylink_mac_change); |
|---|
| 1165 | 1661 | |
|---|
| 1166 | | -static void dsa_slave_phylink_fixed_state(struct net_device *dev, |
|---|
| 1662 | +static void dsa_slave_phylink_fixed_state(struct phylink_config *config, |
|---|
| 1167 | 1663 | struct phylink_link_state *state) |
|---|
| 1168 | 1664 | { |
|---|
| 1169 | | - struct dsa_port *dp = dsa_slave_to_port(dev); |
|---|
| 1665 | + struct dsa_port *dp = container_of(config, struct dsa_port, pl_config); |
|---|
| 1170 | 1666 | struct dsa_switch *ds = dp->ds; |
|---|
| 1171 | 1667 | |
|---|
| 1172 | 1668 | /* No need to check that this operation is valid, the callback would |
|---|
| .. | .. |
|---|
| 1195 | 1691 | struct dsa_port *dp = dsa_slave_to_port(slave_dev); |
|---|
| 1196 | 1692 | struct device_node *port_dn = dp->dn; |
|---|
| 1197 | 1693 | struct dsa_switch *ds = dp->ds; |
|---|
| 1694 | + phy_interface_t mode; |
|---|
| 1198 | 1695 | u32 phy_flags = 0; |
|---|
| 1199 | | - int mode, ret; |
|---|
| 1696 | + int ret; |
|---|
| 1200 | 1697 | |
|---|
| 1201 | | - mode = of_get_phy_mode(port_dn); |
|---|
| 1202 | | - if (mode < 0) |
|---|
| 1698 | + ret = of_get_phy_mode(port_dn, &mode); |
|---|
| 1699 | + if (ret) |
|---|
| 1203 | 1700 | mode = PHY_INTERFACE_MODE_NA; |
|---|
| 1204 | 1701 | |
|---|
| 1205 | | - dp->pl = phylink_create(slave_dev, of_fwnode_handle(port_dn), mode, |
|---|
| 1206 | | - &dsa_slave_phylink_mac_ops); |
|---|
| 1702 | + dp->pl_config.dev = &slave_dev->dev; |
|---|
| 1703 | + dp->pl_config.type = PHYLINK_NETDEV; |
|---|
| 1704 | + |
|---|
| 1705 | + /* The get_fixed_state callback takes precedence over polling the |
|---|
| 1706 | + * link GPIO in PHYLINK (see phylink_get_fixed_state). Only set |
|---|
| 1707 | + * this if the switch provides such a callback. |
|---|
| 1708 | + */ |
|---|
| 1709 | + if (ds->ops->phylink_fixed_state) { |
|---|
| 1710 | + dp->pl_config.get_fixed_state = dsa_slave_phylink_fixed_state; |
|---|
| 1711 | + dp->pl_config.poll_fixed_state = true; |
|---|
| 1712 | + } |
|---|
| 1713 | + |
|---|
| 1714 | + dp->pl = phylink_create(&dp->pl_config, of_fwnode_handle(port_dn), mode, |
|---|
| 1715 | + &dsa_port_phylink_mac_ops); |
|---|
| 1207 | 1716 | if (IS_ERR(dp->pl)) { |
|---|
| 1208 | 1717 | netdev_err(slave_dev, |
|---|
| 1209 | 1718 | "error creating PHYLINK: %ld\n", PTR_ERR(dp->pl)); |
|---|
| 1210 | 1719 | return PTR_ERR(dp->pl); |
|---|
| 1211 | 1720 | } |
|---|
| 1212 | | - |
|---|
| 1213 | | - /* Register only if the switch provides such a callback, since this |
|---|
| 1214 | | - * callback takes precedence over polling the link GPIO in PHYLINK |
|---|
| 1215 | | - * (see phylink_get_fixed_state). |
|---|
| 1216 | | - */ |
|---|
| 1217 | | - if (ds->ops->phylink_fixed_state) |
|---|
| 1218 | | - phylink_fixed_state_cb(dp->pl, dsa_slave_phylink_fixed_state); |
|---|
| 1219 | 1721 | |
|---|
| 1220 | 1722 | if (ds->ops->get_phy_flags) |
|---|
| 1221 | 1723 | phy_flags = ds->ops->get_phy_flags(ds, dp->index); |
|---|
| .. | .. |
|---|
| 1311 | 1813 | return -ENOMEM; |
|---|
| 1312 | 1814 | |
|---|
| 1313 | 1815 | slave_dev->features = master->vlan_features | NETIF_F_HW_TC; |
|---|
| 1816 | + if (ds->ops->port_vlan_add && ds->ops->port_vlan_del) |
|---|
| 1817 | + slave_dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; |
|---|
| 1314 | 1818 | slave_dev->hw_features |= NETIF_F_HW_TC; |
|---|
| 1819 | + slave_dev->features |= NETIF_F_LLTX; |
|---|
| 1315 | 1820 | slave_dev->ethtool_ops = &dsa_slave_ethtool_ops; |
|---|
| 1316 | | - if (port->mac && is_valid_ether_addr(port->mac)) |
|---|
| 1821 | + if (!IS_ERR_OR_NULL(port->mac)) |
|---|
| 1317 | 1822 | ether_addr_copy(slave_dev->dev_addr, port->mac); |
|---|
| 1318 | 1823 | else |
|---|
| 1319 | 1824 | eth_hw_addr_inherit(slave_dev, master); |
|---|
| 1320 | 1825 | slave_dev->priv_flags |= IFF_NO_QUEUE; |
|---|
| 1321 | 1826 | slave_dev->netdev_ops = &dsa_slave_netdev_ops; |
|---|
| 1322 | | - slave_dev->switchdev_ops = &dsa_slave_switchdev_ops; |
|---|
| 1323 | | - slave_dev->min_mtu = 0; |
|---|
| 1324 | | - slave_dev->max_mtu = ETH_MAX_MTU; |
|---|
| 1827 | + if (ds->ops->port_max_mtu) |
|---|
| 1828 | + slave_dev->max_mtu = ds->ops->port_max_mtu(ds, port->index); |
|---|
| 1829 | + if (cpu_dp->tag_ops->tail_tag) |
|---|
| 1830 | + slave_dev->needed_tailroom = cpu_dp->tag_ops->overhead; |
|---|
| 1831 | + else |
|---|
| 1832 | + slave_dev->needed_headroom = cpu_dp->tag_ops->overhead; |
|---|
| 1833 | + /* Try to save one extra realloc later in the TX path (in the master) |
|---|
| 1834 | + * by also inheriting the master's needed headroom and tailroom. |
|---|
| 1835 | + * The 8021q driver also does this. |
|---|
| 1836 | + */ |
|---|
| 1837 | + slave_dev->needed_headroom += master->needed_headroom; |
|---|
| 1838 | + slave_dev->needed_tailroom += master->needed_tailroom; |
|---|
| 1325 | 1839 | SET_NETDEV_DEVTYPE(slave_dev, &dsa_type); |
|---|
| 1326 | 1840 | |
|---|
| 1327 | 1841 | netdev_for_each_tx_queue(slave_dev, dsa_slave_set_lockdep_class_one, |
|---|
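
The headroom/tailroom setup above is driven by the tagging protocol: a tagger advertises its per-frame tag size via `overhead`, and a tail tagger additionally sets `tail_tag` so the reservation lands in `needed_tailroom` rather than `needed_headroom`. A hypothetical, partial tagger descriptor to illustrate which fields this code consults (`foo_*` names are placeholders; mandatory fields such as `.rcv`, `.name`, `.proto` are elided):

```c
#include <net/dsa.h>

#define FOO_TAG_LEN 4	/* hypothetical 4-byte tag appended to each frame */

static struct sk_buff *foo_tag_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	/* A real tail tagger would skb_put() FOO_TAG_LEN bytes here,
	 * counting on the needed_tailroom reserved by dsa_slave_create().
	 */
	return skb;
}

static const struct dsa_device_ops foo_tag_ops = {
	.xmit     = foo_tag_xmit,
	.overhead = FOO_TAG_LEN,	/* consulted for head/tailroom sizing */
	.tail_tag = true,		/* tag at the end -> needed_tailroom */
	/* ...other mandatory fields elided... */
};
```

Inheriting the master's own `needed_headroom`/`needed_tailroom` on top of this avoids a second skb reallocation when the tagged frame is handed to the master for transmission.
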
| .. | .. |
|---|
| 1347 | 1861 | p->xmit = cpu_dp->tag_ops->xmit; |
|---|
| 1348 | 1862 | port->slave = slave_dev; |
|---|
| 1349 | 1863 | |
|---|
| 1864 | + rtnl_lock(); |
|---|
| 1865 | + ret = dsa_slave_change_mtu(slave_dev, ETH_DATA_LEN); |
|---|
| 1866 | + rtnl_unlock(); |
|---|
| 1867 | + if (ret && ret != -EOPNOTSUPP) |
|---|
| 1868 | + dev_warn(ds->dev, "nonfatal error %d setting MTU on port %d\n", |
|---|
| 1869 | + ret, port->index); |
|---|
| 1870 | + |
|---|
| 1350 | 1871 | netif_carrier_off(slave_dev); |
|---|
| 1351 | 1872 | |
|---|
| 1352 | 1873 | ret = dsa_slave_phy_setup(slave_dev); |
|---|
| 1353 | 1874 | if (ret) { |
|---|
| 1354 | | - netdev_err(master, "error %d setting up slave phy\n", ret); |
|---|
| 1875 | + netdev_err(slave_dev, |
|---|
| 1876 | + "error %d setting up PHY for tree %d, switch %d, port %d\n", |
|---|
| 1877 | + ret, ds->dst->index, ds->index, port->index); |
|---|
| 1355 | 1878 | goto out_gcells; |
|---|
| 1356 | 1879 | } |
|---|
| 1357 | 1880 | |
|---|
| 1358 | 1881 | dsa_slave_notify(slave_dev, DSA_PORT_REGISTER); |
|---|
| 1359 | 1882 | |
|---|
| 1360 | | - ret = register_netdev(slave_dev); |
|---|
| 1883 | + rtnl_lock(); |
|---|
| 1884 | + |
|---|
| 1885 | + ret = register_netdevice(slave_dev); |
|---|
| 1361 | 1886 | if (ret) { |
|---|
| 1362 | 1887 | netdev_err(master, "error %d registering interface %s\n", |
|---|
| 1363 | 1888 | ret, slave_dev->name); |
|---|
| 1889 | + rtnl_unlock(); |
|---|
| 1364 | 1890 | goto out_phy; |
|---|
| 1365 | 1891 | } |
|---|
| 1366 | 1892 | |
|---|
| 1893 | + ret = netdev_upper_dev_link(master, slave_dev, NULL); |
|---|
| 1894 | + |
|---|
| 1895 | + rtnl_unlock(); |
|---|
| 1896 | + |
|---|
| 1897 | + if (ret) |
|---|
| 1898 | + goto out_unregister; |
|---|
| 1899 | + |
|---|
| 1367 | 1900 | return 0; |
|---|
| 1368 | 1901 | |
|---|
| 1902 | +out_unregister: |
|---|
| 1903 | + unregister_netdev(slave_dev); |
|---|
| 1369 | 1904 | out_phy: |
|---|
| 1370 | 1905 | rtnl_lock(); |
|---|
| 1371 | 1906 | phylink_disconnect_phy(p->dp->pl); |
|---|
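
The switch from register_netdev() to register_netdevice() in this hunk is about lock scope: the caller now takes rtnl_lock() itself so that registering the slave and linking it as an upper device of the master happen in one RTNL section. Roughly, the relationship between the two helpers looks like the simplified sketch below (an approximation, not the actual net/core/dev.c source):

```c
/* Simplified sketch: register_netdev() is essentially
 * register_netdevice() wrapped in the RTNL lock.
 */
int register_netdev(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = register_netdevice(dev);
	rtnl_unlock();

	return err;
}
```

Calling register_netdevice() directly lets dsa_slave_create() keep holding RTNL for the subsequent netdev_upper_dev_link() call; the new out_unregister error label then unwinds with plain unregister_netdev() once the lock has been dropped.
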
| .. | .. |
|---|
| 1382 | 1917 | |
|---|
| 1383 | 1918 | void dsa_slave_destroy(struct net_device *slave_dev) |
|---|
| 1384 | 1919 | { |
|---|
| 1920 | + struct net_device *master = dsa_slave_to_master(slave_dev); |
|---|
| 1385 | 1921 | struct dsa_port *dp = dsa_slave_to_port(slave_dev); |
|---|
| 1386 | 1922 | struct dsa_slave_priv *p = netdev_priv(slave_dev); |
|---|
| 1387 | 1923 | |
|---|
| 1388 | 1924 | netif_carrier_off(slave_dev); |
|---|
| 1389 | 1925 | rtnl_lock(); |
|---|
| 1926 | + netdev_upper_dev_unlink(master, slave_dev); |
|---|
| 1927 | + unregister_netdevice(slave_dev); |
|---|
| 1390 | 1928 | phylink_disconnect_phy(dp->pl); |
|---|
| 1391 | 1929 | rtnl_unlock(); |
|---|
| 1392 | 1930 | |
|---|
| 1393 | 1931 | dsa_slave_notify(slave_dev, DSA_PORT_UNREGISTER); |
|---|
| 1394 | | - unregister_netdev(slave_dev); |
|---|
| 1395 | 1932 | phylink_destroy(dp->pl); |
|---|
| 1396 | 1933 | gro_cells_destroy(&p->gcells); |
|---|
| 1397 | 1934 | free_percpu(p->stats64); |
|---|
| 1398 | 1935 | free_netdev(slave_dev); |
|---|
| 1399 | 1936 | } |
|---|
| 1400 | 1937 | |
|---|
| 1401 | | -static bool dsa_slave_dev_check(struct net_device *dev) |
|---|
| 1938 | +bool dsa_slave_dev_check(const struct net_device *dev) |
|---|
| 1402 | 1939 | { |
|---|
| 1403 | 1940 | return dev->netdev_ops == &dsa_slave_netdev_ops; |
|---|
| 1404 | 1941 | } |
|---|
| .. | .. |
|---|
| 1412 | 1949 | if (netif_is_bridge_master(info->upper_dev)) { |
|---|
| 1413 | 1950 | if (info->linking) { |
|---|
| 1414 | 1951 | err = dsa_port_bridge_join(dp, info->upper_dev); |
|---|
| 1952 | + if (!err) |
|---|
| 1953 | + dsa_bridge_mtu_normalization(dp); |
|---|
| 1415 | 1954 | err = notifier_from_errno(err); |
|---|
| 1416 | 1955 | } else { |
|---|
| 1417 | 1956 | dsa_port_bridge_leave(dp, info->upper_dev); |
|---|
| .. | .. |
|---|
| 1422 | 1961 | return err; |
|---|
| 1423 | 1962 | } |
|---|
| 1424 | 1963 | |
|---|
| 1964 | +static int |
|---|
| 1965 | +dsa_prevent_bridging_8021q_upper(struct net_device *dev, |
|---|
| 1966 | + struct netdev_notifier_changeupper_info *info) |
|---|
| 1967 | +{ |
|---|
| 1968 | + struct netlink_ext_ack *ext_ack; |
|---|
| 1969 | + struct net_device *slave; |
|---|
| 1970 | + struct dsa_port *dp; |
|---|
| 1971 | + |
|---|
| 1972 | + ext_ack = netdev_notifier_info_to_extack(&info->info); |
|---|
| 1973 | + |
|---|
| 1974 | + if (!is_vlan_dev(dev)) |
|---|
| 1975 | + return NOTIFY_DONE; |
|---|
| 1976 | + |
|---|
| 1977 | + slave = vlan_dev_real_dev(dev); |
|---|
| 1978 | + if (!dsa_slave_dev_check(slave)) |
|---|
| 1979 | + return NOTIFY_DONE; |
|---|
| 1980 | + |
|---|
| 1981 | + dp = dsa_slave_to_port(slave); |
|---|
| 1982 | + if (!dp->bridge_dev) |
|---|
| 1983 | + return NOTIFY_DONE; |
|---|
| 1984 | + |
|---|
| 1985 | + /* Deny enslaving a VLAN device into a VLAN-aware bridge */ |
|---|
| 1986 | + if (br_vlan_enabled(dp->bridge_dev) && |
|---|
| 1987 | + netif_is_bridge_master(info->upper_dev) && info->linking) { |
|---|
| 1988 | + NL_SET_ERR_MSG_MOD(ext_ack, |
|---|
| 1989 | + "Cannot enslave VLAN device into VLAN aware bridge"); |
|---|
| 1990 | + return notifier_from_errno(-EINVAL); |
|---|
| 1991 | + } |
|---|
| 1992 | + |
|---|
| 1993 | + return NOTIFY_DONE; |
|---|
| 1994 | +} |
|---|
| 1995 | + |
|---|
| 1996 | +static int |
|---|
| 1997 | +dsa_slave_check_8021q_upper(struct net_device *dev, |
|---|
| 1998 | + struct netdev_notifier_changeupper_info *info) |
|---|
| 1999 | +{ |
|---|
| 2000 | + struct dsa_port *dp = dsa_slave_to_port(dev); |
|---|
| 2001 | + struct net_device *br = dp->bridge_dev; |
|---|
| 2002 | + struct bridge_vlan_info br_info; |
|---|
| 2003 | + struct netlink_ext_ack *extack; |
|---|
| 2004 | + int err = NOTIFY_DONE; |
|---|
| 2005 | + u16 vid; |
|---|
| 2006 | + |
|---|
| 2007 | + if (!br || !br_vlan_enabled(br)) |
|---|
| 2008 | + return NOTIFY_DONE; |
|---|
| 2009 | + |
|---|
| 2010 | + extack = netdev_notifier_info_to_extack(&info->info); |
|---|
| 2011 | + vid = vlan_dev_vlan_id(info->upper_dev); |
|---|
| 2012 | + |
|---|
| 2013 | + /* br_vlan_get_info() returns -EINVAL or -ENOENT if the device, |
|---|
| 2014 | + * respectively the VID, is not found. A return value of 0 means |
|---|
| 2015 | + * the VLAN already exists on the bridge, which is a failure for us here. |
|---|
| 2016 | + */ |
|---|
| 2017 | + err = br_vlan_get_info(br, vid, &br_info); |
|---|
| 2018 | + if (err == 0) { |
|---|
| 2019 | + NL_SET_ERR_MSG_MOD(extack, |
|---|
| 2020 | + "This VLAN is already configured by the bridge"); |
|---|
| 2021 | + return notifier_from_errno(-EBUSY); |
|---|
| 2022 | + } |
|---|
| 2023 | + |
|---|
| 2024 | + return NOTIFY_DONE; |
|---|
| 2025 | +} |
|---|
| 2026 | + |
|---|
| 1425 | 2027 | static int dsa_slave_netdevice_event(struct notifier_block *nb, |
|---|
| 1426 | 2028 | unsigned long event, void *ptr) |
|---|
| 1427 | 2029 | { |
|---|
| 1428 | 2030 | struct net_device *dev = netdev_notifier_info_to_dev(ptr); |
|---|
| 1429 | 2031 | |
|---|
| 1430 | | - if (!dsa_slave_dev_check(dev)) |
|---|
| 1431 | | - return NOTIFY_DONE; |
|---|
| 2032 | + switch (event) { |
|---|
| 2033 | + case NETDEV_PRECHANGEUPPER: { |
|---|
| 2034 | + struct netdev_notifier_changeupper_info *info = ptr; |
|---|
| 1432 | 2035 | |
|---|
| 1433 | | - if (event == NETDEV_CHANGEUPPER) |
|---|
| 2036 | + if (!dsa_slave_dev_check(dev)) |
|---|
| 2037 | + return dsa_prevent_bridging_8021q_upper(dev, ptr); |
|---|
| 2038 | + |
|---|
| 2039 | + if (is_vlan_dev(info->upper_dev)) |
|---|
| 2040 | + return dsa_slave_check_8021q_upper(dev, ptr); |
|---|
| 2041 | + break; |
|---|
| 2042 | + } |
|---|
| 2043 | + case NETDEV_CHANGEUPPER: |
|---|
| 2044 | + if (!dsa_slave_dev_check(dev)) |
|---|
| 2045 | + return NOTIFY_DONE; |
|---|
| 2046 | + |
|---|
| 1434 | 2047 | return dsa_slave_changeupper(dev, ptr); |
|---|
| 2048 | + } |
|---|
| 1435 | 2049 | |
|---|
| 1436 | 2050 | return NOTIFY_DONE; |
|---|
| 1437 | 2051 | } |
|---|
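
A note on the return values used throughout these handlers: NOTIFY_DONE means "not interested, continue the chain", while notifier_from_errno() folds a negative errno into a NOTIFY_STOP-style result that the core can later recover with notifier_to_errno(). The userspace sketch below reproduces that encoding (constants and helpers written from memory of include/linux/notifier.h; treat it as an approximation, not the authoritative definition):

```c
#include <assert.h>
#include <errno.h>
#include <stdio.h>

#define NOTIFY_DONE		0x0000	/* don't care */
#define NOTIFY_OK		0x0001	/* suits me */
#define NOTIFY_STOP_MASK	0x8000	/* don't call further callbacks */

static int notifier_from_errno(int err)
{
	if (err)
		return NOTIFY_STOP_MASK | (NOTIFY_OK - err);

	return NOTIFY_OK;
}

static int notifier_to_errno(int ret)
{
	ret &= ~NOTIFY_STOP_MASK;
	return ret > NOTIFY_OK ? NOTIFY_OK - ret : 0;
}

int main(void)
{
	/* -EINVAL survives the round trip, which is what the
	 * PRECHANGEUPPER handlers above rely on to veto an upper device.
	 */
	int ret = notifier_from_errno(-EINVAL);

	assert(notifier_to_errno(ret) == -EINVAL);
	printf("encoded 0x%x decodes back to %d\n", ret, notifier_to_errno(ret));
	return 0;
}
```
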
| .. | .. |
|---|
| 1464 | 2078 | netdev_dbg(dev, "fdb add failed err=%d\n", err); |
|---|
| 1465 | 2079 | break; |
|---|
| 1466 | 2080 | } |
|---|
| 2081 | + fdb_info->offloaded = true; |
|---|
| 1467 | 2082 | call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, dev, |
|---|
| 1468 | | - &fdb_info->info); |
|---|
| 2083 | + &fdb_info->info, NULL); |
|---|
| 1469 | 2084 | break; |
|---|
| 1470 | 2085 | |
|---|
| 1471 | 2086 | case SWITCHDEV_FDB_DEL_TO_DEVICE: |
|---|
| .. | .. |
|---|
| 1509 | 2124 | { |
|---|
| 1510 | 2125 | struct net_device *dev = switchdev_notifier_info_to_dev(ptr); |
|---|
| 1511 | 2126 | struct dsa_switchdev_event_work *switchdev_work; |
|---|
| 2127 | + int err; |
|---|
| 2128 | + |
|---|
| 2129 | + if (event == SWITCHDEV_PORT_ATTR_SET) { |
|---|
| 2130 | + err = switchdev_handle_port_attr_set(dev, ptr, |
|---|
| 2131 | + dsa_slave_dev_check, |
|---|
| 2132 | + dsa_slave_port_attr_set); |
|---|
| 2133 | + return notifier_from_errno(err); |
|---|
| 2134 | + } |
|---|
| 1512 | 2135 | |
|---|
| 1513 | 2136 | if (!dsa_slave_dev_check(dev)) |
|---|
| 1514 | 2137 | return NOTIFY_DONE; |
|---|
| .. | .. |
|---|
| 1523 | 2146 | switchdev_work->event = event; |
|---|
| 1524 | 2147 | |
|---|
| 1525 | 2148 | switch (event) { |
|---|
| 1526 | | - case SWITCHDEV_FDB_ADD_TO_DEVICE: /* fall through */ |
|---|
| 2149 | + case SWITCHDEV_FDB_ADD_TO_DEVICE: |
|---|
| 1527 | 2150 | case SWITCHDEV_FDB_DEL_TO_DEVICE: |
|---|
| 1528 | 2151 | if (dsa_slave_switchdev_fdb_work_init(switchdev_work, ptr)) |
|---|
| 1529 | 2152 | goto err_fdb_work_init; |
|---|
| .. | .. |
|---|
| 1542 | 2165 | return NOTIFY_BAD; |
|---|
| 1543 | 2166 | } |
|---|
| 1544 | 2167 | |
|---|
| 2168 | +static int dsa_slave_switchdev_blocking_event(struct notifier_block *unused, |
|---|
| 2169 | + unsigned long event, void *ptr) |
|---|
| 2170 | +{ |
|---|
| 2171 | + struct net_device *dev = switchdev_notifier_info_to_dev(ptr); |
|---|
| 2172 | + int err; |
|---|
| 2173 | + |
|---|
| 2174 | + switch (event) { |
|---|
| 2175 | + case SWITCHDEV_PORT_OBJ_ADD: |
|---|
| 2176 | + err = switchdev_handle_port_obj_add(dev, ptr, |
|---|
| 2177 | + dsa_slave_dev_check, |
|---|
| 2178 | + dsa_slave_port_obj_add); |
|---|
| 2179 | + return notifier_from_errno(err); |
|---|
| 2180 | + case SWITCHDEV_PORT_OBJ_DEL: |
|---|
| 2181 | + err = switchdev_handle_port_obj_del(dev, ptr, |
|---|
| 2182 | + dsa_slave_dev_check, |
|---|
| 2183 | + dsa_slave_port_obj_del); |
|---|
| 2184 | + return notifier_from_errno(err); |
|---|
| 2185 | + case SWITCHDEV_PORT_ATTR_SET: |
|---|
| 2186 | + err = switchdev_handle_port_attr_set(dev, ptr, |
|---|
| 2187 | + dsa_slave_dev_check, |
|---|
| 2188 | + dsa_slave_port_attr_set); |
|---|
| 2189 | + return notifier_from_errno(err); |
|---|
| 2190 | + } |
|---|
| 2191 | + |
|---|
| 2192 | + return NOTIFY_DONE; |
|---|
| 2193 | +} |
|---|
| 2194 | + |
|---|
| 1545 | 2195 | static struct notifier_block dsa_slave_nb __read_mostly = { |
|---|
| 1546 | 2196 | .notifier_call = dsa_slave_netdevice_event, |
|---|
| 1547 | 2197 | }; |
|---|
| .. | .. |
|---|
| 1550 | 2200 | .notifier_call = dsa_slave_switchdev_event, |
|---|
| 1551 | 2201 | }; |
|---|
| 1552 | 2202 | |
|---|
| 2203 | +static struct notifier_block dsa_slave_switchdev_blocking_notifier = { |
|---|
| 2204 | + .notifier_call = dsa_slave_switchdev_blocking_event, |
|---|
| 2205 | +}; |
|---|
| 2206 | + |
|---|
| 1553 | 2207 | int dsa_slave_register_notifier(void) |
|---|
| 1554 | 2208 | { |
|---|
| 2209 | + struct notifier_block *nb; |
|---|
| 1555 | 2210 | int err; |
|---|
| 1556 | 2211 | |
|---|
| 1557 | 2212 | err = register_netdevice_notifier(&dsa_slave_nb); |
|---|
| .. | .. |
|---|
| 1562 | 2217 | if (err) |
|---|
| 1563 | 2218 | goto err_switchdev_nb; |
|---|
| 1564 | 2219 | |
|---|
| 2220 | + nb = &dsa_slave_switchdev_blocking_notifier; |
|---|
| 2221 | + err = register_switchdev_blocking_notifier(nb); |
|---|
| 2222 | + if (err) |
|---|
| 2223 | + goto err_switchdev_blocking_nb; |
|---|
| 2224 | + |
|---|
| 1565 | 2225 | return 0; |
|---|
| 1566 | 2226 | |
|---|
| 2227 | +err_switchdev_blocking_nb: |
|---|
| 2228 | + unregister_switchdev_notifier(&dsa_slave_switchdev_notifier); |
|---|
| 1567 | 2229 | err_switchdev_nb: |
|---|
| 1568 | 2230 | unregister_netdevice_notifier(&dsa_slave_nb); |
|---|
| 1569 | 2231 | return err; |
|---|
| .. | .. |
|---|
| 1571 | 2233 | |
|---|
| 1572 | 2234 | void dsa_slave_unregister_notifier(void) |
|---|
| 1573 | 2235 | { |
|---|
| 2236 | + struct notifier_block *nb; |
|---|
| 1574 | 2237 | int err; |
|---|
| 1575 | 2238 | |
|---|
| 2239 | + nb = &dsa_slave_switchdev_blocking_notifier; |
|---|
| 2240 | + err = unregister_switchdev_blocking_notifier(nb); |
|---|
| 2241 | + if (err) |
|---|
| 2242 | + pr_err("DSA: failed to unregister switchdev blocking notifier (%d)\n", err); |
|---|
| 2243 | + |
|---|
| 1576 | 2244 | err = unregister_switchdev_notifier(&dsa_slave_switchdev_notifier); |
|---|
| 1577 | 2245 | if (err) |
|---|
| 1578 | 2246 | pr_err("DSA: failed to unregister switchdev notifier (%d)\n", err); |
|---|