.. | .. |
---|
| 1 | +// SPDX-License-Identifier: GPL-2.0-or-later |
---|
1 | 2 | /* |
---|
2 | 3 | * drivers/net/ethernet/rocker/rocker.c - Rocker switch device driver |
---|
3 | 4 | * Copyright (c) 2014-2016 Jiri Pirko <jiri@mellanox.com> |
---|
4 | 5 | * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com> |
---|
5 | | - * |
---|
6 | | - * This program is free software; you can redistribute it and/or modify |
---|
7 | | - * it under the terms of the GNU General Public License as published by |
---|
8 | | - * the Free Software Foundation; either version 2 of the License, or |
---|
9 | | - * (at your option) any later version. |
---|
10 | 6 | */ |
---|
11 | 7 | |
---|
12 | 8 | #include <linux/kernel.h> |
---|
.. | .. |
---|
204 | 200 | buf = alloc + offset; |
---|
205 | 201 | expect = buf + ROCKER_TEST_DMA_BUF_SIZE; |
---|
206 | 202 | |
---|
207 | | - dma_handle = pci_map_single(pdev, buf, ROCKER_TEST_DMA_BUF_SIZE, |
---|
208 | | - PCI_DMA_BIDIRECTIONAL); |
---|
209 | | - if (pci_dma_mapping_error(pdev, dma_handle)) { |
---|
| 203 | + dma_handle = dma_map_single(&pdev->dev, buf, ROCKER_TEST_DMA_BUF_SIZE, |
---|
| 204 | + DMA_BIDIRECTIONAL); |
---|
| 205 | + if (dma_mapping_error(&pdev->dev, dma_handle)) { |
---|
210 | 206 | err = -EIO; |
---|
211 | 207 | goto free_alloc; |
---|
212 | 208 | } |
---|
.. | .. |
---|
238 | 234 | goto unmap; |
---|
239 | 235 | |
---|
240 | 236 | unmap: |
---|
241 | | - pci_unmap_single(pdev, dma_handle, ROCKER_TEST_DMA_BUF_SIZE, |
---|
242 | | - PCI_DMA_BIDIRECTIONAL); |
---|
| 237 | + dma_unmap_single(&pdev->dev, dma_handle, ROCKER_TEST_DMA_BUF_SIZE, |
---|
| 238 | + DMA_BIDIRECTIONAL); |
---|
243 | 239 | free_alloc: |
---|
244 | 240 | kfree(alloc); |
---|
245 | 241 | |
---|
.. | .. |
---|
371 | 367 | static struct rocker_desc_info * |
---|
372 | 368 | rocker_desc_head_get(const struct rocker_dma_ring_info *info) |
---|
373 | 369 | { |
---|
374 | | - static struct rocker_desc_info *desc_info; |
---|
| 370 | + struct rocker_desc_info *desc_info; |
---|
375 | 371 | u32 head = __pos_inc(info->head, info->size); |
---|
376 | 372 | |
---|
377 | 373 | desc_info = &info->desc_info[info->head]; |
---|
.. | .. |
---|
402 | 398 | static struct rocker_desc_info * |
---|
403 | 399 | rocker_desc_tail_get(struct rocker_dma_ring_info *info) |
---|
404 | 400 | { |
---|
405 | | - static struct rocker_desc_info *desc_info; |
---|
| 401 | + struct rocker_desc_info *desc_info; |
---|
406 | 402 | |
---|
407 | 403 | if (info->tail == info->head) |
---|
408 | 404 | return NULL; /* nothing to be done between head and tail */ |
---|
.. | .. |
---|
445 | 441 | if (!info->desc_info) |
---|
446 | 442 | return -ENOMEM; |
---|
447 | 443 | |
---|
448 | | - info->desc = pci_alloc_consistent(rocker->pdev, |
---|
449 | | - info->size * sizeof(*info->desc), |
---|
450 | | - &info->mapaddr); |
---|
| 444 | + info->desc = dma_alloc_coherent(&rocker->pdev->dev, |
---|
| 445 | + info->size * sizeof(*info->desc), |
---|
| 446 | + &info->mapaddr, GFP_KERNEL); |
---|
451 | 447 | if (!info->desc) { |
---|
452 | 448 | kfree(info->desc_info); |
---|
453 | 449 | return -ENOMEM; |
---|
.. | .. |
---|
469 | 465 | { |
---|
470 | 466 | rocker_write64(rocker, DMA_DESC_ADDR(info->type), 0); |
---|
471 | 467 | |
---|
472 | | - pci_free_consistent(rocker->pdev, |
---|
473 | | - info->size * sizeof(struct rocker_desc), |
---|
474 | | - info->desc, info->mapaddr); |
---|
| 468 | + dma_free_coherent(&rocker->pdev->dev, |
---|
| 469 | + info->size * sizeof(struct rocker_desc), info->desc, |
---|
| 470 | + info->mapaddr); |
---|
475 | 471 | kfree(info->desc_info); |
---|
476 | 472 | } |
---|
477 | 473 | |
---|
.. | .. |
---|
510 | 506 | goto rollback; |
---|
511 | 507 | } |
---|
512 | 508 | |
---|
513 | | - dma_handle = pci_map_single(pdev, buf, buf_size, direction); |
---|
514 | | - if (pci_dma_mapping_error(pdev, dma_handle)) { |
---|
| 509 | + dma_handle = dma_map_single(&pdev->dev, buf, buf_size, |
---|
| 510 | + direction); |
---|
| 511 | + if (dma_mapping_error(&pdev->dev, dma_handle)) { |
---|
515 | 512 | kfree(buf); |
---|
516 | 513 | err = -EIO; |
---|
517 | 514 | goto rollback; |
---|
.. | .. |
---|
530 | 527 | for (i--; i >= 0; i--) { |
---|
531 | 528 | const struct rocker_desc_info *desc_info = &info->desc_info[i]; |
---|
532 | 529 | |
---|
533 | | - pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr), |
---|
| 530 | + dma_unmap_single(&pdev->dev, |
---|
| 531 | + dma_unmap_addr(desc_info, mapaddr), |
---|
534 | 532 | desc_info->data_size, direction); |
---|
535 | 533 | kfree(desc_info->data); |
---|
536 | 534 | } |
---|
.. | .. |
---|
550 | 548 | |
---|
551 | 549 | desc->buf_addr = 0; |
---|
552 | 550 | desc->buf_size = 0; |
---|
553 | | - pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr), |
---|
| 551 | + dma_unmap_single(&pdev->dev, |
---|
| 552 | + dma_unmap_addr(desc_info, mapaddr), |
---|
554 | 553 | desc_info->data_size, direction); |
---|
555 | 554 | kfree(desc_info->data); |
---|
556 | 555 | } |
---|
.. | .. |
---|
619 | 618 | spin_lock_init(&rocker->cmd_ring_lock); |
---|
620 | 619 | |
---|
621 | 620 | err = rocker_dma_ring_bufs_alloc(rocker, &rocker->cmd_ring, |
---|
622 | | - PCI_DMA_BIDIRECTIONAL, PAGE_SIZE); |
---|
| 621 | + DMA_BIDIRECTIONAL, PAGE_SIZE); |
---|
623 | 622 | if (err) { |
---|
624 | 623 | dev_err(&pdev->dev, "failed to alloc command dma ring buffers\n"); |
---|
625 | 624 | goto err_dma_cmd_ring_bufs_alloc; |
---|
.. | .. |
---|
640 | 639 | } |
---|
641 | 640 | |
---|
642 | 641 | err = rocker_dma_ring_bufs_alloc(rocker, &rocker->event_ring, |
---|
643 | | - PCI_DMA_FROMDEVICE, PAGE_SIZE); |
---|
| 642 | + DMA_FROM_DEVICE, PAGE_SIZE); |
---|
644 | 643 | if (err) { |
---|
645 | 644 | dev_err(&pdev->dev, "failed to alloc event dma ring buffers\n"); |
---|
646 | 645 | goto err_dma_event_ring_bufs_alloc; |
---|
.. | .. |
---|
654 | 653 | rocker_dma_cmd_ring_waits_free(rocker); |
---|
655 | 654 | err_dma_cmd_ring_waits_alloc: |
---|
656 | 655 | rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring, |
---|
657 | | - PCI_DMA_BIDIRECTIONAL); |
---|
| 656 | + DMA_BIDIRECTIONAL); |
---|
658 | 657 | err_dma_cmd_ring_bufs_alloc: |
---|
659 | 658 | rocker_dma_ring_destroy(rocker, &rocker->cmd_ring); |
---|
660 | 659 | return err; |
---|
.. | .. |
---|
663 | 662 | static void rocker_dma_rings_fini(struct rocker *rocker) |
---|
664 | 663 | { |
---|
665 | 664 | rocker_dma_ring_bufs_free(rocker, &rocker->event_ring, |
---|
666 | | - PCI_DMA_BIDIRECTIONAL); |
---|
| 665 | + DMA_BIDIRECTIONAL); |
---|
667 | 666 | rocker_dma_ring_destroy(rocker, &rocker->event_ring); |
---|
668 | 667 | rocker_dma_cmd_ring_waits_free(rocker); |
---|
669 | 668 | rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring, |
---|
670 | | - PCI_DMA_BIDIRECTIONAL); |
---|
| 669 | + DMA_BIDIRECTIONAL); |
---|
671 | 670 | rocker_dma_ring_destroy(rocker, &rocker->cmd_ring); |
---|
672 | 671 | } |
---|
673 | 672 | |
---|
.. | .. |
---|
679 | 678 | struct pci_dev *pdev = rocker->pdev; |
---|
680 | 679 | dma_addr_t dma_handle; |
---|
681 | 680 | |
---|
682 | | - dma_handle = pci_map_single(pdev, skb->data, buf_len, |
---|
683 | | - PCI_DMA_FROMDEVICE); |
---|
684 | | - if (pci_dma_mapping_error(pdev, dma_handle)) |
---|
| 681 | + dma_handle = dma_map_single(&pdev->dev, skb->data, buf_len, |
---|
| 682 | + DMA_FROM_DEVICE); |
---|
| 683 | + if (dma_mapping_error(&pdev->dev, dma_handle)) |
---|
685 | 684 | return -EIO; |
---|
686 | 685 | if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_RX_FRAG_ADDR, dma_handle)) |
---|
687 | 686 | goto tlv_put_failure; |
---|
.. | .. |
---|
690 | 689 | return 0; |
---|
691 | 690 | |
---|
692 | 691 | tlv_put_failure: |
---|
693 | | - pci_unmap_single(pdev, dma_handle, buf_len, PCI_DMA_FROMDEVICE); |
---|
| 692 | + dma_unmap_single(&pdev->dev, dma_handle, buf_len, DMA_FROM_DEVICE); |
---|
694 | 693 | desc_info->tlv_size = 0; |
---|
695 | 694 | return -EMSGSIZE; |
---|
696 | 695 | } |
---|
.. | .. |
---|
738 | 737 | return; |
---|
739 | 738 | dma_handle = rocker_tlv_get_u64(attrs[ROCKER_TLV_RX_FRAG_ADDR]); |
---|
740 | 739 | len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_MAX_LEN]); |
---|
741 | | - pci_unmap_single(pdev, dma_handle, len, PCI_DMA_FROMDEVICE); |
---|
| 740 | + dma_unmap_single(&pdev->dev, dma_handle, len, DMA_FROM_DEVICE); |
---|
742 | 741 | } |
---|
743 | 742 | |
---|
744 | 743 | static void rocker_dma_rx_ring_skb_free(const struct rocker *rocker, |
---|
.. | .. |
---|
800 | 799 | } |
---|
801 | 800 | |
---|
802 | 801 | err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->tx_ring, |
---|
803 | | - PCI_DMA_TODEVICE, |
---|
| 802 | + DMA_TO_DEVICE, |
---|
804 | 803 | ROCKER_DMA_TX_DESC_SIZE); |
---|
805 | 804 | if (err) { |
---|
806 | 805 | netdev_err(rocker_port->dev, "failed to alloc tx dma ring buffers\n"); |
---|
.. | .. |
---|
817 | 816 | } |
---|
818 | 817 | |
---|
819 | 818 | err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->rx_ring, |
---|
820 | | - PCI_DMA_BIDIRECTIONAL, |
---|
| 819 | + DMA_BIDIRECTIONAL, |
---|
821 | 820 | ROCKER_DMA_RX_DESC_SIZE); |
---|
822 | 821 | if (err) { |
---|
823 | 822 | netdev_err(rocker_port->dev, "failed to alloc rx dma ring buffers\n"); |
---|
.. | .. |
---|
835 | 834 | |
---|
836 | 835 | err_dma_rx_ring_skbs_alloc: |
---|
837 | 836 | rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring, |
---|
838 | | - PCI_DMA_BIDIRECTIONAL); |
---|
| 837 | + DMA_BIDIRECTIONAL); |
---|
839 | 838 | err_dma_rx_ring_bufs_alloc: |
---|
840 | 839 | rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring); |
---|
841 | 840 | err_dma_rx_ring_create: |
---|
842 | 841 | rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring, |
---|
843 | | - PCI_DMA_TODEVICE); |
---|
| 842 | + DMA_TO_DEVICE); |
---|
844 | 843 | err_dma_tx_ring_bufs_alloc: |
---|
845 | 844 | rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring); |
---|
846 | 845 | return err; |
---|
.. | .. |
---|
852 | 851 | |
---|
853 | 852 | rocker_dma_rx_ring_skbs_free(rocker_port); |
---|
854 | 853 | rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring, |
---|
855 | | - PCI_DMA_BIDIRECTIONAL); |
---|
| 854 | + DMA_BIDIRECTIONAL); |
---|
856 | 855 | rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring); |
---|
857 | 856 | rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring, |
---|
858 | | - PCI_DMA_TODEVICE); |
---|
| 857 | + DMA_TO_DEVICE); |
---|
859 | 858 | rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring); |
---|
860 | 859 | } |
---|
861 | 860 | |
---|
.. | .. |
---|
1566 | 1565 | } |
---|
1567 | 1566 | |
---|
1568 | 1567 | static int |
---|
| 1568 | +rocker_world_port_attr_bridge_flags_support_get(const struct rocker_port * |
---|
| 1569 | + rocker_port, |
---|
| 1570 | + unsigned long * |
---|
| 1571 | + p_brport_flags_support) |
---|
| 1572 | +{ |
---|
| 1573 | + struct rocker_world_ops *wops = rocker_port->rocker->wops; |
---|
| 1574 | + |
---|
| 1575 | + if (!wops->port_attr_bridge_flags_support_get) |
---|
| 1576 | + return -EOPNOTSUPP; |
---|
| 1577 | + return wops->port_attr_bridge_flags_support_get(rocker_port, |
---|
| 1578 | + p_brport_flags_support); |
---|
| 1579 | +} |
---|
| 1580 | + |
---|
| 1581 | +static int |
---|
| 1582 | +rocker_world_port_attr_pre_bridge_flags_set(struct rocker_port *rocker_port, |
---|
| 1583 | + unsigned long brport_flags, |
---|
| 1584 | + struct switchdev_trans *trans) |
---|
| 1585 | +{ |
---|
| 1586 | + struct rocker_world_ops *wops = rocker_port->rocker->wops; |
---|
| 1587 | + unsigned long brport_flags_s; |
---|
| 1588 | + int err; |
---|
| 1589 | + |
---|
| 1590 | + if (!wops->port_attr_bridge_flags_set) |
---|
| 1591 | + return -EOPNOTSUPP; |
---|
| 1592 | + |
---|
| 1593 | + err = rocker_world_port_attr_bridge_flags_support_get(rocker_port, |
---|
| 1594 | + &brport_flags_s); |
---|
| 1595 | + if (err) |
---|
| 1596 | + return err; |
---|
| 1597 | + |
---|
| 1598 | + if (brport_flags & ~brport_flags_s) |
---|
| 1599 | + return -EINVAL; |
---|
| 1600 | + |
---|
| 1601 | + return 0; |
---|
| 1602 | +} |
---|
| 1603 | + |
---|
| 1604 | +static int |
---|
1569 | 1605 | rocker_world_port_attr_bridge_flags_set(struct rocker_port *rocker_port, |
---|
1570 | 1606 | unsigned long brport_flags, |
---|
1571 | 1607 | struct switchdev_trans *trans) |
---|
.. | .. |
---|
1580 | 1616 | |
---|
1581 | 1617 | return wops->port_attr_bridge_flags_set(rocker_port, brport_flags, |
---|
1582 | 1618 | trans); |
---|
1583 | | -} |
---|
1584 | | - |
---|
1585 | | -static int |
---|
1586 | | -rocker_world_port_attr_bridge_flags_get(const struct rocker_port *rocker_port, |
---|
1587 | | - unsigned long *p_brport_flags) |
---|
1588 | | -{ |
---|
1589 | | - struct rocker_world_ops *wops = rocker_port->rocker->wops; |
---|
1590 | | - |
---|
1591 | | - if (!wops->port_attr_bridge_flags_get) |
---|
1592 | | - return -EOPNOTSUPP; |
---|
1593 | | - return wops->port_attr_bridge_flags_get(rocker_port, p_brport_flags); |
---|
1594 | | -} |
---|
1595 | | - |
---|
1596 | | -static int |
---|
1597 | | -rocker_world_port_attr_bridge_flags_support_get(const struct rocker_port * |
---|
1598 | | - rocker_port, |
---|
1599 | | - unsigned long * |
---|
1600 | | - p_brport_flags_support) |
---|
1601 | | -{ |
---|
1602 | | - struct rocker_world_ops *wops = rocker_port->rocker->wops; |
---|
1603 | | - |
---|
1604 | | - if (!wops->port_attr_bridge_flags_support_get) |
---|
1605 | | - return -EOPNOTSUPP; |
---|
1606 | | - return wops->port_attr_bridge_flags_support_get(rocker_port, |
---|
1607 | | - p_brport_flags_support); |
---|
1608 | 1619 | } |
---|
1609 | 1620 | |
---|
1610 | 1621 | static int |
---|
.. | .. |
---|
1631 | 1642 | struct switchdev_trans *trans) |
---|
1632 | 1643 | { |
---|
1633 | 1644 | struct rocker_world_ops *wops = rocker_port->rocker->wops; |
---|
1634 | | - |
---|
1635 | | - if (netif_is_bridge_master(vlan->obj.orig_dev)) |
---|
1636 | | - return -EOPNOTSUPP; |
---|
1637 | 1645 | |
---|
1638 | 1646 | if (!wops->port_obj_vlan_add) |
---|
1639 | 1647 | return -EOPNOTSUPP; |
---|
.. | .. |
---|
1853 | 1861 | continue; |
---|
1854 | 1862 | dma_handle = rocker_tlv_get_u64(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR]); |
---|
1855 | 1863 | len = rocker_tlv_get_u16(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN]); |
---|
1856 | | - pci_unmap_single(pdev, dma_handle, len, DMA_TO_DEVICE); |
---|
| 1864 | + dma_unmap_single(&pdev->dev, dma_handle, len, DMA_TO_DEVICE); |
---|
1857 | 1865 | } |
---|
1858 | 1866 | } |
---|
1859 | 1867 | |
---|
.. | .. |
---|
1866 | 1874 | dma_addr_t dma_handle; |
---|
1867 | 1875 | struct rocker_tlv *frag; |
---|
1868 | 1876 | |
---|
1869 | | - dma_handle = pci_map_single(pdev, buf, buf_len, DMA_TO_DEVICE); |
---|
1870 | | - if (unlikely(pci_dma_mapping_error(pdev, dma_handle))) { |
---|
| 1877 | + dma_handle = dma_map_single(&pdev->dev, buf, buf_len, DMA_TO_DEVICE); |
---|
| 1878 | + if (unlikely(dma_mapping_error(&pdev->dev, dma_handle))) { |
---|
1871 | 1879 | if (net_ratelimit()) |
---|
1872 | 1880 | netdev_err(rocker_port->dev, "failed to dma map tx frag\n"); |
---|
1873 | 1881 | return -EIO; |
---|
.. | .. |
---|
1887 | 1895 | nest_cancel: |
---|
1888 | 1896 | rocker_tlv_nest_cancel(desc_info, frag); |
---|
1889 | 1897 | unmap_frag: |
---|
1890 | | - pci_unmap_single(pdev, dma_handle, buf_len, DMA_TO_DEVICE); |
---|
| 1898 | + dma_unmap_single(&pdev->dev, dma_handle, buf_len, DMA_TO_DEVICE); |
---|
1891 | 1899 | return -EMSGSIZE; |
---|
1892 | 1900 | } |
---|
1893 | 1901 | |
---|
.. | .. |
---|
2029 | 2037 | err); |
---|
2030 | 2038 | } |
---|
2031 | 2039 | |
---|
| 2040 | +static int rocker_port_get_port_parent_id(struct net_device *dev, |
---|
| 2041 | + struct netdev_phys_item_id *ppid) |
---|
| 2042 | +{ |
---|
| 2043 | + const struct rocker_port *rocker_port = netdev_priv(dev); |
---|
| 2044 | + const struct rocker *rocker = rocker_port->rocker; |
---|
| 2045 | + |
---|
| 2046 | + ppid->id_len = sizeof(rocker->hw.id); |
---|
| 2047 | + memcpy(&ppid->id, &rocker->hw.id, ppid->id_len); |
---|
| 2048 | + |
---|
| 2049 | + return 0; |
---|
| 2050 | +} |
---|
| 2051 | + |
---|
2032 | 2052 | static const struct net_device_ops rocker_port_netdev_ops = { |
---|
2033 | 2053 | .ndo_open = rocker_port_open, |
---|
2034 | 2054 | .ndo_stop = rocker_port_stop, |
---|
.. | .. |
---|
2038 | 2058 | .ndo_get_phys_port_name = rocker_port_get_phys_port_name, |
---|
2039 | 2059 | .ndo_change_proto_down = rocker_port_change_proto_down, |
---|
2040 | 2060 | .ndo_neigh_destroy = rocker_port_neigh_destroy, |
---|
| 2061 | + .ndo_get_port_parent_id = rocker_port_get_port_parent_id, |
---|
2041 | 2062 | }; |
---|
2042 | 2063 | |
---|
2043 | 2064 | /******************** |
---|
2044 | 2065 | * swdev interface |
---|
2045 | 2066 | ********************/ |
---|
2046 | | - |
---|
2047 | | -static int rocker_port_attr_get(struct net_device *dev, |
---|
2048 | | - struct switchdev_attr *attr) |
---|
2049 | | -{ |
---|
2050 | | - const struct rocker_port *rocker_port = netdev_priv(dev); |
---|
2051 | | - const struct rocker *rocker = rocker_port->rocker; |
---|
2052 | | - int err = 0; |
---|
2053 | | - |
---|
2054 | | - switch (attr->id) { |
---|
2055 | | - case SWITCHDEV_ATTR_ID_PORT_PARENT_ID: |
---|
2056 | | - attr->u.ppid.id_len = sizeof(rocker->hw.id); |
---|
2057 | | - memcpy(&attr->u.ppid.id, &rocker->hw.id, attr->u.ppid.id_len); |
---|
2058 | | - break; |
---|
2059 | | - case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: |
---|
2060 | | - err = rocker_world_port_attr_bridge_flags_get(rocker_port, |
---|
2061 | | - &attr->u.brport_flags); |
---|
2062 | | - break; |
---|
2063 | | - case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS_SUPPORT: |
---|
2064 | | - err = rocker_world_port_attr_bridge_flags_support_get(rocker_port, |
---|
2065 | | - &attr->u.brport_flags_support); |
---|
2066 | | - break; |
---|
2067 | | - default: |
---|
2068 | | - return -EOPNOTSUPP; |
---|
2069 | | - } |
---|
2070 | | - |
---|
2071 | | - return err; |
---|
2072 | | -} |
---|
2073 | 2067 | |
---|
2074 | 2068 | static int rocker_port_attr_set(struct net_device *dev, |
---|
2075 | 2069 | const struct switchdev_attr *attr, |
---|
.. | .. |
---|
2083 | 2077 | err = rocker_world_port_attr_stp_state_set(rocker_port, |
---|
2084 | 2078 | attr->u.stp_state, |
---|
2085 | 2079 | trans); |
---|
| 2080 | + break; |
---|
| 2081 | + case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS: |
---|
| 2082 | + err = rocker_world_port_attr_pre_bridge_flags_set(rocker_port, |
---|
| 2083 | + attr->u.brport_flags, |
---|
| 2084 | + trans); |
---|
2086 | 2085 | break; |
---|
2087 | 2086 | case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: |
---|
2088 | 2087 | err = rocker_world_port_attr_bridge_flags_set(rocker_port, |
---|
.. | .. |
---|
2142 | 2141 | return err; |
---|
2143 | 2142 | } |
---|
2144 | 2143 | |
---|
2145 | | -static const struct switchdev_ops rocker_port_switchdev_ops = { |
---|
2146 | | - .switchdev_port_attr_get = rocker_port_attr_get, |
---|
2147 | | - .switchdev_port_attr_set = rocker_port_attr_set, |
---|
2148 | | - .switchdev_port_obj_add = rocker_port_obj_add, |
---|
2149 | | - .switchdev_port_obj_del = rocker_port_obj_del, |
---|
2150 | | -}; |
---|
2151 | | - |
---|
2152 | 2144 | struct rocker_fib_event_work { |
---|
2153 | 2145 | struct work_struct work; |
---|
2154 | 2146 | union { |
---|
.. | .. |
---|
2170 | 2162 | /* Protect internal structures from changes */ |
---|
2171 | 2163 | rtnl_lock(); |
---|
2172 | 2164 | switch (fib_work->event) { |
---|
2173 | | - case FIB_EVENT_ENTRY_ADD: |
---|
| 2165 | + case FIB_EVENT_ENTRY_REPLACE: |
---|
2174 | 2166 | err = rocker_world_fib4_add(rocker, &fib_work->fen_info); |
---|
2175 | 2167 | if (err) |
---|
2176 | 2168 | rocker_world_fib4_abort(rocker); |
---|
.. | .. |
---|
2180 | 2172 | rocker_world_fib4_del(rocker, &fib_work->fen_info); |
---|
2181 | 2173 | fib_info_put(fib_work->fen_info.fi); |
---|
2182 | 2174 | break; |
---|
2183 | | - case FIB_EVENT_RULE_ADD: /* fall through */ |
---|
| 2175 | + case FIB_EVENT_RULE_ADD: |
---|
2184 | 2176 | case FIB_EVENT_RULE_DEL: |
---|
2185 | 2177 | rule = fib_work->fr_info.rule; |
---|
2186 | 2178 | if (!fib4_rule_default(rule)) |
---|
.. | .. |
---|
2212 | 2204 | fib_work->event = event; |
---|
2213 | 2205 | |
---|
2214 | 2206 | switch (event) { |
---|
2215 | | - case FIB_EVENT_ENTRY_ADD: /* fall through */ |
---|
| 2207 | + case FIB_EVENT_ENTRY_REPLACE: |
---|
2216 | 2208 | case FIB_EVENT_ENTRY_DEL: |
---|
| 2209 | + if (info->family == AF_INET) { |
---|
| 2210 | + struct fib_entry_notifier_info *fen_info = ptr; |
---|
| 2211 | + |
---|
| 2212 | + if (fen_info->fi->fib_nh_is_v6) { |
---|
| 2213 | + NL_SET_ERR_MSG_MOD(info->extack, "IPv6 gateway with IPv4 route is not supported"); |
---|
| 2214 | + kfree(fib_work); |
---|
| 2215 | + return notifier_from_errno(-EINVAL); |
---|
| 2216 | + } |
---|
| 2217 | + if (fen_info->fi->nh) { |
---|
| 2218 | + NL_SET_ERR_MSG_MOD(info->extack, "IPv4 route with nexthop objects is not supported"); |
---|
| 2219 | + kfree(fib_work); |
---|
| 2220 | + return notifier_from_errno(-EINVAL); |
---|
| 2221 | + } |
---|
| 2222 | + } |
---|
| 2223 | + |
---|
2217 | 2224 | memcpy(&fib_work->fen_info, ptr, sizeof(fib_work->fen_info)); |
---|
2218 | 2225 | /* Take reference on fib_info to prevent it from being |
---|
2219 | 2226 | * freed while work is queued. Release it afterwards. |
---|
2220 | 2227 | */ |
---|
2221 | 2228 | fib_info_hold(fib_work->fen_info.fi); |
---|
2222 | 2229 | break; |
---|
2223 | | - case FIB_EVENT_RULE_ADD: /* fall through */ |
---|
| 2230 | + case FIB_EVENT_RULE_ADD: |
---|
2224 | 2231 | case FIB_EVENT_RULE_DEL: |
---|
2225 | 2232 | memcpy(&fib_work->fr_info, ptr, sizeof(fib_work->fr_info)); |
---|
2226 | 2233 | fib_rule_get(fib_work->fr_info.rule); |
---|
.. | .. |
---|
2602 | 2609 | rocker_port_dev_addr_init(rocker_port); |
---|
2603 | 2610 | dev->netdev_ops = &rocker_port_netdev_ops; |
---|
2604 | 2611 | dev->ethtool_ops = &rocker_port_ethtool_ops; |
---|
2605 | | - dev->switchdev_ops = &rocker_port_switchdev_ops; |
---|
2606 | 2612 | netif_tx_napi_add(dev, &rocker_port->napi_tx, rocker_port_poll_tx, |
---|
2607 | 2613 | NAPI_POLL_WEIGHT); |
---|
2608 | 2614 | netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx, |
---|
.. | .. |
---|
2713 | 2719 | return dev->netdev_ops == &rocker_port_netdev_ops; |
---|
2714 | 2720 | } |
---|
2715 | 2721 | |
---|
| 2722 | +static int |
---|
| 2723 | +rocker_switchdev_port_attr_set_event(struct net_device *netdev, |
---|
| 2724 | + struct switchdev_notifier_port_attr_info *port_attr_info) |
---|
| 2725 | +{ |
---|
| 2726 | + int err; |
---|
| 2727 | + |
---|
| 2728 | + err = rocker_port_attr_set(netdev, port_attr_info->attr, |
---|
| 2729 | + port_attr_info->trans); |
---|
| 2730 | + |
---|
| 2731 | + port_attr_info->handled = true; |
---|
| 2732 | + return notifier_from_errno(err); |
---|
| 2733 | +} |
---|
| 2734 | + |
---|
2716 | 2735 | struct rocker_switchdev_event_work { |
---|
2717 | 2736 | struct work_struct work; |
---|
2718 | 2737 | struct switchdev_notifier_fdb_info fdb_info; |
---|
.. | .. |
---|
2728 | 2747 | |
---|
2729 | 2748 | info.addr = recv_info->addr; |
---|
2730 | 2749 | info.vid = recv_info->vid; |
---|
| 2750 | + info.offloaded = true; |
---|
2731 | 2751 | call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, |
---|
2732 | | - rocker_port->dev, &info.info); |
---|
| 2752 | + rocker_port->dev, &info.info, NULL); |
---|
2733 | 2753 | } |
---|
2734 | 2754 | |
---|
2735 | 2755 | static void rocker_switchdev_event_work(struct work_struct *work) |
---|
.. | .. |
---|
2781 | 2801 | if (!rocker_port_dev_check(dev)) |
---|
2782 | 2802 | return NOTIFY_DONE; |
---|
2783 | 2803 | |
---|
| 2804 | + if (event == SWITCHDEV_PORT_ATTR_SET) |
---|
| 2805 | + return rocker_switchdev_port_attr_set_event(dev, ptr); |
---|
| 2806 | + |
---|
2784 | 2807 | rocker_port = netdev_priv(dev); |
---|
2785 | 2808 | switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC); |
---|
2786 | 2809 | if (WARN_ON(!switchdev_work)) |
---|
.. | .. |
---|
2791 | 2814 | switchdev_work->event = event; |
---|
2792 | 2815 | |
---|
2793 | 2816 | switch (event) { |
---|
2794 | | - case SWITCHDEV_FDB_ADD_TO_DEVICE: /* fall through */ |
---|
| 2817 | + case SWITCHDEV_FDB_ADD_TO_DEVICE: |
---|
2795 | 2818 | case SWITCHDEV_FDB_DEL_TO_DEVICE: |
---|
2796 | 2819 | memcpy(&switchdev_work->fdb_info, ptr, |
---|
2797 | 2820 | sizeof(switchdev_work->fdb_info)); |
---|
2798 | 2821 | switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC); |
---|
| 2822 | + if (unlikely(!switchdev_work->fdb_info.addr)) { |
---|
| 2823 | + kfree(switchdev_work); |
---|
| 2824 | + return NOTIFY_BAD; |
---|
| 2825 | + } |
---|
| 2826 | + |
---|
2799 | 2827 | ether_addr_copy((u8 *)switchdev_work->fdb_info.addr, |
---|
2800 | 2828 | fdb_info->addr); |
---|
2801 | 2829 | /* Take a reference on the rocker device */ |
---|
.. | .. |
---|
2811 | 2839 | return NOTIFY_DONE; |
---|
2812 | 2840 | } |
---|
2813 | 2841 | |
---|
| 2842 | +static int |
---|
| 2843 | +rocker_switchdev_port_obj_event(unsigned long event, struct net_device *netdev, |
---|
| 2844 | + struct switchdev_notifier_port_obj_info *port_obj_info) |
---|
| 2845 | +{ |
---|
| 2846 | + int err = -EOPNOTSUPP; |
---|
| 2847 | + |
---|
| 2848 | + switch (event) { |
---|
| 2849 | + case SWITCHDEV_PORT_OBJ_ADD: |
---|
| 2850 | + err = rocker_port_obj_add(netdev, port_obj_info->obj, |
---|
| 2851 | + port_obj_info->trans); |
---|
| 2852 | + break; |
---|
| 2853 | + case SWITCHDEV_PORT_OBJ_DEL: |
---|
| 2854 | + err = rocker_port_obj_del(netdev, port_obj_info->obj); |
---|
| 2855 | + break; |
---|
| 2856 | + } |
---|
| 2857 | + |
---|
| 2858 | + port_obj_info->handled = true; |
---|
| 2859 | + return notifier_from_errno(err); |
---|
| 2860 | +} |
---|
| 2861 | + |
---|
| 2862 | +static int rocker_switchdev_blocking_event(struct notifier_block *unused, |
---|
| 2863 | + unsigned long event, void *ptr) |
---|
| 2864 | +{ |
---|
| 2865 | + struct net_device *dev = switchdev_notifier_info_to_dev(ptr); |
---|
| 2866 | + |
---|
| 2867 | + if (!rocker_port_dev_check(dev)) |
---|
| 2868 | + return NOTIFY_DONE; |
---|
| 2869 | + |
---|
| 2870 | + switch (event) { |
---|
| 2871 | + case SWITCHDEV_PORT_OBJ_ADD: |
---|
| 2872 | + case SWITCHDEV_PORT_OBJ_DEL: |
---|
| 2873 | + return rocker_switchdev_port_obj_event(event, dev, ptr); |
---|
| 2874 | + case SWITCHDEV_PORT_ATTR_SET: |
---|
| 2875 | + return rocker_switchdev_port_attr_set_event(dev, ptr); |
---|
| 2876 | + } |
---|
| 2877 | + |
---|
| 2878 | + return NOTIFY_DONE; |
---|
| 2879 | +} |
---|
| 2880 | + |
---|
2814 | 2881 | static struct notifier_block rocker_switchdev_notifier = { |
---|
2815 | 2882 | .notifier_call = rocker_switchdev_event, |
---|
2816 | 2883 | }; |
---|
2817 | 2884 | |
---|
| 2885 | +static struct notifier_block rocker_switchdev_blocking_notifier = { |
---|
| 2886 | + .notifier_call = rocker_switchdev_blocking_event, |
---|
| 2887 | +}; |
---|
| 2888 | + |
---|
2818 | 2889 | static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id) |
---|
2819 | 2890 | { |
---|
| 2891 | + struct notifier_block *nb; |
---|
2820 | 2892 | struct rocker *rocker; |
---|
2821 | 2893 | int err; |
---|
2822 | 2894 | |
---|
.. | .. |
---|
2836 | 2908 | goto err_pci_request_regions; |
---|
2837 | 2909 | } |
---|
2838 | 2910 | |
---|
2839 | | - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); |
---|
| 2911 | + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); |
---|
2840 | 2912 | if (!err) { |
---|
2841 | | - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); |
---|
| 2913 | + err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); |
---|
2842 | 2914 | if (err) { |
---|
2843 | | - dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n"); |
---|
| 2915 | + dev_err(&pdev->dev, "dma_set_coherent_mask failed\n"); |
---|
2844 | 2916 | goto err_pci_set_dma_mask; |
---|
2845 | 2917 | } |
---|
2846 | 2918 | } else { |
---|
2847 | | - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); |
---|
| 2919 | + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); |
---|
2848 | 2920 | if (err) { |
---|
2849 | | - dev_err(&pdev->dev, "pci_set_dma_mask failed\n"); |
---|
| 2921 | + dev_err(&pdev->dev, "dma_set_mask failed\n"); |
---|
2850 | 2922 | goto err_pci_set_dma_mask; |
---|
2851 | 2923 | } |
---|
2852 | 2924 | } |
---|
.. | .. |
---|
2922 | 2994 | * the device, so no need to pass a callback. |
---|
2923 | 2995 | */ |
---|
2924 | 2996 | rocker->fib_nb.notifier_call = rocker_router_fib_event; |
---|
2925 | | - err = register_fib_notifier(&rocker->fib_nb, NULL); |
---|
| 2997 | + err = register_fib_notifier(&init_net, &rocker->fib_nb, NULL, NULL); |
---|
2926 | 2998 | if (err) |
---|
2927 | 2999 | goto err_register_fib_notifier; |
---|
2928 | 3000 | |
---|
.. | .. |
---|
2932 | 3004 | goto err_register_switchdev_notifier; |
---|
2933 | 3005 | } |
---|
2934 | 3006 | |
---|
| 3007 | + nb = &rocker_switchdev_blocking_notifier; |
---|
| 3008 | + err = register_switchdev_blocking_notifier(nb); |
---|
| 3009 | + if (err) { |
---|
| 3010 | + dev_err(&pdev->dev, "Failed to register switchdev blocking notifier\n"); |
---|
| 3011 | + goto err_register_switchdev_blocking_notifier; |
---|
| 3012 | + } |
---|
| 3013 | + |
---|
2935 | 3014 | rocker->hw.id = rocker_read64(rocker, SWITCH_ID); |
---|
2936 | 3015 | |
---|
2937 | 3016 | dev_info(&pdev->dev, "Rocker switch with id %*phN\n", |
---|
.. | .. |
---|
2939 | 3018 | |
---|
2940 | 3019 | return 0; |
---|
2941 | 3020 | |
---|
| 3021 | +err_register_switchdev_blocking_notifier: |
---|
| 3022 | + unregister_switchdev_notifier(&rocker_switchdev_notifier); |
---|
2942 | 3023 | err_register_switchdev_notifier: |
---|
2943 | | - unregister_fib_notifier(&rocker->fib_nb); |
---|
| 3024 | + unregister_fib_notifier(&init_net, &rocker->fib_nb); |
---|
2944 | 3025 | err_register_fib_notifier: |
---|
2945 | 3026 | rocker_remove_ports(rocker); |
---|
2946 | 3027 | err_probe_ports: |
---|
.. | .. |
---|
2970 | 3051 | static void rocker_remove(struct pci_dev *pdev) |
---|
2971 | 3052 | { |
---|
2972 | 3053 | struct rocker *rocker = pci_get_drvdata(pdev); |
---|
| 3054 | + struct notifier_block *nb; |
---|
| 3055 | + |
---|
| 3056 | + nb = &rocker_switchdev_blocking_notifier; |
---|
| 3057 | + unregister_switchdev_blocking_notifier(nb); |
---|
2973 | 3058 | |
---|
2974 | 3059 | unregister_switchdev_notifier(&rocker_switchdev_notifier); |
---|
2975 | | - unregister_fib_notifier(&rocker->fib_nb); |
---|
| 3060 | + unregister_fib_notifier(&init_net, &rocker->fib_nb); |
---|
2976 | 3061 | rocker_remove_ports(rocker); |
---|
2977 | 3062 | rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET); |
---|
2978 | 3063 | destroy_workqueue(rocker->rocker_owq); |
---|
.. | .. |
---|
3017 | 3102 | struct rocker_port *port; |
---|
3018 | 3103 | }; |
---|
3019 | 3104 | |
---|
3020 | | -static int rocker_lower_dev_walk(struct net_device *lower_dev, void *_data) |
---|
| 3105 | +static int rocker_lower_dev_walk(struct net_device *lower_dev, |
---|
| 3106 | + struct netdev_nested_priv *priv) |
---|
3021 | 3107 | { |
---|
3022 | | - struct rocker_walk_data *data = _data; |
---|
| 3108 | + struct rocker_walk_data *data = (struct rocker_walk_data *)priv->data; |
---|
3023 | 3109 | int ret = 0; |
---|
3024 | 3110 | |
---|
3025 | 3111 | if (rocker_port_dev_check_under(lower_dev, data->rocker)) { |
---|
.. | .. |
---|
3033 | 3119 | struct rocker_port *rocker_port_dev_lower_find(struct net_device *dev, |
---|
3034 | 3120 | struct rocker *rocker) |
---|
3035 | 3121 | { |
---|
| 3122 | + struct netdev_nested_priv priv; |
---|
3036 | 3123 | struct rocker_walk_data data; |
---|
3037 | 3124 | |
---|
3038 | 3125 | if (rocker_port_dev_check_under(dev, rocker)) |
---|
.. | .. |
---|
3040 | 3127 | |
---|
3041 | 3128 | data.rocker = rocker; |
---|
3042 | 3129 | data.port = NULL; |
---|
3043 | | - netdev_walk_all_lower_dev(dev, rocker_lower_dev_walk, &data); |
---|
| 3130 | + priv.data = (void *)&data; |
---|
| 3131 | + netdev_walk_all_lower_dev(dev, rocker_lower_dev_walk, &priv); |
---|
3044 | 3132 | |
---|
3045 | 3133 | return data.port; |
---|
3046 | 3134 | } |
---|