@@ -6,11 +6,16 @@
 #include <linux/pm_runtime.h>
 #include <linux/acpi.h>
 #include <linux/of_mdio.h>
+#include <linux/of_net.h>
 #include <linux/etherdevice.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
+#include <linux/netlink.h>
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
 
 #include <net/tcp.h>
+#include <net/page_pool.h>
 #include <net/ip6_checksum.h>
 
 #define NETSEC_REG_SOFT_RST 0x104
@@ -234,33 +239,59 @@
 
 #define DESC_NUM 256
 
+#define NETSEC_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
+#define NETSEC_RXBUF_HEADROOM (max(XDP_PACKET_HEADROOM, NET_SKB_PAD) + \
+			       NET_IP_ALIGN)
+#define NETSEC_RX_BUF_NON_DATA (NETSEC_RXBUF_HEADROOM + \
+				SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define NETSEC_RX_BUF_SIZE (PAGE_SIZE - NETSEC_RX_BUF_NON_DATA)
+
 #define DESC_SZ sizeof(struct netsec_de)
 
 #define NETSEC_F_NETSEC_VER_MAJOR_NUM(x) ((x) & 0xffff0000)
+
+#define NETSEC_XDP_PASS     0
+#define NETSEC_XDP_CONSUMED BIT(0)
+#define NETSEC_XDP_TX       BIT(1)
+#define NETSEC_XDP_REDIR    BIT(2)
 
 enum ring_id {
	NETSEC_RING_TX = 0,
	NETSEC_RING_RX
 };
 
+enum buf_type {
+	TYPE_NETSEC_SKB = 0,
+	TYPE_NETSEC_XDP_TX,
+	TYPE_NETSEC_XDP_NDO,
+};
+
 struct netsec_desc {
-	struct sk_buff *skb;
+	union {
+		struct sk_buff *skb;
+		struct xdp_frame *xdpf;
+	};
	dma_addr_t dma_addr;
	void *addr;
	u16 len;
+	u8 buf_type;
 };
 
 struct netsec_desc_ring {
	dma_addr_t desc_dma;
	struct netsec_desc *desc;
	void *vaddr;
-	u16 pkt_cnt;
	u16 head, tail;
+	u16 xdp_xmit; /* netsec_xdp_xmit packets */
+	struct page_pool *page_pool;
+	struct xdp_rxq_info xdp_rxq;
+	spinlock_t lock; /* XDP tx queue locking */
 };
 
 struct netsec_priv {
	struct netsec_desc_ring desc_ring[NETSEC_RING_MAX];
	struct ethtool_coalesce et_coalesce;
+	struct bpf_prog *xdp_prog;
	spinlock_t reglock; /* protect reg access */
	struct napi_struct napi;
	phy_interface_t phy_interface;
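
For a concrete feel of the NETSEC_RX_BUF_SIZE arithmetic above, here is a small userspace sketch (illustrative only: XDP_PACKET_HEADROOM is 256 in the kernel, while NET_SKB_PAD, NET_IP_ALIGN and the skb_shared_info size are arch/config dependent, so the figures below assume typical arm64 values):

/* rx_buf_layout.c - rough arithmetic behind NETSEC_RX_BUF_SIZE */
#include <stdio.h>

#define PAGE_SIZE           4096
#define XDP_PACKET_HEADROOM  256  /* fixed by the XDP core */
#define NET_SKB_PAD           64  /* assumed: max(32, L1_CACHE_BYTES) */
#define NET_IP_ALIGN           2  /* assumed non-x86 default */
#define SHINFO_SZ            320  /* assumed SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) */

int main(void)
{
	int headroom = (XDP_PACKET_HEADROOM > NET_SKB_PAD ?
			XDP_PACKET_HEADROOM : NET_SKB_PAD) + NET_IP_ALIGN;
	int non_data = headroom + SHINFO_SZ;

	printf("headroom   : %d\n", headroom);              /* 258  */
	printf("non-data   : %d\n", non_data);              /* 578  */
	printf("max payload: %d\n", PAGE_SIZE - non_data);  /* 3518 */
	return 0;
}

So with 4 KiB pages, one page carries one MTU-sized frame plus its XDP headroom and the trailing skb_shared_info, which is why the driver can use one page per descriptor for both the XDP and non-XDP cases.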
@@ -559,6 +590,8 @@
 }
 
 static const struct ethtool_ops netsec_ethtool_ops = {
+	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+				     ETHTOOL_COALESCE_MAX_FRAMES,
	.get_drvinfo = netsec_et_get_drvinfo,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
@@ -571,34 +604,10 @@
 
 /************* NETDEV_OPS FOLLOW *************/
 
-static struct sk_buff *netsec_alloc_skb(struct netsec_priv *priv,
-					struct netsec_desc *desc)
-{
-	struct sk_buff *skb;
-
-	if (device_get_dma_attr(priv->dev) == DEV_DMA_COHERENT) {
-		skb = netdev_alloc_skb_ip_align(priv->ndev, desc->len);
-	} else {
-		desc->len = L1_CACHE_ALIGN(desc->len);
-		skb = netdev_alloc_skb(priv->ndev, desc->len);
-	}
-	if (!skb)
-		return NULL;
-
-	desc->addr = skb->data;
-	desc->dma_addr = dma_map_single(priv->dev, desc->addr, desc->len,
-					DMA_FROM_DEVICE);
-	if (dma_mapping_error(priv->dev, desc->dma_addr)) {
-		dev_kfree_skb_any(skb);
-		return NULL;
-	}
-	return skb;
-}
 
 static void netsec_set_rx_de(struct netsec_priv *priv,
			     struct netsec_desc_ring *dring, u16 idx,
-			     const struct netsec_desc *desc,
-			     struct sk_buff *skb)
+			     const struct netsec_desc *desc)
 {
	struct netsec_de *de = dring->vaddr + DESC_SZ * idx;
	u32 attr = (1 << NETSEC_RX_PKT_OWN_FIELD) |
@@ -617,219 +626,156 @@
	dring->desc[idx].dma_addr = desc->dma_addr;
	dring->desc[idx].addr = desc->addr;
	dring->desc[idx].len = desc->len;
-	dring->desc[idx].skb = skb;
 }
 
-static struct sk_buff *netsec_get_rx_de(struct netsec_priv *priv,
-					struct netsec_desc_ring *dring,
-					u16 idx,
-					struct netsec_rx_pkt_info *rxpi,
-					struct netsec_desc *desc, u16 *len)
-{
-	struct netsec_de de = {};
-
-	memcpy(&de, dring->vaddr + DESC_SZ * idx, DESC_SZ);
-
-	*len = de.buf_len_info >> 16;
-
-	rxpi->err_flag = (de.attr >> NETSEC_RX_PKT_ER_FIELD) & 1;
-	rxpi->rx_cksum_result = (de.attr >> NETSEC_RX_PKT_CO_FIELD) & 3;
-	rxpi->err_code = (de.attr >> NETSEC_RX_PKT_ERR_FIELD) &
-		NETSEC_RX_PKT_ERR_MASK;
-	*desc = dring->desc[idx];
-	return desc->skb;
-}
-
-static struct sk_buff *netsec_get_rx_pkt_data(struct netsec_priv *priv,
-					      struct netsec_rx_pkt_info *rxpi,
-					      struct netsec_desc *desc,
-					      u16 *len)
-{
-	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
-	struct sk_buff *tmp_skb, *skb = NULL;
-	struct netsec_desc td;
-	int tail;
-
-	*rxpi = (struct netsec_rx_pkt_info){};
-
-	td.len = priv->ndev->mtu + 22;
-
-	tmp_skb = netsec_alloc_skb(priv, &td);
-
-	tail = dring->tail;
-
-	if (!tmp_skb) {
-		netsec_set_rx_de(priv, dring, tail, &dring->desc[tail],
-				 dring->desc[tail].skb);
-	} else {
-		skb = netsec_get_rx_de(priv, dring, tail, rxpi, desc, len);
-		netsec_set_rx_de(priv, dring, tail, &td, tmp_skb);
-	}
-
-	/* move tail ahead */
-	dring->tail = (dring->tail + 1) % DESC_NUM;
-
-	return skb;
-}
-
-static int netsec_clean_tx_dring(struct netsec_priv *priv, int budget)
+static bool netsec_clean_tx_dring(struct netsec_priv *priv)
 {
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
-	unsigned int pkts, bytes;
+	struct netsec_de *entry;
+	int tail = dring->tail;
+	unsigned int bytes;
+	int cnt = 0;
 
-	dring->pkt_cnt += netsec_read(priv, NETSEC_REG_NRM_TX_DONE_PKTCNT);
+	spin_lock(&dring->lock);
 
-	if (dring->pkt_cnt < budget)
-		budget = dring->pkt_cnt;
-
-	pkts = 0;
	bytes = 0;
+	entry = dring->vaddr + DESC_SZ * tail;
 
-	while (pkts < budget) {
+	while (!(entry->attr & (1U << NETSEC_TX_SHIFT_OWN_FIELD)) &&
+	       cnt < DESC_NUM) {
		struct netsec_desc *desc;
-		struct netsec_de *entry;
-		int tail, eop;
+		int eop;
 
-		tail = dring->tail;
+		desc = &dring->desc[tail];
+		eop = (entry->attr >> NETSEC_TX_LAST) & 1;
+		dma_rmb();
 
+		/* if buf_type is either TYPE_NETSEC_SKB or
+		 * TYPE_NETSEC_XDP_NDO we mapped it
+		 */
+		if (desc->buf_type != TYPE_NETSEC_XDP_TX)
+			dma_unmap_single(priv->dev, desc->dma_addr, desc->len,
+					 DMA_TO_DEVICE);
+
+		if (!eop)
+			goto next;
+
+		if (desc->buf_type == TYPE_NETSEC_SKB) {
+			bytes += desc->skb->len;
+			dev_kfree_skb(desc->skb);
+		} else {
+			bytes += desc->xdpf->len;
+			xdp_return_frame(desc->xdpf);
+		}
+next:
+		/* clean up so netsec_uninit_pkt_dring() won't free the skb
+		 * again
+		 */
+		*desc = (struct netsec_desc){};
+
+		/* entry->attr is not going to be accessed by the NIC until
+		 * netsec_set_tx_de() is called. No need for a dma_wmb() here
+		 */
+		entry->attr = 1U << NETSEC_TX_SHIFT_OWN_FIELD;
		/* move tail ahead */
		dring->tail = (tail + 1) % DESC_NUM;
 
-		desc = &dring->desc[tail];
+		tail = dring->tail;
		entry = dring->vaddr + DESC_SZ * tail;
-
-		eop = (entry->attr >> NETSEC_TX_LAST) & 1;
-
-		dma_unmap_single(priv->dev, desc->dma_addr, desc->len,
-				 DMA_TO_DEVICE);
-		if (eop) {
-			pkts++;
-			bytes += desc->skb->len;
-			dev_kfree_skb(desc->skb);
-		}
-		*desc = (struct netsec_desc){};
+		cnt++;
	}
-	dring->pkt_cnt -= budget;
 
-	priv->ndev->stats.tx_packets += budget;
+	spin_unlock(&dring->lock);
+
+	if (!cnt)
+		return false;
+
+	/* reading the register clears the irq */
+	netsec_read(priv, NETSEC_REG_NRM_TX_DONE_PKTCNT);
+
+	priv->ndev->stats.tx_packets += cnt;
	priv->ndev->stats.tx_bytes += bytes;
 
-	netdev_completed_queue(priv->ndev, budget, bytes);
+	netdev_completed_queue(priv->ndev, cnt, bytes);
 
-	return budget;
+	return true;
 }
 
-static int netsec_process_tx(struct netsec_priv *priv, int budget)
+static void netsec_process_tx(struct netsec_priv *priv)
 {
	struct net_device *ndev = priv->ndev;
-	int new, done = 0;
+	bool cleaned;
 
-	do {
-		new = netsec_clean_tx_dring(priv, budget);
-		done += new;
-		budget -= new;
-	} while (new);
+	cleaned = netsec_clean_tx_dring(priv);
 
-	if (done && netif_queue_stopped(ndev))
+	if (cleaned && netif_queue_stopped(ndev)) {
+		/* Make sure we update the value, anyone stopping the queue
+		 * after this will read the proper consumer idx
+		 */
+		smp_wmb();
		netif_wake_queue(ndev);
-
-	return done;
+	}
 }
 
-static int netsec_process_rx(struct netsec_priv *priv, int budget)
+static void *netsec_alloc_rx_data(struct netsec_priv *priv,
+				  dma_addr_t *dma_handle, u16 *desc_len)
+
+{
+
+	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
+	struct page *page;
+
+	page = page_pool_dev_alloc_pages(dring->page_pool);
+	if (!page)
+		return NULL;
+
+	/* We allocate the same buffer length for XDP and non-XDP cases.
+	 * page_pool API will map the whole page, skip what's needed for
+	 * network payloads and/or XDP
+	 */
+	*dma_handle = page_pool_get_dma_addr(page) + NETSEC_RXBUF_HEADROOM;
+	/* Make sure the incoming payload fits in the page for XDP and non-XDP
+	 * cases and reserve enough space for headroom + skb_shared_info
+	 */
+	*desc_len = NETSEC_RX_BUF_SIZE;
+
+	return page_address(page);
+}
+
+static void netsec_rx_fill(struct netsec_priv *priv, u16 from, u16 num)
 {
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
-	struct net_device *ndev = priv->ndev;
-	struct netsec_rx_pkt_info rx_info;
-	int done = 0;
-	struct netsec_desc desc;
-	struct sk_buff *skb;
-	u16 len;
+	u16 idx = from;
 
-	while (done < budget) {
-		u16 idx = dring->tail;
-		struct netsec_de *de = dring->vaddr + (DESC_SZ * idx);
-
-		if (de->attr & (1U << NETSEC_RX_PKT_OWN_FIELD)) {
-			/* reading the register clears the irq */
-			netsec_read(priv, NETSEC_REG_NRM_RX_PKTCNT);
-			break;
-		}
-
-		/* This barrier is needed to keep us from reading
-		 * any other fields out of the netsec_de until we have
-		 * verified the descriptor has been written back
-		 */
-		dma_rmb();
-		done++;
-		skb = netsec_get_rx_pkt_data(priv, &rx_info, &desc, &len);
-		if (unlikely(!skb) || rx_info.err_flag) {
-			netif_err(priv, drv, priv->ndev,
-				  "%s: rx fail err(%d)\n",
-				  __func__, rx_info.err_code);
-			ndev->stats.rx_dropped++;
-			continue;
-		}
-
-		dma_unmap_single(priv->dev, desc.dma_addr, desc.len,
-				 DMA_FROM_DEVICE);
-		skb_put(skb, len);
-		skb->protocol = eth_type_trans(skb, priv->ndev);
-
-		if (priv->rx_cksum_offload_flag &&
-		    rx_info.rx_cksum_result == NETSEC_RX_CKSUM_OK)
-			skb->ip_summed = CHECKSUM_UNNECESSARY;
-
-		if (napi_gro_receive(&priv->napi, skb) != GRO_DROP) {
-			ndev->stats.rx_packets++;
-			ndev->stats.rx_bytes += len;
-		}
+	while (num) {
+		netsec_set_rx_de(priv, dring, idx, &dring->desc[idx]);
+		idx++;
+		if (idx >= DESC_NUM)
+			idx = 0;
+		num--;
	}
-
-	return done;
 }
 
-static int netsec_napi_poll(struct napi_struct *napi, int budget)
+static void netsec_xdp_ring_tx_db(struct netsec_priv *priv, u16 pkts)
 {
-	struct netsec_priv *priv;
-	int tx, rx, done, todo;
+	if (likely(pkts))
+		netsec_write(priv, NETSEC_REG_NRM_TX_PKTCNT, pkts);
+}
 
-	priv = container_of(napi, struct netsec_priv, napi);
+static void netsec_finalize_xdp_rx(struct netsec_priv *priv, u32 xdp_res,
+				   u16 pkts)
+{
+	if (xdp_res & NETSEC_XDP_REDIR)
+		xdp_do_flush_map();
 
-	todo = budget;
-	do {
-		if (!todo)
-			break;
-
-		tx = netsec_process_tx(priv, todo);
-		todo -= tx;
-
-		if (!todo)
-			break;
-
-		rx = netsec_process_rx(priv, todo);
-		todo -= rx;
-	} while (rx || tx);
-
-	done = budget - todo;
-
-	if (done < budget && napi_complete_done(napi, done)) {
-		unsigned long flags;
-
-		spin_lock_irqsave(&priv->reglock, flags);
-		netsec_write(priv, NETSEC_REG_INTEN_SET,
-			     NETSEC_IRQ_RX | NETSEC_IRQ_TX);
-		spin_unlock_irqrestore(&priv->reglock, flags);
-	}
-
-	return done;
+	if (xdp_res & NETSEC_XDP_TX)
+		netsec_xdp_ring_tx_db(priv, pkts);
 }
 
 static void netsec_set_tx_de(struct netsec_priv *priv,
			     struct netsec_desc_ring *dring,
			     const struct netsec_tx_pkt_ctrl *tx_ctrl,
-			     const struct netsec_desc *desc,
-			     struct sk_buff *skb)
+			     const struct netsec_desc *desc, void *buf)
 {
	int idx = dring->head;
	struct netsec_de *de;
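
The rewritten completion path above no longer budgets against NETSEC_REG_NRM_TX_DONE_PKTCNT; it walks descriptors until it meets one the hardware still owns. A condensed sketch of that handshake (illustrative only, reusing the patch's netsec_de and NETSEC_TX_SHIFT_OWN_FIELD names):

/* Sketch, not in the patch: the ownership test the reclaim loop is built
 * on. The driver parks empty slots and posts frames with the OWN bit set;
 * the NIC clears it on completion, so reclaim stops at the first entry
 * that is still in flight (or empty).
 */
static inline bool netsec_tx_done(const struct netsec_de *entry)
{
	return !(entry->attr & (1U << NETSEC_TX_SHIFT_OWN_FIELD));
}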
@@ -852,13 +798,331 @@
	de->data_buf_addr_lw = lower_32_bits(desc->dma_addr);
	de->buf_len_info = (tx_ctrl->tcp_seg_len << 16) | desc->len;
	de->attr = attr;
-	dma_wmb();
 
	dring->desc[idx] = *desc;
-	dring->desc[idx].skb = skb;
+	if (desc->buf_type == TYPE_NETSEC_SKB)
+		dring->desc[idx].skb = buf;
+	else if (desc->buf_type == TYPE_NETSEC_XDP_TX ||
+		 desc->buf_type == TYPE_NETSEC_XDP_NDO)
+		dring->desc[idx].xdpf = buf;
 
	/* move head ahead */
	dring->head = (dring->head + 1) % DESC_NUM;
+}
+
+/* The current driver only supports 1 Txq, this should run under spin_lock() */
+static u32 netsec_xdp_queue_one(struct netsec_priv *priv,
+				struct xdp_frame *xdpf, bool is_ndo)
+
+{
+	struct netsec_desc_ring *tx_ring = &priv->desc_ring[NETSEC_RING_TX];
+	struct page *page = virt_to_page(xdpf->data);
+	struct netsec_tx_pkt_ctrl tx_ctrl = {};
+	struct netsec_desc tx_desc;
+	dma_addr_t dma_handle;
+	u16 filled;
+
+	if (tx_ring->head >= tx_ring->tail)
+		filled = tx_ring->head - tx_ring->tail;
+	else
+		filled = tx_ring->head + DESC_NUM - tx_ring->tail;
+
+	if (DESC_NUM - filled <= 1)
+		return NETSEC_XDP_CONSUMED;
+
+	if (is_ndo) {
+		/* this is for ndo_xdp_xmit, the buffer needs mapping before
+		 * sending
+		 */
+		dma_handle = dma_map_single(priv->dev, xdpf->data, xdpf->len,
+					    DMA_TO_DEVICE);
+		if (dma_mapping_error(priv->dev, dma_handle))
+			return NETSEC_XDP_CONSUMED;
+		tx_desc.buf_type = TYPE_NETSEC_XDP_NDO;
+	} else {
+		/* This is the device Rx buffer from page_pool. No need to remap
+		 * just sync and send it
+		 */
+		struct netsec_desc_ring *rx_ring =
+			&priv->desc_ring[NETSEC_RING_RX];
+		enum dma_data_direction dma_dir =
+			page_pool_get_dma_dir(rx_ring->page_pool);
+
+		dma_handle = page_pool_get_dma_addr(page) + xdpf->headroom +
+			sizeof(*xdpf);
+		dma_sync_single_for_device(priv->dev, dma_handle, xdpf->len,
+					   dma_dir);
+		tx_desc.buf_type = TYPE_NETSEC_XDP_TX;
+	}
+
+	tx_desc.dma_addr = dma_handle;
+	tx_desc.addr = xdpf->data;
+	tx_desc.len = xdpf->len;
+
+	netdev_sent_queue(priv->ndev, xdpf->len);
+	netsec_set_tx_de(priv, tx_ring, &tx_ctrl, &tx_desc, xdpf);
+
+	return NETSEC_XDP_TX;
+}
+
+static u32 netsec_xdp_xmit_back(struct netsec_priv *priv, struct xdp_buff *xdp)
+{
+	struct netsec_desc_ring *tx_ring = &priv->desc_ring[NETSEC_RING_TX];
+	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
+	u32 ret;
+
+	if (unlikely(!xdpf))
+		return NETSEC_XDP_CONSUMED;
+
+	spin_lock(&tx_ring->lock);
+	ret = netsec_xdp_queue_one(priv, xdpf, false);
+	spin_unlock(&tx_ring->lock);
+
+	return ret;
+}
+
+static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog,
+			  struct xdp_buff *xdp)
+{
+	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
+	unsigned int sync, len = xdp->data_end - xdp->data;
+	u32 ret = NETSEC_XDP_PASS;
+	struct page *page;
+	int err;
+	u32 act;
+
+	act = bpf_prog_run_xdp(prog, xdp);
+
+	/* Due xdp_adjust_tail: DMA sync for_device cover max len CPU touch */
+	sync = xdp->data_end - xdp->data_hard_start - NETSEC_RXBUF_HEADROOM;
+	sync = max(sync, len);
+
+	switch (act) {
+	case XDP_PASS:
+		ret = NETSEC_XDP_PASS;
+		break;
+	case XDP_TX:
+		ret = netsec_xdp_xmit_back(priv, xdp);
+		if (ret != NETSEC_XDP_TX) {
+			page = virt_to_head_page(xdp->data);
+			page_pool_put_page(dring->page_pool, page, sync, true);
+		}
+		break;
+	case XDP_REDIRECT:
+		err = xdp_do_redirect(priv->ndev, xdp, prog);
+		if (!err) {
+			ret = NETSEC_XDP_REDIR;
+		} else {
+			ret = NETSEC_XDP_CONSUMED;
+			page = virt_to_head_page(xdp->data);
+			page_pool_put_page(dring->page_pool, page, sync, true);
+		}
+		break;
+	default:
+		bpf_warn_invalid_xdp_action(act);
+		fallthrough;
+	case XDP_ABORTED:
+		trace_xdp_exception(priv->ndev, prog, act);
+		fallthrough;	/* handle aborts by dropping packet */
+	case XDP_DROP:
+		ret = NETSEC_XDP_CONSUMED;
+		page = virt_to_head_page(xdp->data);
+		page_pool_put_page(dring->page_pool, page, sync, true);
+		break;
+	}
+
+	return ret;
+}
+
+static int netsec_process_rx(struct netsec_priv *priv, int budget)
+{
+	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
+	struct net_device *ndev = priv->ndev;
+	struct netsec_rx_pkt_info rx_info;
+	enum dma_data_direction dma_dir;
+	struct bpf_prog *xdp_prog;
+	struct xdp_buff xdp;
+	u16 xdp_xmit = 0;
+	u32 xdp_act = 0;
+	int done = 0;
+
+	xdp.rxq = &dring->xdp_rxq;
+	xdp.frame_sz = PAGE_SIZE;
+
+	rcu_read_lock();
+	xdp_prog = READ_ONCE(priv->xdp_prog);
+	dma_dir = page_pool_get_dma_dir(dring->page_pool);
+
+	while (done < budget) {
+		u16 idx = dring->tail;
+		struct netsec_de *de = dring->vaddr + (DESC_SZ * idx);
+		struct netsec_desc *desc = &dring->desc[idx];
+		struct page *page = virt_to_page(desc->addr);
+		u32 xdp_result = NETSEC_XDP_PASS;
+		struct sk_buff *skb = NULL;
+		u16 pkt_len, desc_len;
+		dma_addr_t dma_handle;
+		void *buf_addr;
+
+		if (de->attr & (1U << NETSEC_RX_PKT_OWN_FIELD)) {
+			/* reading the register clears the irq */
+			netsec_read(priv, NETSEC_REG_NRM_RX_PKTCNT);
+			break;
+		}
+
+		/* This barrier is needed to keep us from reading
+		 * any other fields out of the netsec_de until we have
+		 * verified the descriptor has been written back
+		 */
+		dma_rmb();
+		done++;
+
+		pkt_len = de->buf_len_info >> 16;
+		rx_info.err_code = (de->attr >> NETSEC_RX_PKT_ERR_FIELD) &
+			NETSEC_RX_PKT_ERR_MASK;
+		rx_info.err_flag = (de->attr >> NETSEC_RX_PKT_ER_FIELD) & 1;
+		if (rx_info.err_flag) {
+			netif_err(priv, drv, priv->ndev,
+				  "%s: rx fail err(%d)\n", __func__,
+				  rx_info.err_code);
+			ndev->stats.rx_dropped++;
+			dring->tail = (dring->tail + 1) % DESC_NUM;
+			/* reuse buffer page frag */
+			netsec_rx_fill(priv, idx, 1);
+			continue;
+		}
+		rx_info.rx_cksum_result =
+			(de->attr >> NETSEC_RX_PKT_CO_FIELD) & 3;
+
+		/* allocate a fresh buffer and map it to the hardware.
+		 * This will eventually replace the old buffer in the hardware
+		 */
+		buf_addr = netsec_alloc_rx_data(priv, &dma_handle, &desc_len);
+
+		if (unlikely(!buf_addr))
+			break;
+
+		dma_sync_single_for_cpu(priv->dev, desc->dma_addr, pkt_len,
+					dma_dir);
+		prefetch(desc->addr);
+
+		xdp.data_hard_start = desc->addr;
+		xdp.data = desc->addr + NETSEC_RXBUF_HEADROOM;
+		xdp_set_data_meta_invalid(&xdp);
+		xdp.data_end = xdp.data + pkt_len;
+
+		if (xdp_prog) {
+			xdp_result = netsec_run_xdp(priv, xdp_prog, &xdp);
+			if (xdp_result != NETSEC_XDP_PASS) {
+				xdp_act |= xdp_result;
+				if (xdp_result == NETSEC_XDP_TX)
+					xdp_xmit++;
+				goto next;
+			}
+		}
+		skb = build_skb(desc->addr, desc->len + NETSEC_RX_BUF_NON_DATA);
+
+		if (unlikely(!skb)) {
+			/* If skb fails recycle_direct will either unmap and
+			 * free the page or refill the cache depending on the
+			 * cache state. Since we paid the allocation cost if
+			 * building an skb fails try to put the page into cache
+			 */
+			page_pool_put_page(dring->page_pool, page, pkt_len,
+					   true);
+			netif_err(priv, drv, priv->ndev,
+				  "rx failed to build skb\n");
+			break;
+		}
+		page_pool_release_page(dring->page_pool, page);
+
+		skb_reserve(skb, xdp.data - xdp.data_hard_start);
+		skb_put(skb, xdp.data_end - xdp.data);
+		skb->protocol = eth_type_trans(skb, priv->ndev);
+
+		if (priv->rx_cksum_offload_flag &&
+		    rx_info.rx_cksum_result == NETSEC_RX_CKSUM_OK)
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+next:
+		if (skb)
+			napi_gro_receive(&priv->napi, skb);
+		if (skb || xdp_result) {
+			ndev->stats.rx_packets++;
+			ndev->stats.rx_bytes += xdp.data_end - xdp.data;
+		}
+
+		/* Update the descriptor with fresh buffers */
+		desc->len = desc_len;
+		desc->dma_addr = dma_handle;
+		desc->addr = buf_addr;
+
+		netsec_rx_fill(priv, idx, 1);
+		dring->tail = (dring->tail + 1) % DESC_NUM;
+	}
+	netsec_finalize_xdp_rx(priv, xdp_act, xdp_xmit);
+
+	rcu_read_unlock();
+
+	return done;
+}
+
+static int netsec_napi_poll(struct napi_struct *napi, int budget)
+{
+	struct netsec_priv *priv;
+	int done;
+
+	priv = container_of(napi, struct netsec_priv, napi);
+
+	netsec_process_tx(priv);
+	done = netsec_process_rx(priv, budget);
+
+	if (done < budget && napi_complete_done(napi, done)) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&priv->reglock, flags);
+		netsec_write(priv, NETSEC_REG_INTEN_SET,
+			     NETSEC_IRQ_RX | NETSEC_IRQ_TX);
+		spin_unlock_irqrestore(&priv->reglock, flags);
+	}
+
+	return done;
+}
+
+
+static int netsec_desc_used(struct netsec_desc_ring *dring)
+{
+	int used;
+
+	if (dring->head >= dring->tail)
+		used = dring->head - dring->tail;
+	else
+		used = dring->head + DESC_NUM - dring->tail;
+
+	return used;
+}
+
+static int netsec_check_stop_tx(struct netsec_priv *priv, int used)
+{
+	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
+
+	/* keep tail from touching the queue */
+	if (DESC_NUM - used < 2) {
+		netif_stop_queue(priv->ndev);
+
+		/* Make sure we read the updated value in case
+		 * descriptors got freed
+		 */
+		smp_rmb();
+
+		used = netsec_desc_used(dring);
+		if (DESC_NUM - used < 2)
+			return NETDEV_TX_BUSY;
+
+		netif_wake_queue(priv->ndev);
+	}
+
+	return 0;
 }
 
 static netdev_tx_t netsec_netdev_start_xmit(struct sk_buff *skb,
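
To make the netsec_run_xdp() verdict handling above concrete, here is a hypothetical companion program (not part of the patch) whose return codes map onto exactly the cases that switch dispatches on: XDP_DROP becomes NETSEC_XDP_CONSUMED with the page recycled to the pool, XDP_PASS becomes NETSEC_XDP_PASS followed by build_skb(). Assumed toolchain: clang -O2 -g -target bpf -c xdp_min.c, loaded via the new .ndo_bpf hook.

// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_min(struct xdp_md *ctx)
{
	void *data = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;

	/* Drop anything shorter than an Ethernet header. */
	if (data + 14 > data_end)
		return XDP_DROP;

	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";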
@@ -871,16 +1135,12 @@
	u16 tso_seg_len = 0;
	int filled;
 
-	/* differentiate between full/emtpy ring */
-	if (dring->head >= dring->tail)
-		filled = dring->head - dring->tail;
-	else
-		filled = dring->head + DESC_NUM - dring->tail;
-
-	if (DESC_NUM - filled < 2) { /* if less than 2 available */
-		netif_err(priv, drv, priv->ndev, "%s: TxQFull!\n", __func__);
-		netif_stop_queue(priv->ndev);
-		dma_wmb();
+	spin_lock_bh(&dring->lock);
+	filled = netsec_desc_used(dring);
+	if (netsec_check_stop_tx(priv, filled)) {
+		spin_unlock_bh(&dring->lock);
+		net_warn_ratelimited("%s %s Tx queue full\n",
+				     dev_name(priv->dev), ndev->name);
		return NETDEV_TX_BUSY;
	}
 
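
The open-coded occupancy computation removed here now lives in netsec_desc_used(), shared with the queue-stop logic and with netsec_xdp_queue_one(). A standalone sketch of that head/tail arithmetic (illustrative only; 'head' is the producer index, 'tail' the consumer index, both modulo DESC_NUM):

static inline int ring_used(unsigned int head, unsigned int tail,
			    unsigned int num)
{
	return head >= tail ? head - tail : head + num - tail;
}
/* e.g. head = 3, tail = 250, num = 256  ->  9 descriptors in flight;
 * the queue is stopped once fewer than 2 of the 256 slots remain free.
 */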
@@ -897,11 +1157,7 @@
			~tcp_v4_check(0, ip_hdr(skb)->saddr,
				      ip_hdr(skb)->daddr, 0);
	} else {
-		ipv6_hdr(skb)->payload_len = 0;
-		tcp_hdr(skb)->check =
-			~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
-					 &ipv6_hdr(skb)->daddr,
-					 0, IPPROTO_TCP, 0);
+		tcp_v6_gso_csum_prep(skb);
	}
 
	tx_ctrl.tcp_seg_offload_flag = true;
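
The open-coded IPv6 pseudo-header preparation is folded into the tcp_v6_gso_csum_prep() helper, which performs the same two steps. For reference, the helper from include/net/ip6_checksum.h is roughly the following (quoted from memory, so treat as approximate):

static inline void tcp_v6_gso_csum_prep(struct sk_buff *skb)
{
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	ipv6h->payload_len = 0;
	th->check = ~tcp_v6_check(0, &ipv6h->saddr, &ipv6h->daddr, 0);
}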
@@ -911,6 +1167,7 @@
	tx_desc.dma_addr = dma_map_single(priv->dev, skb->data,
					  skb_headlen(skb), DMA_TO_DEVICE);
	if (dma_mapping_error(priv->dev, tx_desc.dma_addr)) {
+		spin_unlock_bh(&dring->lock);
		netif_err(priv, drv, priv->ndev,
			  "%s: DMA mapping failed\n", __func__);
		ndev->stats.tx_dropped++;
@@ -919,11 +1176,13 @@
	}
	tx_desc.addr = skb->data;
	tx_desc.len = skb_headlen(skb);
+	tx_desc.buf_type = TYPE_NETSEC_SKB;
 
	skb_tx_timestamp(skb);
	netdev_sent_queue(priv->ndev, skb->len);
 
	netsec_set_tx_de(priv, dring, &tx_ctrl, &tx_desc, skb);
+	spin_unlock_bh(&dring->lock);
	netsec_write(priv, NETSEC_REG_NRM_TX_PKTCNT, 1); /* submit another tx */
 
	return NETDEV_TX_OK;
@@ -937,16 +1196,27 @@
 
	if (!dring->vaddr || !dring->desc)
		return;
-
	for (idx = 0; idx < DESC_NUM; idx++) {
		desc = &dring->desc[idx];
		if (!desc->addr)
			continue;
 
-		dma_unmap_single(priv->dev, desc->dma_addr, desc->len,
-				 id == NETSEC_RING_RX ? DMA_FROM_DEVICE :
-						      DMA_TO_DEVICE);
-		dev_kfree_skb(desc->skb);
+		if (id == NETSEC_RING_RX) {
+			struct page *page = virt_to_page(desc->addr);
+
+			page_pool_put_full_page(dring->page_pool, page, false);
+		} else if (id == NETSEC_RING_TX) {
+			dma_unmap_single(priv->dev, desc->dma_addr, desc->len,
+					 DMA_TO_DEVICE);
+			dev_kfree_skb(desc->skb);
+		}
+	}
+
+	/* Rx is currently using page_pool */
+	if (id == NETSEC_RING_RX) {
+		if (xdp_rxq_info_is_reg(&dring->xdp_rxq))
+			xdp_rxq_info_unreg(&dring->xdp_rxq);
+		page_pool_destroy(dring->page_pool);
	}
 
	memset(dring->desc, 0, sizeof(struct netsec_desc) * DESC_NUM);
@@ -954,7 +1224,6 @@
 
	dring->head = 0;
	dring->tail = 0;
-	dring->pkt_cnt = 0;
 
	if (id == NETSEC_RING_TX)
		netdev_reset_queue(priv->ndev);
@@ -977,47 +1246,97 @@
 static int netsec_alloc_dring(struct netsec_priv *priv, enum ring_id id)
 {
	struct netsec_desc_ring *dring = &priv->desc_ring[id];
-	int ret = 0;
 
-	dring->vaddr = dma_zalloc_coherent(priv->dev, DESC_SZ * DESC_NUM,
-					   &dring->desc_dma, GFP_KERNEL);
-	if (!dring->vaddr) {
-		ret = -ENOMEM;
+	dring->vaddr = dma_alloc_coherent(priv->dev, DESC_SZ * DESC_NUM,
+					  &dring->desc_dma, GFP_KERNEL);
+	if (!dring->vaddr)
		goto err;
-	}
 
	dring->desc = kcalloc(DESC_NUM, sizeof(*dring->desc), GFP_KERNEL);
-	if (!dring->desc) {
-		ret = -ENOMEM;
+	if (!dring->desc)
		goto err;
-	}
 
	return 0;
 err:
	netsec_free_dring(priv, id);
 
-	return ret;
+	return -ENOMEM;
+}
+
+static void netsec_setup_tx_dring(struct netsec_priv *priv)
+{
+	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
+	int i;
+
+	for (i = 0; i < DESC_NUM; i++) {
+		struct netsec_de *de;
+
+		de = dring->vaddr + (DESC_SZ * i);
+		/* de->attr is not going to be accessed by the NIC
+		 * until netsec_set_tx_de() is called.
+		 * No need for a dma_wmb() here
+		 */
+		de->attr = 1U << NETSEC_TX_SHIFT_OWN_FIELD;
+	}
 }
 
 static int netsec_setup_rx_dring(struct netsec_priv *priv)
 {
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
-	struct netsec_desc desc;
-	struct sk_buff *skb;
-	int n;
+	struct bpf_prog *xdp_prog = READ_ONCE(priv->xdp_prog);
+	struct page_pool_params pp_params = {
+		.order = 0,
+		/* internal DMA mapping in page_pool */
+		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+		.pool_size = DESC_NUM,
+		.nid = NUMA_NO_NODE,
+		.dev = priv->dev,
+		.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
+		.offset = NETSEC_RXBUF_HEADROOM,
+		.max_len = NETSEC_RX_BUF_SIZE,
+	};
+	int i, err;
 
-	desc.len = priv->ndev->mtu + 22;
-
-	for (n = 0; n < DESC_NUM; n++) {
-		skb = netsec_alloc_skb(priv, &desc);
-		if (!skb) {
-			netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
-			return -ENOMEM;
-		}
-		netsec_set_rx_de(priv, dring, n, &desc, skb);
+	dring->page_pool = page_pool_create(&pp_params);
+	if (IS_ERR(dring->page_pool)) {
+		err = PTR_ERR(dring->page_pool);
+		dring->page_pool = NULL;
+		goto err_out;
	}
 
+	err = xdp_rxq_info_reg(&dring->xdp_rxq, priv->ndev, 0);
+	if (err)
+		goto err_out;
+
+	err = xdp_rxq_info_reg_mem_model(&dring->xdp_rxq, MEM_TYPE_PAGE_POOL,
+					 dring->page_pool);
+	if (err)
+		goto err_out;
+
+	for (i = 0; i < DESC_NUM; i++) {
+		struct netsec_desc *desc = &dring->desc[i];
+		dma_addr_t dma_handle;
+		void *buf;
+		u16 len;
+
+		buf = netsec_alloc_rx_data(priv, &dma_handle, &len);
+
+		if (!buf) {
+			err = -ENOMEM;
+			goto err_out;
+		}
+		desc->dma_addr = dma_handle;
+		desc->addr = buf;
+		desc->len = len;
+	}
+
+	netsec_rx_fill(priv, 0, DESC_NUM);
+
	return 0;
+
+err_out:
+	netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
+	return err;
 }
 
 static int netsec_netdev_load_ucode_region(struct netsec_priv *priv, u32 reg,
@@ -1291,6 +1610,7 @@
 
	pm_runtime_get_sync(priv->dev);
 
+	netsec_setup_tx_dring(priv);
	ret = netsec_setup_rx_dring(priv);
	if (ret) {
		netif_err(priv, probe, priv->ndev,
@@ -1377,6 +1697,8 @@
	int ret;
	u16 data;
 
+	BUILD_BUG_ON_NOT_POWER_OF_2(DESC_NUM);
+
	ret = netsec_alloc_dring(priv, NETSEC_RING_TX);
	if (ret)
		return ret;
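
A plausible reading of the new compile-time assert (an inference, the patch does not state it): the ring arithmetic throughout the driver uses '% DESC_NUM', which the compiler can lower to a single AND only when DESC_NUM is a power of two. Sketch:

#define DESC_NUM 256

/* For a power-of-two N, x % N == x & (N - 1). */
static inline unsigned int ring_next(unsigned int idx)
{
	return (idx + 1) & (DESC_NUM - 1);	/* same result as % DESC_NUM */
}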
@@ -1396,6 +1718,9 @@
 
	/* Restore phy power state */
	netsec_phy_write(priv->mii_bus, priv->phy_addr, MII_BMCR, data);
+
+	spin_lock_init(&priv->desc_ring[NETSEC_RING_TX].lock);
+	spin_lock_init(&priv->desc_ring[NETSEC_RING_RX].lock);
 
	return 0;
 err2:
@@ -1423,10 +1748,76 @@
	return 0;
 }
 
-static int netsec_netdev_ioctl(struct net_device *ndev, struct ifreq *ifr,
-			       int cmd)
+static int netsec_xdp_xmit(struct net_device *ndev, int n,
+			   struct xdp_frame **frames, u32 flags)
 {
-	return phy_mii_ioctl(ndev->phydev, ifr, cmd);
+	struct netsec_priv *priv = netdev_priv(ndev);
+	struct netsec_desc_ring *tx_ring = &priv->desc_ring[NETSEC_RING_TX];
+	int drops = 0;
+	int i;
+
+	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+		return -EINVAL;
+
+	spin_lock(&tx_ring->lock);
+	for (i = 0; i < n; i++) {
+		struct xdp_frame *xdpf = frames[i];
+		int err;
+
+		err = netsec_xdp_queue_one(priv, xdpf, true);
+		if (err != NETSEC_XDP_TX) {
+			xdp_return_frame_rx_napi(xdpf);
+			drops++;
+		} else {
+			tx_ring->xdp_xmit++;
+		}
+	}
+	spin_unlock(&tx_ring->lock);
+
+	if (unlikely(flags & XDP_XMIT_FLUSH)) {
+		netsec_xdp_ring_tx_db(priv, tx_ring->xdp_xmit);
+		tx_ring->xdp_xmit = 0;
+	}
+
+	return n - drops;
+}
+
+static int netsec_xdp_setup(struct netsec_priv *priv, struct bpf_prog *prog,
+			    struct netlink_ext_ack *extack)
+{
+	struct net_device *dev = priv->ndev;
+	struct bpf_prog *old_prog;
+
+	/* For now just support only the usual MTU sized frames */
+	if (prog && dev->mtu > 1500) {
+		NL_SET_ERR_MSG_MOD(extack, "Jumbo frames not supported on XDP");
+		return -EOPNOTSUPP;
+	}
+
+	if (netif_running(dev))
+		netsec_netdev_stop(dev);
+
+	/* Detach old prog, if any */
+	old_prog = xchg(&priv->xdp_prog, prog);
+	if (old_prog)
+		bpf_prog_put(old_prog);
+
+	if (netif_running(dev))
+		netsec_netdev_open(dev);
+
+	return 0;
+}
+
+static int netsec_xdp(struct net_device *ndev, struct netdev_bpf *xdp)
+{
+	struct netsec_priv *priv = netdev_priv(ndev);
+
+	switch (xdp->command) {
+	case XDP_SETUP_PROG:
+		return netsec_xdp_setup(priv, xdp->prog, xdp->extack);
+	default:
+		return -EINVAL;
+	}
 }
 
 static const struct net_device_ops netsec_netdev_ops = {
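
For context on the new netsec_xdp_xmit() handler: it is reached when another device's XDP program returns XDP_REDIRECT into a devmap whose entry points at this interface. A hypothetical example of such a program (not from the patch; userspace is assumed to store the egress ifindex at key 0):

// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_DEVMAP);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u32);
} tx_port SEC(".maps");

SEC("xdp")
int xdp_fwd(struct xdp_md *ctx)
{
	/* XDP_REDIRECT -> xdp_do_redirect() -> target's .ndo_xdp_xmit */
	return bpf_redirect_map(&tx_port, 0, 0);
}

char _license[] SEC("license") = "GPL";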
@@ -1438,12 +1829,33 @@
	.ndo_set_features = netsec_netdev_set_features,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
-	.ndo_do_ioctl = netsec_netdev_ioctl,
+	.ndo_do_ioctl = phy_do_ioctl,
+	.ndo_xdp_xmit = netsec_xdp_xmit,
+	.ndo_bpf = netsec_xdp,
 };
 
 static int netsec_of_probe(struct platform_device *pdev,
			   struct netsec_priv *priv, u32 *phy_addr)
 {
+	int err;
+
+	err = of_get_phy_mode(pdev->dev.of_node, &priv->phy_interface);
+	if (err) {
+		dev_err(&pdev->dev, "missing required property 'phy-mode'\n");
+		return err;
+	}
+
+	/*
+	 * SynQuacer is physically configured with TX and RX delays
+	 * but the standard firmware claimed otherwise for a long
+	 * time, ignore it.
+	 */
+	if (of_machine_is_compatible("socionext,developer-box") &&
+	    priv->phy_interface != PHY_INTERFACE_MODE_RGMII_ID) {
+		dev_warn(&pdev->dev, "Outdated firmware reports incorrect PHY mode, overriding\n");
+		priv->phy_interface = PHY_INTERFACE_MODE_RGMII_ID;
+	}
+
	priv->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
	if (!priv->phy_np) {
		dev_err(&pdev->dev, "missing required property 'phy-handle'\n");
@@ -1469,6 +1881,14 @@
 
	if (!IS_ENABLED(CONFIG_ACPI))
		return -ENODEV;
+
+	/* ACPI systems are assumed to configure the PHY in firmware, so
+	 * there is really no need to discover the PHY mode from the DSDT.
+	 * Since firmware is known to exist in the field that configures the
+	 * PHY correctly but passes the wrong mode string in the phy-mode
+	 * device property, we have no choice but to ignore it.
+	 */
+	priv->phy_interface = PHY_INTERFACE_MODE_NA;
 
	ret = device_property_read_u32(&pdev->dev, "phy-channel", phy_addr);
	if (ret) {
@@ -1549,11 +1969,13 @@
		ret = PTR_ERR(priv->phydev);
		dev_err(priv->dev, "get_phy_device err(%d)\n", ret);
		priv->phydev = NULL;
+		mdiobus_unregister(bus);
		return -ENODEV;
	}
 
	ret = phy_device_register(priv->phydev);
	if (ret) {
+		phy_device_free(priv->phydev);
		mdiobus_unregister(bus);
		dev_err(priv->dev,
			"phy_device_register err(%d)\n", ret);
@@ -1605,13 +2027,6 @@
 
	priv->msg_enable = NETIF_MSG_TX_ERR | NETIF_MSG_HW | NETIF_MSG_DRV |
			   NETIF_MSG_LINK | NETIF_MSG_PROBE;
-
-	priv->phy_interface = device_get_phy_mode(&pdev->dev);
-	if ((int)priv->phy_interface < 0) {
-		dev_err(&pdev->dev, "missing required property 'phy-mode'\n");
-		ret = -ENODEV;
-		goto free_ndev;
-	}
 
	priv->ioaddr = devm_ioremap(&pdev->dev, mmio_res->start,
				    resource_size(mmio_res));
---|