From 9370bb92b2d16684ee45cf24e879c93c509162da Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Thu, 19 Dec 2024 01:47:39 +0000
Subject: [PATCH] be2net: sync be_main.c with upstream kernel API changes

Bring be_main.c in line with mainline API changes:

- add an SPDX license identifier and drop the license boilerplate,
  MODULE_VERSION() and the DRV_VER version strings
- switch dma_zalloc_coherent() to dma_alloc_coherent(), which now
  returns zeroed memory
- use the skb_frag_t type, skb_frag_off_set() and
  skb_vlan_tag_get_prio() helpers
- track VLAN insertion with an explicit flag and clear the tag with
  __vlan_hwaccel_clear_tag()
- use netdev_xmit_more(), the txqueue argument of .ndo_tx_timeout and
  the extack argument of .ndo_bridge_setlink
- move adaptive interrupt moderation control to adapter->aic_enabled
- convert VxLAN offload handling to the udp_tunnel_nic infrastructure,
  replacing the driver-private port list and work items
- convert the legacy PCI suspend/resume callbacks to dev_pm_ops
- drop the removed pci_cleanup_aer_uncorrect_error_status() call
- extend the short-packet padding workaround to BE3, check pskb_trim()
  for failure, and take netif_tx_lock around netif_carrier_off() to
  synchronize with the TX watchdog
---
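Note: the driver-private VxLAN port list, its alias counting and the
be_work_add_vxlan_port()/be_work_del_vxlan_port() work items are replaced
by the core udp_tunnel_nic infrastructure, which serializes port
add/remove and calls back into the driver through .set_port/.unset_port.
A condensed sketch of that registration pattern follows; the foo_* names
and the minimal private struct are placeholders, not code from this
driver.

#include <linux/netdevice.h>
#include <net/udp_tunnel.h>

struct foo_priv {
	__be16 vxlan_port;	/* currently offloaded UDP port, 0 if none */
};

/* Called by the core when the first VxLAN socket needs this port. */
static int foo_udp_tunnel_set_port(struct net_device *netdev,
				   unsigned int table, unsigned int entry,
				   struct udp_tunnel_info *ti)
{
	struct foo_priv *priv = netdev_priv(netdev);

	priv->vxlan_port = ti->port;	/* program the port into firmware */
	return 0;
}

/* Called when the last VxLAN socket on that port goes away. */
static int foo_udp_tunnel_unset_port(struct net_device *netdev,
				     unsigned int table, unsigned int entry,
				     struct udp_tunnel_info *ti)
{
	struct foo_priv *priv = netdev_priv(netdev);

	priv->vxlan_port = 0;		/* tear down the firmware state */
	return 0;
}

static const struct udp_tunnel_nic_info foo_udp_tunnels = {
	.set_port	= foo_udp_tunnel_set_port,
	.unset_port	= foo_udp_tunnel_unset_port,
	/* callbacks may sleep; only program ports while the device is open */
	.flags		= UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
			  UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
	/* one firmware slot, VxLAN only; the core enforces the limit */
	.tables		= {
		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
	},
};

/* In the netdev setup path: netdev->udp_tunnel_nic_info = &foo_udp_tunnels; */

With UDP_TUNNEL_NIC_INFO_OPEN_ONLY the core programs ports only while the
interface is up, and the udp_tunnel_nic_reset_ntf() call added to be_open()
asks it to replay the known ports once the device has been (re)opened.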
 kernel/drivers/net/ethernet/emulex/benet/be_main.c |  308 ++++++++++++++------------------------------------
 1 file changed, 89 insertions(+), 219 deletions(-)
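Note: the legacy pci_driver .suspend/.resume entry points are converted to
generic dev_pm_ops.  With dev_pm_ops the PCI core saves and restores config
space and handles the power-state transitions itself, which is why the
explicit pci_save_state()/pci_restore_state() and power-state handling
disappears from be_suspend()/be_pci_resume().  A minimal sketch of the
pattern, with placeholder foo_* names:

#include <linux/pci.h>
#include <linux/pm.h>

struct foo_adapter {
	bool hw_running;
};

static int __maybe_unused foo_suspend(struct device *dev)
{
	struct foo_adapter *adapter = dev_get_drvdata(dev);

	/* Quiesce the device only; the PCI core now saves state and
	 * powers the function down around this callback.
	 */
	adapter->hw_running = false;
	return 0;
}

static int __maybe_unused foo_resume(struct device *dev)
{
	struct foo_adapter *adapter = dev_get_drvdata(dev);

	/* Config space is already restored by the core; just re-init. */
	adapter->hw_running = true;
	return 0;
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct pci_driver foo_driver = {
	.name		= "foo",
	/* .id_table, .probe and .remove are omitted in this sketch */
	.driver.pm	= &foo_pm_ops,
};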

diff --git a/kernel/drivers/net/ethernet/emulex/benet/be_main.c b/kernel/drivers/net/ethernet/emulex/benet/be_main.c
index 05cb2f7..52b399a 100644
--- a/kernel/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/kernel/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1,11 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 2005 - 2016 Broadcom
  * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.  The full GNU General
- * Public License is included in this distribution in the file called COPYING.
  *
  * Contact Information:
  * linux-drivers@emulex.com
@@ -25,8 +21,7 @@
 #include <net/busy_poll.h>
 #include <net/vxlan.h>
 
-MODULE_VERSION(DRV_VER);
-MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
+MODULE_DESCRIPTION(DRV_DESC);
 MODULE_AUTHOR("Emulex Corporation");
 MODULE_LICENSE("GPL");
 
@@ -167,8 +162,8 @@
 	q->len = len;
 	q->entry_size = entry_size;
 	mem->size = len * entry_size;
-	mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
-				      GFP_KERNEL);
+	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
+				     &mem->dma, GFP_KERNEL);
 	if (!mem->va)
 		return -ENOMEM;
 	return 0;
@@ -796,7 +791,7 @@
 	u16 vlan_tag;
 
 	vlan_tag = skb_vlan_tag_get(skb);
-	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+	vlan_prio = skb_vlan_tag_get_prio(skb);
 	/* If vlan priority provided by OS is NOT in available bmap */
 	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
 		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
@@ -1018,7 +1013,7 @@
 	}
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 		len = skb_frag_size(frag);
 
 		busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
@@ -1049,30 +1044,35 @@
 					     struct be_wrb_params
 					     *wrb_params)
 {
+	bool insert_vlan = false;
 	u16 vlan_tag = 0;
 
 	skb = skb_share_check(skb, GFP_ATOMIC);
 	if (unlikely(!skb))
 		return skb;
 
-	if (skb_vlan_tag_present(skb))
+	if (skb_vlan_tag_present(skb)) {
 		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
+		insert_vlan = true;
+	}
 
 	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
-		if (!vlan_tag)
+		if (!insert_vlan) {
 			vlan_tag = adapter->pvid;
+			insert_vlan = true;
+		}
 		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
 		 * skip VLAN insertion
 		 */
 		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
 	}
 
-	if (vlan_tag) {
+	if (insert_vlan) {
 		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
 						vlan_tag);
 		if (unlikely(!skb))
 			return skb;
-		skb->vlan_tci = 0;
+		__vlan_hwaccel_clear_tag(skb);
 	}
 
 	/* Insert the outer VLAN, if any */
@@ -1136,10 +1136,11 @@
 	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
 						VLAN_ETH_HLEN : ETH_HLEN;
 	if (skb->len <= 60 &&
-	    (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
-	    is_ipv4_pkt(skb)) {
+	    (lancer_chip(adapter) || BE3_chip(adapter) ||
+	     skb_vlan_tag_present(skb)) && is_ipv4_pkt(skb)) {
 		ip = (struct iphdr *)ip_hdr(skb);
-		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
+		if (unlikely(pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len))))
+			goto tx_drop;
 	}
 
 	/* If vlan tag is already inlined in the packet, skip HW VLAN
@@ -1265,10 +1266,6 @@
 #define is_arp_allowed_on_bmc(adapter, skb)	\
 	(is_arp(skb) && is_arp_filt_enabled(adapter))
 
-#define is_broadcast_packet(eh, adapter)	\
-		(is_multicast_ether_addr(eh->h_dest) && \
-		!compare_ether_addr(eh->h_dest, adapter->netdev->broadcast))
-
 #define is_arp(skb)	(skb->protocol == htons(ETH_P_ARP))
 
 #define is_arp_filt_enabled(adapter)	\
@@ -1375,7 +1372,7 @@
 	u16 q_idx = skb_get_queue_mapping(skb);
 	struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
 	struct be_wrb_params wrb_params = { 0 };
-	bool flush = !skb->xmit_more;
+	bool flush = !netdev_xmit_more();
 	u16 wrb_cnt;
 
 	skb = be_xmit_workarounds(adapter, skb, &wrb_params);
@@ -1420,7 +1417,7 @@
 	return NETDEV_TX_OK;
 }
 
-static void be_tx_timeout(struct net_device *netdev)
+static void be_tx_timeout(struct net_device *netdev, unsigned int txqueue)
 {
 	struct be_adapter *adapter = netdev_priv(netdev);
 	struct device *dev = &adapter->pdev->dev;
@@ -2150,7 +2147,7 @@
 	int i;
 
 	aic = &adapter->aic_obj[eqo->idx];
-	if (!aic->enable) {
+	if (!adapter->aic_enabled) {
 		if (aic->jiffies)
 			aic->jiffies = 0;
 		eqd = aic->et_eqd;
@@ -2207,7 +2204,7 @@
 	int eqd;
 	u32 mult_enc;
 
-	if (!aic->enable)
+	if (!adapter->aic_enabled)
 		return 0;
 
 	if (jiffies_to_msecs(now - aic->jiffies) < 1)
@@ -2349,8 +2346,8 @@
 		memcpy(skb->data, start, hdr_len);
 		skb_shinfo(skb)->nr_frags = 1;
 		skb_frag_set_page(skb, 0, page_info->page);
-		skb_shinfo(skb)->frags[0].page_offset =
-					page_info->page_offset + hdr_len;
+		skb_frag_off_set(&skb_shinfo(skb)->frags[0],
+				 page_info->page_offset + hdr_len);
 		skb_frag_size_set(&skb_shinfo(skb)->frags[0],
 				  curr_frag_len - hdr_len);
 		skb->data_len = curr_frag_len - hdr_len;
@@ -2375,8 +2372,8 @@
 			/* Fresh page */
 			j++;
 			skb_frag_set_page(skb, j, page_info->page);
-			skb_shinfo(skb)->frags[j].page_offset =
-							page_info->page_offset;
+			skb_frag_off_set(&skb_shinfo(skb)->frags[j],
+					 page_info->page_offset);
 			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
 			skb_shinfo(skb)->nr_frags++;
 		} else {
@@ -2457,8 +2454,8 @@
 			/* First frag or Fresh page */
 			j++;
 			skb_frag_set_page(skb, j, page_info->page);
-			skb_shinfo(skb)->frags[j].page_offset =
-							page_info->page_offset;
+			skb_frag_off_set(&skb_shinfo(skb)->frags[j],
+					 page_info->page_offset);
 			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
 		} else {
 			put_page(page_info->page);
@@ -2962,6 +2959,8 @@
 				    max(adapter->cfg_num_rx_irqs,
 					adapter->cfg_num_tx_irqs));
 
+	adapter->aic_enabled = true;
+
 	for_all_evt_queues(adapter, eqo, i) {
 		int numa_node = dev_to_node(&adapter->pdev->dev);
 
@@ -2969,7 +2968,6 @@
 		eqo->adapter = adapter;
 		eqo->idx = i;
 		aic->max_eqd = BE_MAX_EQD;
-		aic->enable = true;
 
 		eq = &eqo->q;
 		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
@@ -3832,8 +3830,8 @@
 		be_link_status_update(adapter, link_status);
 
 	netif_tx_start_all_queues(netdev);
-	if (skyhawk_chip(adapter))
-		udp_tunnel_get_rx_info(netdev);
+
+	udp_tunnel_nic_reset_ntf(netdev);
 
 	return 0;
 err:
@@ -3970,17 +3968,22 @@
 	}
 }
 
-static int be_enable_vxlan_offloads(struct be_adapter *adapter)
+/* VxLAN offload Notes:
+ *
+ * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
+ * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
+ * is expected to work across all types of IP tunnels once exported. Skyhawk
+ * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
+ * offloads in hw_enc_features only when a VxLAN port is added. If other (non
+ * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
+ * those other tunnels are unexported on the fly through ndo_features_check().
+ */
+static int be_vxlan_set_port(struct net_device *netdev, unsigned int table,
+			     unsigned int entry, struct udp_tunnel_info *ti)
 {
-	struct net_device *netdev = adapter->netdev;
+	struct be_adapter *adapter = netdev_priv(netdev);
 	struct device *dev = &adapter->pdev->dev;
-	struct be_vxlan_port *vxlan_port;
-	__be16 port;
 	int status;
-
-	vxlan_port = list_first_entry(&adapter->vxlan_port_list,
-				      struct be_vxlan_port, list);
-	port = vxlan_port->port;
 
 	status = be_cmd_manage_iface(adapter, adapter->if_handle,
 				     OP_CONVERT_NORMAL_TO_TUNNEL);
@@ -3990,25 +3993,26 @@
 	}
 	adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
 
-	status = be_cmd_set_vxlan_port(adapter, port);
+	status = be_cmd_set_vxlan_port(adapter, ti->port);
 	if (status) {
 		dev_warn(dev, "Failed to add VxLAN port\n");
 		return status;
 	}
-	adapter->vxlan_port = port;
+	adapter->vxlan_port = ti->port;
 
 	netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
 				   NETIF_F_TSO | NETIF_F_TSO6 |
 				   NETIF_F_GSO_UDP_TUNNEL;
 
 	dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
-		 be16_to_cpu(port));
+		 be16_to_cpu(ti->port));
 	return 0;
 }
 
-static void be_disable_vxlan_offloads(struct be_adapter *adapter)
+static int be_vxlan_unset_port(struct net_device *netdev, unsigned int table,
+			       unsigned int entry, struct udp_tunnel_info *ti)
 {
-	struct net_device *netdev = adapter->netdev;
+	struct be_adapter *adapter = netdev_priv(netdev);
 
 	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
 		be_cmd_manage_iface(adapter, adapter->if_handle,
@@ -4021,7 +4025,18 @@
 	adapter->vxlan_port = 0;
 
 	netdev->hw_enc_features = 0;
+	return 0;
 }
+
+static const struct udp_tunnel_nic_info be_udp_tunnels = {
+	.set_port	= be_vxlan_set_port,
+	.unset_port	= be_vxlan_unset_port,
+	.flags		= UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
+			  UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
+	.tables		= {
+		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
+	},
+};
 
 static void be_calculate_vf_res(struct be_adapter *adapter, u16 num_vfs,
 				struct be_resources *vft_res)
@@ -4138,7 +4153,7 @@
 					&vft_res);
 	}
 
-	be_disable_vxlan_offloads(adapter);
+	be_vxlan_unset_port(adapter->netdev, 0, 0, NULL);
 
 	be_if_destroy(adapter);
 
@@ -4701,8 +4716,13 @@
 	int status;
 
 	if (netif_running(netdev)) {
+		/* be_tx_timeout() must not run concurrently with this
+		 * function, synchronize with an already-running dev_watchdog
+		 */
+		netif_tx_lock_bh(netdev);
 		/* device cannot transmit now, avoid dev_watchdog timeouts */
 		netif_carrier_off(netdev);
+		netif_tx_unlock_bh(netdev);
 
 		be_close(netdev);
 	}
@@ -4954,7 +4974,7 @@
 }
 
 static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
-				 u16 flags)
+				 u16 flags, struct netlink_ext_ack *extack)
 {
 	struct be_adapter *adapter = netdev_priv(dev);
 	struct nlattr *attr, *br_spec;
@@ -5049,147 +5069,6 @@
 	INIT_WORK(&work->work, func);
 	work->adapter = adapter;
 	return work;
-}
-
-/* VxLAN offload Notes:
- *
- * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
- * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
- * is expected to work across all types of IP tunnels once exported. Skyhawk
- * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
- * offloads in hw_enc_features only when a VxLAN port is added. If other (non
- * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
- * those other tunnels are unexported on the fly through ndo_features_check().
- *
- * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
- * adds more than one port, disable offloads and re-enable them again when
- * there's only one port left. We maintain a list of ports for this purpose.
- */
-static void be_work_add_vxlan_port(struct work_struct *work)
-{
-	struct be_cmd_work *cmd_work =
-				container_of(work, struct be_cmd_work, work);
-	struct be_adapter *adapter = cmd_work->adapter;
-	struct device *dev = &adapter->pdev->dev;
-	__be16 port = cmd_work->info.vxlan_port;
-	struct be_vxlan_port *vxlan_port;
-	int status;
-
-	/* Bump up the alias count if it is an existing port */
-	list_for_each_entry(vxlan_port, &adapter->vxlan_port_list, list) {
-		if (vxlan_port->port == port) {
-			vxlan_port->port_aliases++;
-			goto done;
-		}
-	}
-
-	/* Add a new port to our list. We don't need a lock here since port
-	 * add/delete are done only in the context of a single-threaded work
-	 * queue (be_wq).
-	 */
-	vxlan_port = kzalloc(sizeof(*vxlan_port), GFP_KERNEL);
-	if (!vxlan_port)
-		goto done;
-
-	vxlan_port->port = port;
-	INIT_LIST_HEAD(&vxlan_port->list);
-	list_add_tail(&vxlan_port->list, &adapter->vxlan_port_list);
-	adapter->vxlan_port_count++;
-
-	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
-		dev_info(dev,
-			 "Only one UDP port supported for VxLAN offloads\n");
-		dev_info(dev, "Disabling VxLAN offloads\n");
-		goto err;
-	}
-
-	if (adapter->vxlan_port_count > 1)
-		goto done;
-
-	status = be_enable_vxlan_offloads(adapter);
-	if (!status)
-		goto done;
-
-err:
-	be_disable_vxlan_offloads(adapter);
-done:
-	kfree(cmd_work);
-	return;
-}
-
-static void be_work_del_vxlan_port(struct work_struct *work)
-{
-	struct be_cmd_work *cmd_work =
-				container_of(work, struct be_cmd_work, work);
-	struct be_adapter *adapter = cmd_work->adapter;
-	__be16 port = cmd_work->info.vxlan_port;
-	struct be_vxlan_port *vxlan_port;
-
-	/* Nothing to be done if a port alias is being deleted */
-	list_for_each_entry(vxlan_port, &adapter->vxlan_port_list, list) {
-		if (vxlan_port->port == port) {
-			if (vxlan_port->port_aliases) {
-				vxlan_port->port_aliases--;
-				goto done;
-			}
-			break;
-		}
-	}
-
-	/* No port aliases left; delete the port from the list */
-	list_del(&vxlan_port->list);
-	adapter->vxlan_port_count--;
-
-	/* Disable VxLAN offload if this is the offloaded port */
-	if (adapter->vxlan_port == vxlan_port->port) {
-		WARN_ON(adapter->vxlan_port_count);
-		be_disable_vxlan_offloads(adapter);
-		dev_info(&adapter->pdev->dev,
-			 "Disabled VxLAN offloads for UDP port %d\n",
-			 be16_to_cpu(port));
-		goto out;
-	}
-
-	/* If only 1 port is left, re-enable VxLAN offload */
-	if (adapter->vxlan_port_count == 1)
-		be_enable_vxlan_offloads(adapter);
-
-out:
-	kfree(vxlan_port);
-done:
-	kfree(cmd_work);
-}
-
-static void be_cfg_vxlan_port(struct net_device *netdev,
-			      struct udp_tunnel_info *ti,
-			      void (*func)(struct work_struct *))
-{
-	struct be_adapter *adapter = netdev_priv(netdev);
-	struct be_cmd_work *cmd_work;
-
-	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
-		return;
-
-	if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
-		return;
-
-	cmd_work = be_alloc_work(adapter, func);
-	if (cmd_work) {
-		cmd_work->info.vxlan_port = ti->port;
-		queue_work(be_wq, &cmd_work->work);
-	}
-}
-
-static void be_del_vxlan_port(struct net_device *netdev,
-			      struct udp_tunnel_info *ti)
-{
-	be_cfg_vxlan_port(netdev, ti, be_work_del_vxlan_port);
-}
-
-static void be_add_vxlan_port(struct net_device *netdev,
-			      struct udp_tunnel_info *ti)
-{
-	be_cfg_vxlan_port(netdev, ti, be_work_add_vxlan_port);
 }
 
 static netdev_features_t be_features_check(struct sk_buff *skb,
@@ -5307,8 +5186,8 @@
 #endif
 	.ndo_bridge_setlink	= be_ndo_bridge_setlink,
 	.ndo_bridge_getlink	= be_ndo_bridge_getlink,
-	.ndo_udp_tunnel_add	= be_add_vxlan_port,
-	.ndo_udp_tunnel_del	= be_del_vxlan_port,
+	.ndo_udp_tunnel_add	= udp_tunnel_nic_add_port,
+	.ndo_udp_tunnel_del	= udp_tunnel_nic_del_port,
 	.ndo_features_check	= be_features_check,
 	.ndo_get_phys_port_id   = be_get_phys_port_id,
 };
@@ -5339,6 +5218,9 @@
 	netdev->netdev_ops = &be_netdev_ops;
 
 	netdev->ethtool_ops = &be_ethtool_ops;
+
+	if (!lancer_chip(adapter) && !BEx_chip(adapter) && !be_is_mc(adapter))
+		netdev->udp_tunnel_nic_info = &be_udp_tunnels;
 
 	/* MTU range: 256 - 9000 */
 	netdev->min_mtu = BE_MIN_MTU;
@@ -5765,9 +5647,9 @@
 	int status = 0;
 
 	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
-	mbox_mem_alloc->va = dma_zalloc_coherent(dev, mbox_mem_alloc->size,
-						 &mbox_mem_alloc->dma,
-						 GFP_KERNEL);
+	mbox_mem_alloc->va = dma_alloc_coherent(dev, mbox_mem_alloc->size,
+						&mbox_mem_alloc->dma,
+						GFP_KERNEL);
 	if (!mbox_mem_alloc->va)
 		return -ENOMEM;
 
@@ -5776,8 +5658,8 @@
 	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
 
 	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
-	rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
-					    &rx_filter->dma, GFP_KERNEL);
+	rx_filter->va = dma_alloc_coherent(dev, rx_filter->size,
+					   &rx_filter->dma, GFP_KERNEL);
 	if (!rx_filter->va) {
 		status = -ENOMEM;
 		goto free_mbox;
@@ -5791,8 +5673,8 @@
 		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
 	else
 		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
-	stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size,
-					    &stats_cmd->dma, GFP_KERNEL);
+	stats_cmd->va = dma_alloc_coherent(dev, stats_cmd->size,
+					   &stats_cmd->dma, GFP_KERNEL);
 	if (!stats_cmd->va) {
 		status = -ENOMEM;
 		goto free_rx_filter;
@@ -5819,7 +5701,6 @@
 	/* Must be a power of 2 or else MODULO will BUG_ON */
 	adapter->be_get_temp_freq = 64;
 
-	INIT_LIST_HEAD(&adapter->vxlan_port_list);
 	return 0;
 
 free_rx_filter:
@@ -5948,8 +5829,6 @@
 	struct net_device *netdev;
 	int status = 0;
 
-	dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);
-
 	status = pci_enable_device(pdev);
 	if (status)
 		goto do_none;
@@ -6040,31 +5919,22 @@
 	return status;
 }
 
-static int be_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused be_suspend(struct device *dev_d)
 {
-	struct be_adapter *adapter = pci_get_drvdata(pdev);
+	struct be_adapter *adapter = dev_get_drvdata(dev_d);
 
 	be_intr_set(adapter, false);
 	be_cancel_err_detection(adapter);
 
 	be_cleanup(adapter);
 
-	pci_save_state(pdev);
-	pci_disable_device(pdev);
-	pci_set_power_state(pdev, pci_choose_state(pdev, state));
 	return 0;
 }
 
-static int be_pci_resume(struct pci_dev *pdev)
+static int __maybe_unused be_pci_resume(struct device *dev_d)
 {
-	struct be_adapter *adapter = pci_get_drvdata(pdev);
+	struct be_adapter *adapter = dev_get_drvdata(dev_d);
 	int status = 0;
-
-	status = pci_enable_device(pdev);
-	if (status)
-		return status;
-
-	pci_restore_state(pdev);
 
 	status = be_resume(adapter);
 	if (status)
@@ -6151,7 +6021,6 @@
 	if (status)
 		return PCI_ERS_RESULT_DISCONNECT;
 
-	pci_cleanup_aer_uncorrect_error_status(pdev);
 	be_clear_error(adapter, BE_CLEAR_ALL);
 	return PCI_ERS_RESULT_RECOVERED;
 }
@@ -6238,13 +6107,14 @@
 	.resume = be_eeh_resume,
 };
 
+static SIMPLE_DEV_PM_OPS(be_pci_pm_ops, be_suspend, be_pci_resume);
+
 static struct pci_driver be_driver = {
 	.name = DRV_NAME,
 	.id_table = be_dev_ids,
 	.probe = be_probe,
 	.remove = be_remove,
-	.suspend = be_suspend,
-	.resume = be_pci_resume,
+	.driver.pm = &be_pci_pm_ops,
 	.shutdown = be_shutdown,
 	.sriov_configure = be_pci_sriov_configure,
 	.err_handler = &be_eeh_handlers

--
Gitblit v1.6.2