forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-08 01573e231f18eb2d99162747186f59511f56b64d
--- a/kernel/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/kernel/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1,11 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 2005 - 2016 Broadcom
  * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation. The full GNU General
- * Public License is included in this distribution in the file called COPYING.
  *
  * Contact Information:
  * linux-drivers@emulex.com
@@ -25,8 +21,7 @@
 #include <net/busy_poll.h>
 #include <net/vxlan.h>
 
-MODULE_VERSION(DRV_VER);
-MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
+MODULE_DESCRIPTION(DRV_DESC);
 MODULE_AUTHOR("Emulex Corporation");
 MODULE_LICENSE("GPL");
 
@@ -167,8 +162,8 @@
 	q->len = len;
 	q->entry_size = entry_size;
 	mem->size = len * entry_size;
-	mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
-				      GFP_KERNEL);
+	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
+				     &mem->dma, GFP_KERNEL);
 	if (!mem->va)
 		return -ENOMEM;
 	return 0;
@@ -796,7 +791,7 @@
 	u16 vlan_tag;
 
 	vlan_tag = skb_vlan_tag_get(skb);
-	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+	vlan_prio = skb_vlan_tag_get_prio(skb);
 	/* If vlan priority provided by OS is NOT in available bmap */
 	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
 		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
@@ -1018,7 +1013,7 @@
 	}
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 		len = skb_frag_size(frag);
 
 		busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
@@ -1049,30 +1044,35 @@
 					     struct be_wrb_params
 					     *wrb_params)
 {
+	bool insert_vlan = false;
 	u16 vlan_tag = 0;
 
 	skb = skb_share_check(skb, GFP_ATOMIC);
 	if (unlikely(!skb))
 		return skb;
 
-	if (skb_vlan_tag_present(skb))
+	if (skb_vlan_tag_present(skb)) {
 		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
+		insert_vlan = true;
+	}
 
 	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
-		if (!vlan_tag)
+		if (!insert_vlan) {
 			vlan_tag = adapter->pvid;
+			insert_vlan = true;
+		}
 		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
 		 * skip VLAN insertion
 		 */
 		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
 	}
 
-	if (vlan_tag) {
+	if (insert_vlan) {
 		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
 						vlan_tag);
 		if (unlikely(!skb))
 			return skb;
-		skb->vlan_tci = 0;
+		__vlan_hwaccel_clear_tag(skb);
 	}
 
 	/* Insert the outer VLAN, if any */
@@ -1265,10 +1265,6 @@
 #define is_arp_allowed_on_bmc(adapter, skb)	\
 	(is_arp(skb) && is_arp_filt_enabled(adapter))
 
-#define is_broadcast_packet(eh, adapter)	\
-	(is_multicast_ether_addr(eh->h_dest) && \
-	!compare_ether_addr(eh->h_dest, adapter->netdev->broadcast))
-
 #define is_arp(skb)	(skb->protocol == htons(ETH_P_ARP))
 
 #define is_arp_filt_enabled(adapter)	\
@@ -1375,7 +1371,7 @@
 	u16 q_idx = skb_get_queue_mapping(skb);
 	struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
 	struct be_wrb_params wrb_params = { 0 };
-	bool flush = !skb->xmit_more;
+	bool flush = !netdev_xmit_more();
 	u16 wrb_cnt;
 
 	skb = be_xmit_workarounds(adapter, skb, &wrb_params);
@@ -1420,7 +1416,7 @@
 	return NETDEV_TX_OK;
 }
 
-static void be_tx_timeout(struct net_device *netdev)
+static void be_tx_timeout(struct net_device *netdev, unsigned int txqueue)
 {
 	struct be_adapter *adapter = netdev_priv(netdev);
 	struct device *dev = &adapter->pdev->dev;
@@ -2150,7 +2146,7 @@
 	int i;
 
 	aic = &adapter->aic_obj[eqo->idx];
-	if (!aic->enable) {
+	if (!adapter->aic_enabled) {
 		if (aic->jiffies)
 			aic->jiffies = 0;
 		eqd = aic->et_eqd;
@@ -2207,7 +2203,7 @@
 	int eqd;
 	u32 mult_enc;
 
-	if (!aic->enable)
+	if (!adapter->aic_enabled)
 		return 0;
 
 	if (jiffies_to_msecs(now - aic->jiffies) < 1)
@@ -2349,8 +2345,8 @@
 		memcpy(skb->data, start, hdr_len);
 		skb_shinfo(skb)->nr_frags = 1;
 		skb_frag_set_page(skb, 0, page_info->page);
-		skb_shinfo(skb)->frags[0].page_offset =
-					page_info->page_offset + hdr_len;
+		skb_frag_off_set(&skb_shinfo(skb)->frags[0],
+				 page_info->page_offset + hdr_len);
 		skb_frag_size_set(&skb_shinfo(skb)->frags[0],
 				  curr_frag_len - hdr_len);
 		skb->data_len = curr_frag_len - hdr_len;
@@ -2375,8 +2371,8 @@
 			/* Fresh page */
 			j++;
 			skb_frag_set_page(skb, j, page_info->page);
-			skb_shinfo(skb)->frags[j].page_offset =
-							page_info->page_offset;
+			skb_frag_off_set(&skb_shinfo(skb)->frags[j],
+					 page_info->page_offset);
 			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
 			skb_shinfo(skb)->nr_frags++;
 		} else {
@@ -2457,8 +2453,8 @@
 			/* First frag or Fresh page */
 			j++;
 			skb_frag_set_page(skb, j, page_info->page);
-			skb_shinfo(skb)->frags[j].page_offset =
-							page_info->page_offset;
+			skb_frag_off_set(&skb_shinfo(skb)->frags[j],
+					 page_info->page_offset);
 			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
 		} else {
 			put_page(page_info->page);
@@ -2962,6 +2958,8 @@
 				    max(adapter->cfg_num_rx_irqs,
 					adapter->cfg_num_tx_irqs));
 
+	adapter->aic_enabled = true;
+
 	for_all_evt_queues(adapter, eqo, i) {
 		int numa_node = dev_to_node(&adapter->pdev->dev);
 
@@ -2969,7 +2967,6 @@
 		eqo->adapter = adapter;
 		eqo->idx = i;
 		aic->max_eqd = BE_MAX_EQD;
-		aic->enable = true;
 
 		eq = &eqo->q;
 		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
@@ -3832,8 +3829,8 @@
 	be_link_status_update(adapter, link_status);
 
 	netif_tx_start_all_queues(netdev);
-	if (skyhawk_chip(adapter))
-		udp_tunnel_get_rx_info(netdev);
+
+	udp_tunnel_nic_reset_ntf(netdev);
 
 	return 0;
 err:
@@ -3970,17 +3967,22 @@
 	}
 }
 
-static int be_enable_vxlan_offloads(struct be_adapter *adapter)
+/* VxLAN offload Notes:
+ *
+ * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
+ * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
+ * is expected to work across all types of IP tunnels once exported. Skyhawk
+ * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
+ * offloads in hw_enc_features only when a VxLAN port is added. If other (non
+ * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
+ * those other tunnels are unexported on the fly through ndo_features_check().
+ */
+static int be_vxlan_set_port(struct net_device *netdev, unsigned int table,
+			     unsigned int entry, struct udp_tunnel_info *ti)
 {
-	struct net_device *netdev = adapter->netdev;
+	struct be_adapter *adapter = netdev_priv(netdev);
 	struct device *dev = &adapter->pdev->dev;
-	struct be_vxlan_port *vxlan_port;
-	__be16 port;
 	int status;
-
-	vxlan_port = list_first_entry(&adapter->vxlan_port_list,
-				      struct be_vxlan_port, list);
-	port = vxlan_port->port;
 
 	status = be_cmd_manage_iface(adapter, adapter->if_handle,
 				     OP_CONVERT_NORMAL_TO_TUNNEL);
@@ -3990,25 +3992,26 @@
 	}
 	adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
 
-	status = be_cmd_set_vxlan_port(adapter, port);
+	status = be_cmd_set_vxlan_port(adapter, ti->port);
 	if (status) {
 		dev_warn(dev, "Failed to add VxLAN port\n");
 		return status;
 	}
-	adapter->vxlan_port = port;
+	adapter->vxlan_port = ti->port;
 
 	netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
 				   NETIF_F_TSO | NETIF_F_TSO6 |
 				   NETIF_F_GSO_UDP_TUNNEL;
 
 	dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
-		 be16_to_cpu(port));
+		 be16_to_cpu(ti->port));
 	return 0;
 }
 
-static void be_disable_vxlan_offloads(struct be_adapter *adapter)
+static int be_vxlan_unset_port(struct net_device *netdev, unsigned int table,
+			       unsigned int entry, struct udp_tunnel_info *ti)
 {
-	struct net_device *netdev = adapter->netdev;
+	struct be_adapter *adapter = netdev_priv(netdev);
 
 	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
 		be_cmd_manage_iface(adapter, adapter->if_handle,
@@ -4021,7 +4024,18 @@
 	adapter->vxlan_port = 0;
 
 	netdev->hw_enc_features = 0;
+	return 0;
 }
+
+static const struct udp_tunnel_nic_info be_udp_tunnels = {
+	.set_port	= be_vxlan_set_port,
+	.unset_port	= be_vxlan_unset_port,
+	.flags		= UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
+			  UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
+	.tables		= {
+		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
+	},
+};
 
 static void be_calculate_vf_res(struct be_adapter *adapter, u16 num_vfs,
 				struct be_resources *vft_res)
@@ -4138,7 +4152,7 @@
 					&vft_res);
 	}
 
-	be_disable_vxlan_offloads(adapter);
+	be_vxlan_unset_port(adapter->netdev, 0, 0, NULL);
 
 	be_if_destroy(adapter);
 
@@ -4701,8 +4715,13 @@
 	int status;
 
 	if (netif_running(netdev)) {
+		/* be_tx_timeout() must not run concurrently with this
+		 * function, synchronize with an already-running dev_watchdog
+		 */
+		netif_tx_lock_bh(netdev);
 		/* device cannot transmit now, avoid dev_watchdog timeouts */
 		netif_carrier_off(netdev);
+		netif_tx_unlock_bh(netdev);
 
 		be_close(netdev);
 	}
@@ -4954,7 +4973,7 @@
 }
 
 static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
-				 u16 flags)
+				 u16 flags, struct netlink_ext_ack *extack)
 {
 	struct be_adapter *adapter = netdev_priv(dev);
 	struct nlattr *attr, *br_spec;
@@ -5049,147 +5068,6 @@
 	INIT_WORK(&work->work, func);
 	work->adapter = adapter;
 	return work;
-}
-
-/* VxLAN offload Notes:
- *
- * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
- * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
- * is expected to work across all types of IP tunnels once exported. Skyhawk
- * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
- * offloads in hw_enc_features only when a VxLAN port is added. If other (non
- * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
- * those other tunnels are unexported on the fly through ndo_features_check().
- *
- * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
- * adds more than one port, disable offloads and re-enable them again when
- * there's only one port left. We maintain a list of ports for this purpose.
- */
-static void be_work_add_vxlan_port(struct work_struct *work)
-{
-	struct be_cmd_work *cmd_work =
-				container_of(work, struct be_cmd_work, work);
-	struct be_adapter *adapter = cmd_work->adapter;
-	struct device *dev = &adapter->pdev->dev;
-	__be16 port = cmd_work->info.vxlan_port;
-	struct be_vxlan_port *vxlan_port;
-	int status;
-
-	/* Bump up the alias count if it is an existing port */
-	list_for_each_entry(vxlan_port, &adapter->vxlan_port_list, list) {
-		if (vxlan_port->port == port) {
-			vxlan_port->port_aliases++;
-			goto done;
-		}
-	}
-
-	/* Add a new port to our list. We don't need a lock here since port
-	 * add/delete are done only in the context of a single-threaded work
-	 * queue (be_wq).
-	 */
-	vxlan_port = kzalloc(sizeof(*vxlan_port), GFP_KERNEL);
-	if (!vxlan_port)
-		goto done;
-
-	vxlan_port->port = port;
-	INIT_LIST_HEAD(&vxlan_port->list);
-	list_add_tail(&vxlan_port->list, &adapter->vxlan_port_list);
-	adapter->vxlan_port_count++;
-
-	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
-		dev_info(dev,
-			 "Only one UDP port supported for VxLAN offloads\n");
-		dev_info(dev, "Disabling VxLAN offloads\n");
-		goto err;
-	}
-
-	if (adapter->vxlan_port_count > 1)
-		goto done;
-
-	status = be_enable_vxlan_offloads(adapter);
-	if (!status)
-		goto done;
-
-err:
-	be_disable_vxlan_offloads(adapter);
-done:
-	kfree(cmd_work);
-	return;
-}
-
-static void be_work_del_vxlan_port(struct work_struct *work)
-{
-	struct be_cmd_work *cmd_work =
-				container_of(work, struct be_cmd_work, work);
-	struct be_adapter *adapter = cmd_work->adapter;
-	__be16 port = cmd_work->info.vxlan_port;
-	struct be_vxlan_port *vxlan_port;
-
-	/* Nothing to be done if a port alias is being deleted */
-	list_for_each_entry(vxlan_port, &adapter->vxlan_port_list, list) {
-		if (vxlan_port->port == port) {
-			if (vxlan_port->port_aliases) {
-				vxlan_port->port_aliases--;
-				goto done;
-			}
-			break;
-		}
-	}
-
-	/* No port aliases left; delete the port from the list */
-	list_del(&vxlan_port->list);
-	adapter->vxlan_port_count--;
-
-	/* Disable VxLAN offload if this is the offloaded port */
-	if (adapter->vxlan_port == vxlan_port->port) {
-		WARN_ON(adapter->vxlan_port_count);
-		be_disable_vxlan_offloads(adapter);
-		dev_info(&adapter->pdev->dev,
-			 "Disabled VxLAN offloads for UDP port %d\n",
-			 be16_to_cpu(port));
-		goto out;
-	}
-
-	/* If only 1 port is left, re-enable VxLAN offload */
-	if (adapter->vxlan_port_count == 1)
-		be_enable_vxlan_offloads(adapter);
-
-out:
-	kfree(vxlan_port);
-done:
-	kfree(cmd_work);
-}
-
-static void be_cfg_vxlan_port(struct net_device *netdev,
-			      struct udp_tunnel_info *ti,
-			      void (*func)(struct work_struct *))
-{
-	struct be_adapter *adapter = netdev_priv(netdev);
-	struct be_cmd_work *cmd_work;
-
-	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
-		return;
-
-	if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
-		return;
-
-	cmd_work = be_alloc_work(adapter, func);
-	if (cmd_work) {
-		cmd_work->info.vxlan_port = ti->port;
-		queue_work(be_wq, &cmd_work->work);
-	}
-}
-
-static void be_del_vxlan_port(struct net_device *netdev,
-			      struct udp_tunnel_info *ti)
-{
-	be_cfg_vxlan_port(netdev, ti, be_work_del_vxlan_port);
-}
-
-static void be_add_vxlan_port(struct net_device *netdev,
-			      struct udp_tunnel_info *ti)
-{
-	be_cfg_vxlan_port(netdev, ti, be_work_add_vxlan_port);
 }
 
 static netdev_features_t be_features_check(struct sk_buff *skb,
@@ -5307,8 +5185,8 @@
 #endif
 	.ndo_bridge_setlink	= be_ndo_bridge_setlink,
 	.ndo_bridge_getlink	= be_ndo_bridge_getlink,
-	.ndo_udp_tunnel_add	= be_add_vxlan_port,
-	.ndo_udp_tunnel_del	= be_del_vxlan_port,
+	.ndo_udp_tunnel_add	= udp_tunnel_nic_add_port,
+	.ndo_udp_tunnel_del	= udp_tunnel_nic_del_port,
 	.ndo_features_check	= be_features_check,
 	.ndo_get_phys_port_id	= be_get_phys_port_id,
 };
@@ -5339,6 +5217,9 @@
 	netdev->netdev_ops = &be_netdev_ops;
 
 	netdev->ethtool_ops = &be_ethtool_ops;
+
+	if (!lancer_chip(adapter) && !BEx_chip(adapter) && !be_is_mc(adapter))
+		netdev->udp_tunnel_nic_info = &be_udp_tunnels;
 
 	/* MTU range: 256 - 9000 */
 	netdev->min_mtu = BE_MIN_MTU;
@@ -5765,9 +5646,9 @@
 	int status = 0;
 
 	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
-	mbox_mem_alloc->va = dma_zalloc_coherent(dev, mbox_mem_alloc->size,
-						 &mbox_mem_alloc->dma,
-						 GFP_KERNEL);
+	mbox_mem_alloc->va = dma_alloc_coherent(dev, mbox_mem_alloc->size,
+						&mbox_mem_alloc->dma,
+						GFP_KERNEL);
 	if (!mbox_mem_alloc->va)
 		return -ENOMEM;
 
@@ -5776,8 +5657,8 @@
 	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
 
 	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
-	rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
-					    &rx_filter->dma, GFP_KERNEL);
+	rx_filter->va = dma_alloc_coherent(dev, rx_filter->size,
+					   &rx_filter->dma, GFP_KERNEL);
 	if (!rx_filter->va) {
 		status = -ENOMEM;
 		goto free_mbox;
@@ -5791,8 +5672,8 @@
 		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
 	else
 		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
-	stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size,
-					    &stats_cmd->dma, GFP_KERNEL);
+	stats_cmd->va = dma_alloc_coherent(dev, stats_cmd->size,
+					   &stats_cmd->dma, GFP_KERNEL);
 	if (!stats_cmd->va) {
 		status = -ENOMEM;
 		goto free_rx_filter;
@@ -5819,7 +5700,6 @@
 	/* Must be a power of 2 or else MODULO will BUG_ON */
 	adapter->be_get_temp_freq = 64;
 
-	INIT_LIST_HEAD(&adapter->vxlan_port_list);
 	return 0;
 
 free_rx_filter:
@@ -5948,8 +5828,6 @@
 	struct net_device *netdev;
 	int status = 0;
 
-	dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);
-
 	status = pci_enable_device(pdev);
 	if (status)
 		goto do_none;
@@ -6040,31 +5918,22 @@
 	return status;
 }
 
-static int be_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused be_suspend(struct device *dev_d)
 {
-	struct be_adapter *adapter = pci_get_drvdata(pdev);
+	struct be_adapter *adapter = dev_get_drvdata(dev_d);
 
 	be_intr_set(adapter, false);
 	be_cancel_err_detection(adapter);
 
 	be_cleanup(adapter);
 
-	pci_save_state(pdev);
-	pci_disable_device(pdev);
-	pci_set_power_state(pdev, pci_choose_state(pdev, state));
 	return 0;
 }
 
-static int be_pci_resume(struct pci_dev *pdev)
+static int __maybe_unused be_pci_resume(struct device *dev_d)
 {
-	struct be_adapter *adapter = pci_get_drvdata(pdev);
+	struct be_adapter *adapter = dev_get_drvdata(dev_d);
 	int status = 0;
-
-	status = pci_enable_device(pdev);
-	if (status)
-		return status;
-
-	pci_restore_state(pdev);
 
 	status = be_resume(adapter);
 	if (status)
@@ -6151,7 +6020,6 @@
 	if (status)
 		return PCI_ERS_RESULT_DISCONNECT;
 
-	pci_cleanup_aer_uncorrect_error_status(pdev);
 	be_clear_error(adapter, BE_CLEAR_ALL);
 	return PCI_ERS_RESULT_RECOVERED;
 }
@@ -6238,13 +6106,14 @@
 	.resume = be_eeh_resume,
 };
 
+static SIMPLE_DEV_PM_OPS(be_pci_pm_ops, be_suspend, be_pci_resume);
+
 static struct pci_driver be_driver = {
 	.name = DRV_NAME,
 	.id_table = be_dev_ids,
 	.probe = be_probe,
 	.remove = be_remove,
-	.suspend = be_suspend,
-	.resume = be_pci_resume,
+	.driver.pm = &be_pci_pm_ops,
 	.shutdown = be_shutdown,
 	.sriov_configure = be_pci_sriov_configure,
 	.err_handler = &be_eeh_handlers