@@ -5,18 +5,27 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <generated/utsrelease.h>
 #include "ice.h"
+#include "ice_base.h"
+#include "ice_lib.h"
+#include "ice_fltr.h"
+#include "ice_dcb_lib.h"
+#include "ice_dcb_nl.h"
+#include "ice_devlink.h"
 
-#define DRV_VERSION	"0.7.1-k"
 #define DRV_SUMMARY	"Intel(R) Ethernet Connection E800 Series Linux Driver"
-const char ice_drv_ver[] = DRV_VERSION;
 static const char ice_driver_string[] = DRV_SUMMARY;
 static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";
 
+/* DDP Package file located in firmware search paths (e.g. /lib/firmware/) */
+#define ICE_DDP_PKG_PATH	"intel/ice/ddp/"
+#define ICE_DDP_PKG_FILE	ICE_DDP_PKG_PATH "ice.pkg"
+
 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
 MODULE_DESCRIPTION(DRV_SUMMARY);
-MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
+MODULE_LICENSE("GPL v2");
+MODULE_FIRMWARE(ICE_DDP_PKG_FILE);
 
 static int debug = -1;
 module_param(debug, int, 0644);
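The new ICE_DDP_PKG_FILE definition above relies on C's adjacent string literal concatenation: the path prefix macro and the file name merge into one literal at compile time, and that literal is what MODULE_FIRMWARE() records. A minimal standalone sketch of the same pattern; the path comes from the hunk above, everything else is demo scaffolding:

```c
#include <stdio.h>

/* Same pattern as the ICE_DDP_PKG_PATH / ICE_DDP_PKG_FILE pair above:
 * adjacent string literals are concatenated by the compiler into one.
 */
#define DDP_PKG_PATH "intel/ice/ddp/"
#define DDP_PKG_FILE DDP_PKG_PATH "ice.pkg"

int main(void)
{
	printf("%s\n", DDP_PKG_FILE); /* prints "intel/ice/ddp/ice.pkg" */
	return 0;
}
```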
@@ -27,237 +36,172 @@
 #endif /* !CONFIG_DYNAMIC_DEBUG */
 
 static struct workqueue_struct *ice_wq;
+static const struct net_device_ops ice_netdev_safe_mode_ops;
 static const struct net_device_ops ice_netdev_ops;
+static int ice_vsi_open(struct ice_vsi *vsi);
 
-static void ice_pf_dis_all_vsi(struct ice_pf *pf);
-static void ice_rebuild(struct ice_pf *pf);
-static int ice_vsi_release(struct ice_vsi *vsi);
-static void ice_update_vsi_stats(struct ice_vsi *vsi);
-static void ice_update_pf_stats(struct ice_pf *pf);
+static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);
+
+static void ice_vsi_release_all(struct ice_pf *pf);
 
 /**
- * ice_get_free_slot - get the next non-NULL location index in array
- * @array: array to search
- * @size: size of the array
- * @curr: last known occupied index to be used as a search hint
- *
- * void * is being used to keep the functionality generic. This lets us use this
- * function on any array of pointers.
+ * ice_get_tx_pending - returns number of Tx descriptors not processed
+ * @ring: the ring of descriptors
  */
-static int ice_get_free_slot(void *array, int size, int curr)
+static u16 ice_get_tx_pending(struct ice_ring *ring)
 {
-	int **tmp_array = (int **)array;
-	int next;
+	u16 head, tail;
 
-	if (curr < (size - 1) && !tmp_array[curr + 1]) {
-		next = curr + 1;
-	} else {
-		int i = 0;
+	head = ring->next_to_clean;
+	tail = ring->next_to_use;
 
-		while ((i < size) && (tmp_array[i]))
-			i++;
-		if (i == size)
-			next = ICE_NO_VSI;
-		else
-			next = i;
-	}
-	return next;
-}
-
-/**
- * ice_search_res - Search the tracker for a block of resources
- * @res: pointer to the resource
- * @needed: size of the block needed
- * @id: identifier to track owner
- * Returns the base item index of the block, or -ENOMEM for error
- */
-static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id)
-{
-	int start = res->search_hint;
-	int end = start;
-
-	id |= ICE_RES_VALID_BIT;
-
-	do {
-		/* skip already allocated entries */
-		if (res->list[end++] & ICE_RES_VALID_BIT) {
-			start = end;
-			if ((start + needed) > res->num_entries)
-				break;
-		}
-
-		if (end == (start + needed)) {
-			int i = start;
-
-			/* there was enough, so assign it to the requestor */
-			while (i != end)
-				res->list[i++] = id;
-
-			if (end == res->num_entries)
-				end = 0;
-
-			res->search_hint = end;
-			return start;
-		}
-	} while (1);
-
-	return -ENOMEM;
-}
-
-/**
- * ice_get_res - get a block of resources
- * @pf: board private structure
- * @res: pointer to the resource
- * @needed: size of the block needed
- * @id: identifier to track owner
- *
- * Returns the base item index of the block, or -ENOMEM for error
- * The search_hint trick and lack of advanced fit-finding only works
- * because we're highly likely to have all the same sized requests.
- * Linear search time and any fragmentation should be minimal.
- */
-static int
-ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id)
-{
-	int ret;
-
-	if (!res || !pf)
-		return -EINVAL;
-
-	if (!needed || needed > res->num_entries || id >= ICE_RES_VALID_BIT) {
-		dev_err(&pf->pdev->dev,
-			"param err: needed=%d, num_entries = %d id=0x%04x\n",
-			needed, res->num_entries, id);
-		return -EINVAL;
-	}
-
-	/* search based on search_hint */
-	ret = ice_search_res(res, needed, id);
-
-	if (ret < 0) {
-		/* previous search failed. Reset search hint and try again */
-		res->search_hint = 0;
-		ret = ice_search_res(res, needed, id);
-	}
-
-	return ret;
-}
-
-/**
- * ice_free_res - free a block of resources
- * @res: pointer to the resource
- * @index: starting index previously returned by ice_get_res
- * @id: identifier to track owner
- * Returns number of resources freed
- */
-static int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id)
-{
-	int count = 0;
-	int i;
-
-	if (!res || index >= res->num_entries)
-		return -EINVAL;
-
-	id |= ICE_RES_VALID_BIT;
-	for (i = index; i < res->num_entries && res->list[i] == id; i++) {
-		res->list[i] = 0;
-		count++;
-	}
-
-	return count;
-}
-
-/**
- * ice_add_mac_to_list - Add a mac address filter entry to the list
- * @vsi: the VSI to be forwarded to
- * @add_list: pointer to the list which contains MAC filter entries
- * @macaddr: the MAC address to be added.
- *
- * Adds mac address filter entry to the temp list
- *
- * Returns 0 on success or ENOMEM on failure.
- */
-static int ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list,
-			       const u8 *macaddr)
-{
-	struct ice_fltr_list_entry *tmp;
-	struct ice_pf *pf = vsi->back;
-
-	tmp = devm_kzalloc(&pf->pdev->dev, sizeof(*tmp), GFP_ATOMIC);
-	if (!tmp)
-		return -ENOMEM;
-
-	tmp->fltr_info.flag = ICE_FLTR_TX;
-	tmp->fltr_info.src = vsi->vsi_num;
-	tmp->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
-	tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
-	tmp->fltr_info.fwd_id.vsi_id = vsi->vsi_num;
-	ether_addr_copy(tmp->fltr_info.l_data.mac.mac_addr, macaddr);
-
-	INIT_LIST_HEAD(&tmp->list_entry);
-	list_add(&tmp->list_entry, add_list);
-
+	if (head != tail)
+		return (head < tail) ?
+			tail - head : (tail + ring->count - head);
 	return 0;
 }
 
 /**
- * ice_add_mac_to_sync_list - creates list of mac addresses to be synced
+ * ice_check_for_hang_subtask - check for and recover hung queues
+ * @pf: pointer to PF struct
+ */
+static void ice_check_for_hang_subtask(struct ice_pf *pf)
+{
+	struct ice_vsi *vsi = NULL;
+	struct ice_hw *hw;
+	unsigned int i;
+	int packets;
+	u32 v;
+
+	ice_for_each_vsi(pf, v)
+		if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) {
+			vsi = pf->vsi[v];
+			break;
+		}
+
+	if (!vsi || test_bit(__ICE_DOWN, vsi->state))
+		return;
+
+	if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))
+		return;
+
+	hw = &vsi->back->hw;
+
+	for (i = 0; i < vsi->num_txq; i++) {
+		struct ice_ring *tx_ring = vsi->tx_rings[i];
+
+		if (tx_ring && tx_ring->desc) {
+			/* If packet counter has not changed the queue is
+			 * likely stalled, so force an interrupt for this
+			 * queue.
+			 *
+			 * prev_pkt would be negative if there was no
+			 * pending work.
+			 */
+			packets = tx_ring->stats.pkts & INT_MAX;
+			if (tx_ring->tx_stats.prev_pkt == packets) {
+				/* Trigger sw interrupt to revive the queue */
+				ice_trigger_sw_intr(hw, tx_ring->q_vector);
+				continue;
+			}
+
+			/* Memory barrier between read of packet count and call
+			 * to ice_get_tx_pending()
+			 */
+			smp_rmb();
+			tx_ring->tx_stats.prev_pkt =
+				ice_get_tx_pending(tx_ring) ? packets : -1;
+		}
+	}
+}
+
+/**
+ * ice_init_mac_fltr - Set initial MAC filters
+ * @pf: board private structure
+ *
+ * Set initial set of MAC filters for PF VSI; configure filters for permanent
+ * address and broadcast address. If an error is encountered, netdevice will be
+ * unregistered.
+ */
+static int ice_init_mac_fltr(struct ice_pf *pf)
+{
+	enum ice_status status;
+	struct ice_vsi *vsi;
+	u8 *perm_addr;
+
+	vsi = ice_get_main_vsi(pf);
+	if (!vsi)
+		return -EINVAL;
+
+	perm_addr = vsi->port_info->mac.perm_addr;
+	status = ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI);
+	if (!status)
+		return 0;
+
+	/* We aren't useful with no MAC filters, so unregister if we
+	 * had an error
+	 */
+	if (vsi->netdev->reg_state == NETREG_REGISTERED) {
+		dev_err(ice_pf_to_dev(pf), "Could not add MAC filters error %s. Unregistering device\n",
+			ice_stat_str(status));
+		unregister_netdev(vsi->netdev);
+		free_netdev(vsi->netdev);
+		vsi->netdev = NULL;
+	}
+
+	return -EIO;
+}
+
+/**
+ * ice_add_mac_to_sync_list - creates list of MAC addresses to be synced
  * @netdev: the net device on which the sync is happening
- * @addr: mac address to sync
+ * @addr: MAC address to sync
  *
  * This is a callback function which is called by the in kernel device sync
  * functions (like __dev_uc_sync, __dev_mc_sync, etc). This function only
  * populates the tmp_sync_list, which is later used by ice_add_mac to add the
- * mac filters from the hardware.
+ * MAC filters from the hardware.
  */
 static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
 {
 	struct ice_netdev_priv *np = netdev_priv(netdev);
 	struct ice_vsi *vsi = np->vsi;
 
-	if (ice_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr))
+	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr,
+				     ICE_FWD_TO_VSI))
 		return -EINVAL;
 
 	return 0;
 }
 
 /**
- * ice_add_mac_to_unsync_list - creates list of mac addresses to be unsynced
+ * ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced
  * @netdev: the net device on which the unsync is happening
- * @addr: mac address to unsync
+ * @addr: MAC address to unsync
  *
  * This is a callback function which is called by the in kernel device unsync
  * functions (like __dev_uc_unsync, __dev_mc_unsync, etc). This function only
 * populates the tmp_unsync_list, which is later used by ice_remove_mac to
- * delete the mac filters from the hardware.
+ * delete the MAC filters from the hardware.
 */
 static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
 {
 	struct ice_netdev_priv *np = netdev_priv(netdev);
 	struct ice_vsi *vsi = np->vsi;
 
-	if (ice_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr))
+	/* Under some circumstances, we might receive a request to delete our
+	 * own device address from our uc list. Because we store the device
+	 * address in the VSI's MAC filter list, we need to ignore such
+	 * requests and not delete our device address from this list.
+	 */
+	if (ether_addr_equal(addr, netdev->dev_addr))
+		return 0;
+
+	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr,
+				     ICE_FWD_TO_VSI))
 		return -EINVAL;
 
 	return 0;
-}
-
-/**
- * ice_free_fltr_list - free filter lists helper
- * @dev: pointer to the device struct
- * @h: pointer to the list head to be freed
- *
- * Helper function to free filter lists previously created using
- * ice_add_mac_to_list
- */
-static void ice_free_fltr_list(struct device *dev, struct list_head *h)
-{
-	struct ice_fltr_list_entry *e, *tmp;
-
-	list_for_each_entry_safe(e, tmp, h, list_entry) {
-		list_del(&e->list_entry);
-		devm_kfree(dev, e);
-	}
 }
 
 /**
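The ice_get_tx_pending() helper added above measures outstanding work on a Tx ring from two cursors, next_to_clean (head) and next_to_use (tail), handling the wrap of a circular buffer. A self-contained sketch of the same arithmetic; the struct and the numbers are illustrative, not the driver's types:

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative ring bookkeeping: next_to_use is where new descriptors are
 * queued ("tail"), next_to_clean is where completion processing resumes
 * ("head").
 */
struct demo_ring {
	uint16_t count;         /* total descriptors in the ring */
	uint16_t next_to_use;
	uint16_t next_to_clean;
};

static uint16_t demo_tx_pending(const struct demo_ring *r)
{
	uint16_t head = r->next_to_clean, tail = r->next_to_use;

	if (head == tail)
		return 0;                            /* nothing outstanding */
	return head < tail ? tail - head             /* no wraparound */
			   : tail + r->count - head; /* tail wrapped past end */
}

int main(void)
{
	struct demo_ring r = { .count = 256, .next_to_use = 10, .next_to_clean = 250 };

	/* tail wrapped: 10 + 256 - 250 = 16 descriptors still pending */
	printf("%u\n", demo_tx_pending(&r));
	return 0;
}
```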
@@ -274,6 +218,39 @@
 }
 
 /**
+ * ice_cfg_promisc - Enable or disable promiscuous mode for a given PF
+ * @vsi: the VSI being configured
+ * @promisc_m: mask of promiscuous config bits
+ * @set_promisc: enable or disable promisc flag request
+ *
+ */
+static int ice_cfg_promisc(struct ice_vsi *vsi, u8 promisc_m, bool set_promisc)
+{
+	struct ice_hw *hw = &vsi->back->hw;
+	enum ice_status status = 0;
+
+	if (vsi->type != ICE_VSI_PF)
+		return 0;
+
+	if (vsi->vlan_ena) {
+		status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
+						  set_promisc);
+	} else {
+		if (set_promisc)
+			status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
+						     0);
+		else
+			status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
+						       0);
+	}
+
+	if (status)
+		return -EIO;
+
+	return 0;
+}
+
+/**
  * ice_vsi_sync_fltr - Update the VSI filter list to the HW
  * @vsi: ptr to the VSI
  *
@@ -281,13 +258,14 @@
  */
 static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
 {
-	struct device *dev = &vsi->back->pdev->dev;
+	struct device *dev = ice_pf_to_dev(vsi->back);
 	struct net_device *netdev = vsi->netdev;
 	bool promisc_forced_on = false;
 	struct ice_pf *pf = vsi->back;
 	struct ice_hw *hw = &pf->hw;
 	enum ice_status status = 0;
 	u32 changed_flags = 0;
+	u8 promisc_m;
 	int err = 0;
 
 	if (!vsi->netdev)
@@ -317,9 +295,9 @@
 		netif_addr_unlock_bh(netdev);
 	}
 
-	/* Remove mac addresses in the unsync list */
-	status = ice_remove_mac(hw, &vsi->tmp_unsync_list);
-	ice_free_fltr_list(dev, &vsi->tmp_unsync_list);
+	/* Remove MAC addresses in the unsync list */
+	status = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list);
+	ice_fltr_free_list(dev, &vsi->tmp_unsync_list);
 	if (status) {
 		netdev_err(netdev, "Failed to delete MAC filters\n");
 		/* if we failed because of alloc failures, just bail */
@@ -329,12 +307,16 @@
 		}
 	}
 
-	/* Add mac addresses in the sync list */
-	status = ice_add_mac(hw, &vsi->tmp_sync_list);
-	ice_free_fltr_list(dev, &vsi->tmp_sync_list);
-	if (status) {
+	/* Add MAC addresses in the sync list */
+	status = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list);
+	ice_fltr_free_list(dev, &vsi->tmp_sync_list);
+	/* If filter is added successfully or already exists, do not go into
+	 * 'if' condition and report it as error. Instead continue processing
+	 * rest of the function.
+	 */
+	if (status && status != ICE_ERR_ALREADY_EXISTS) {
 		netdev_err(netdev, "Failed to add MAC filters\n");
-		/* If there is no more space for new umac filters, vsi
+		/* If there is no more space for new umac filters, VSI
 		 * should go into promiscuous mode. There should be some
 		 * space reserved for promiscuous filters.
 		 */
@@ -342,8 +324,7 @@
 		    !test_and_set_bit(__ICE_FLTR_OVERFLOW_PROMISC,
 				      vsi->state)) {
 			promisc_forced_on = true;
-			netdev_warn(netdev,
-				    "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
+			netdev_warn(netdev, "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
 				    vsi->vsi_num);
 		} else {
 			err = -EIO;
@@ -351,53 +332,66 @@
 		}
 	}
 	/* check for changes in promiscuous modes */
-	if (changed_flags & IFF_ALLMULTI)
-		netdev_warn(netdev, "Unsupported configuration\n");
+	if (changed_flags & IFF_ALLMULTI) {
+		if (vsi->current_netdev_flags & IFF_ALLMULTI) {
+			if (vsi->vlan_ena)
+				promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
+			else
+				promisc_m = ICE_MCAST_PROMISC_BITS;
+
+			err = ice_cfg_promisc(vsi, promisc_m, true);
+			if (err) {
+				netdev_err(netdev, "Error setting Multicast promiscuous mode on VSI %i\n",
+					   vsi->vsi_num);
+				vsi->current_netdev_flags &= ~IFF_ALLMULTI;
+				goto out_promisc;
+			}
+		} else {
+			/* !(vsi->current_netdev_flags & IFF_ALLMULTI) */
+			if (vsi->vlan_ena)
+				promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
+			else
+				promisc_m = ICE_MCAST_PROMISC_BITS;
+
+			err = ice_cfg_promisc(vsi, promisc_m, false);
+			if (err) {
+				netdev_err(netdev, "Error clearing Multicast promiscuous mode on VSI %i\n",
+					   vsi->vsi_num);
+				vsi->current_netdev_flags |= IFF_ALLMULTI;
+				goto out_promisc;
+			}
+		}
+	}
 
 	if (((changed_flags & IFF_PROMISC) || promisc_forced_on) ||
 	    test_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags)) {
 		clear_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags);
 		if (vsi->current_netdev_flags & IFF_PROMISC) {
-			/* Apply TX filter rule to get traffic from VMs */
-			status = ice_cfg_dflt_vsi(hw, vsi->vsi_num, true,
-						  ICE_FLTR_TX);
-			if (status) {
-				netdev_err(netdev, "Error setting default VSI %i tx rule\n",
-					   vsi->vsi_num);
-				vsi->current_netdev_flags &= ~IFF_PROMISC;
-				err = -EIO;
-				goto out_promisc;
-			}
-			/* Apply RX filter rule to get traffic from wire */
-			status = ice_cfg_dflt_vsi(hw, vsi->vsi_num, true,
-						  ICE_FLTR_RX);
-			if (status) {
-				netdev_err(netdev, "Error setting default VSI %i rx rule\n",
-					   vsi->vsi_num);
-				vsi->current_netdev_flags &= ~IFF_PROMISC;
-				err = -EIO;
-				goto out_promisc;
+			/* Apply Rx filter rule to get traffic from wire */
+			if (!ice_is_dflt_vsi_in_use(pf->first_sw)) {
+				err = ice_set_dflt_vsi(pf->first_sw, vsi);
+				if (err && err != -EEXIST) {
+					netdev_err(netdev, "Error %d setting default VSI %i Rx rule\n",
+						   err, vsi->vsi_num);
+					vsi->current_netdev_flags &=
+						~IFF_PROMISC;
+					goto out_promisc;
+				}
+				ice_cfg_vlan_pruning(vsi, false, false);
 			}
 		} else {
-			/* Clear TX filter rule to stop traffic from VMs */
-			status = ice_cfg_dflt_vsi(hw, vsi->vsi_num, false,
-						  ICE_FLTR_TX);
-			if (status) {
-				netdev_err(netdev, "Error clearing default VSI %i tx rule\n",
-					   vsi->vsi_num);
-				vsi->current_netdev_flags |= IFF_PROMISC;
-				err = -EIO;
-				goto out_promisc;
-			}
-			/* Clear filter RX to remove traffic from wire */
-			status = ice_cfg_dflt_vsi(hw, vsi->vsi_num, false,
-						  ICE_FLTR_RX);
-			if (status) {
-				netdev_err(netdev, "Error clearing default VSI %i rx rule\n",
-					   vsi->vsi_num);
-				vsi->current_netdev_flags |= IFF_PROMISC;
-				err = -EIO;
-				goto out_promisc;
+			/* Clear Rx filter to remove traffic from wire */
+			if (ice_is_vsi_dflt_vsi(pf->first_sw, vsi)) {
+				err = ice_clear_dflt_vsi(pf->first_sw);
+				if (err) {
+					netdev_err(netdev, "Error %d clearing default VSI %i Rx rule\n",
+						   err, vsi->vsi_num);
+					vsi->current_netdev_flags |=
+						IFF_PROMISC;
+					goto out_promisc;
+				}
+				if (vsi->num_vlan > 1)
+					ice_cfg_vlan_pruning(vsi, true, false);
 			}
 		}
 	}
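ice_vsi_sync_fltr() above reacts only to flag transitions: changed_flags carries a set bit for each netdev flag that toggled since the last sync, which is why the IFF_ALLMULTI and IFF_PROMISC paths fire once per change rather than on every service-task pass. A small sketch of the usual way such a mask is derived, an XOR of cached and current flags; the values here are hypothetical:

```c
#include <stdio.h>

/* Flag values as in <linux/if.h>, repeated so the sketch is standalone. */
#define IFF_PROMISC  0x100
#define IFF_ALLMULTI 0x200

int main(void)
{
	unsigned int cached = IFF_ALLMULTI;                /* flags last synced */
	unsigned int current = IFF_ALLMULTI | IFF_PROMISC; /* netdev flags now */
	unsigned int changed_flags = cached ^ current;     /* set bit = toggled */

	if (changed_flags & IFF_ALLMULTI)
		printf("ALLMULTI toggled: reprogram multicast promisc\n");
	if (changed_flags & IFF_PROMISC)
		printf("PROMISC toggled: set or clear default-VSI Rx rule\n");
	return 0;
}
```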
@@ -428,7 +422,7 @@
 
 	clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
 
-	for (v = 0; v < pf->num_alloc_vsi; v++)
+	ice_for_each_vsi(pf, v)
 		if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) &&
 		    ice_vsi_sync_fltr(pf->vsi[v])) {
 			/* come back and try again later */
@@ -438,12 +432,17 @@
 }
 
 /**
- * ice_is_reset_recovery_pending - schedule a reset
- * @state: pf state field
+ * ice_pf_dis_all_vsi - Pause all VSIs on a PF
+ * @pf: the PF
+ * @locked: is the rtnl_lock already held
  */
-static bool ice_is_reset_recovery_pending(unsigned long int *state)
+static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
 {
-	return test_bit(__ICE_RESET_RECOVERY_PENDING, state);
+	int v;
+
+	ice_for_each_vsi(pf, v)
+		if (pf->vsi[v])
+			ice_dis_vsi(pf->vsi[v], locked);
 }
 
 /**
@@ -456,23 +455,31 @@
 ice_prepare_for_reset(struct ice_pf *pf)
 {
 	struct ice_hw *hw = &pf->hw;
-	u32 v;
+	unsigned int i;
 
-	ice_for_each_vsi(pf, v)
-		if (pf->vsi[v])
-			ice_remove_vsi_fltr(hw, pf->vsi[v]->vsi_num);
+	/* already prepared for reset */
+	if (test_bit(__ICE_PREPARED_FOR_RESET, pf->state))
+		return;
 
-	dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
+	/* Notify VFs of impending reset */
+	if (ice_check_sq_alive(hw, &hw->mailboxq))
+		ice_vc_notify_reset(pf);
 
+	/* Disable VFs until reset is completed */
+	ice_for_each_vf(pf, i)
+		ice_set_vf_state_qs_dis(&pf->vf[i]);
+
+	/* clear SW filtering DB */
+	ice_clear_hw_tbls(hw);
 	/* disable the VSIs and their queues that are not already DOWN */
-	/* pf_dis_all_vsi modifies netdev structures -rtnl_lock needed */
-	ice_pf_dis_all_vsi(pf);
+	ice_pf_dis_all_vsi(pf, false);
 
-	ice_for_each_vsi(pf, v)
-		if (pf->vsi[v])
-			pf->vsi[v]->vsi_num = 0;
+	if (hw->port_info)
+		ice_sched_clear_port(hw->port_info);
 
 	ice_shutdown_all_ctrlq(hw);
+
+	set_bit(__ICE_PREPARED_FOR_RESET, pf->state);
 }
 
 /**
@@ -483,33 +490,35 @@
  */
 static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
 {
-	struct device *dev = &pf->pdev->dev;
+	struct device *dev = ice_pf_to_dev(pf);
 	struct ice_hw *hw = &pf->hw;
 
 	dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);
-	WARN_ON(in_interrupt());
 
-	/* PFR is a bit of a special case because it doesn't result in an OICR
-	 * interrupt. So for PFR, we prepare for reset, issue the reset and
-	 * rebuild sequentially.
-	 */
-	if (reset_type == ICE_RESET_PFR) {
-		set_bit(__ICE_RESET_RECOVERY_PENDING, pf->state);
-		ice_prepare_for_reset(pf);
-	}
+	ice_prepare_for_reset(pf);
 
 	/* trigger the reset */
 	if (ice_reset(hw, reset_type)) {
 		dev_err(dev, "reset %d failed\n", reset_type);
 		set_bit(__ICE_RESET_FAILED, pf->state);
-		clear_bit(__ICE_RESET_RECOVERY_PENDING, pf->state);
+		clear_bit(__ICE_RESET_OICR_RECV, pf->state);
+		clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
+		clear_bit(__ICE_PFR_REQ, pf->state);
+		clear_bit(__ICE_CORER_REQ, pf->state);
+		clear_bit(__ICE_GLOBR_REQ, pf->state);
 		return;
 	}
 
+	/* PFR is a bit of a special case because it doesn't result in an OICR
+	 * interrupt. So for PFR, rebuild after the reset and clear the reset-
+	 * associated state bits.
+	 */
 	if (reset_type == ICE_RESET_PFR) {
 		pf->pfr_count++;
-		ice_rebuild(pf);
-		clear_bit(__ICE_RESET_RECOVERY_PENDING, pf->state);
+		ice_rebuild(pf, reset_type);
+		clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
+		clear_bit(__ICE_PFR_REQ, pf->state);
+		ice_reset_all_vfs(pf, true);
 	}
 }
 
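ice_do_reset() above sequences a PF reset through pf->state bits: prepare (which sets __ICE_PREPARED_FOR_RESET), trigger the hardware reset, then, for PFR only, rebuild inline and clear the request bits. A compact user-space sketch of that bit lifecycle using a plain flag word; the real driver uses atomic test/set/clear_bit operations on pf->state:

```c
#include <stdio.h>

enum { PREPARED_FOR_RESET = 1 << 0, PFR_REQ = 1 << 1, RESET_FAILED = 1 << 2 };

static int trigger_reset(void)
{
	return 0; /* pretend the hardware PF reset succeeded */
}

static void demo_do_pf_reset(unsigned int *state)
{
	*state |= PREPARED_FOR_RESET;   /* ice_prepare_for_reset() */

	if (trigger_reset()) {          /* ice_reset() failed */
		*state |= RESET_FAILED;
		*state &= ~(PREPARED_FOR_RESET | PFR_REQ);
		return;
	}

	/* PFR raises no OICR interrupt, so rebuild inline, then clear bits */
	printf("rebuild after PFR\n");
	*state &= ~(PREPARED_FOR_RESET | PFR_REQ);
}

int main(void)
{
	unsigned int state = PFR_REQ;   /* service task saw a PFR request */

	demo_do_pf_reset(&state);
	printf("state after reset: %#x\n", state); /* 0: all bits cleared */
	return 0;
}
```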
@@ -519,19 +528,29 @@
  */
 static void ice_reset_subtask(struct ice_pf *pf)
 {
-	enum ice_reset_req reset_type;
-
-	rtnl_lock();
+	enum ice_reset_req reset_type = ICE_RESET_INVAL;
 
 	/* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
-	 * OICR interrupt. The OICR handler (ice_misc_intr) determines what
-	 * type of reset happened and sets __ICE_RESET_RECOVERY_PENDING bit in
-	 * pf->state. So if reset/recovery is pending (as indicated by this bit)
-	 * we do a rebuild and return.
+	 * OICR interrupt. The OICR handler (ice_misc_intr) determines what type
+	 * of reset is pending and sets bits in pf->state indicating the reset
+	 * type and __ICE_RESET_OICR_RECV. So, if the latter bit is set
+	 * prepare for pending reset if not already (for PF software-initiated
+	 * global resets the software should already be prepared for it as
+	 * indicated by __ICE_PREPARED_FOR_RESET; for global resets initiated
+	 * by firmware or software on other PFs, that bit is not set so prepare
+	 * for the reset now), poll for reset done, rebuild and return.
 	 */
-	if (ice_is_reset_recovery_pending(pf->state)) {
-		clear_bit(__ICE_GLOBR_RECV, pf->state);
-		clear_bit(__ICE_CORER_RECV, pf->state);
+	if (test_bit(__ICE_RESET_OICR_RECV, pf->state)) {
+		/* Perform the largest reset requested */
+		if (test_and_clear_bit(__ICE_CORER_RECV, pf->state))
+			reset_type = ICE_RESET_CORER;
+		if (test_and_clear_bit(__ICE_GLOBR_RECV, pf->state))
+			reset_type = ICE_RESET_GLOBR;
+		if (test_and_clear_bit(__ICE_EMPR_RECV, pf->state))
+			reset_type = ICE_RESET_EMPR;
+		/* return if no valid reset type requested */
+		if (reset_type == ICE_RESET_INVAL)
+			return;
 		ice_prepare_for_reset(pf);
 
 		/* make sure we are ready to rebuild */
@@ -540,59 +559,59 @@
 		} else {
 			/* done with reset. start rebuild */
 			pf->hw.reset_ongoing = false;
-			ice_rebuild(pf);
+			ice_rebuild(pf, reset_type);
+			/* clear bit to resume normal operations, but
+			 * ICE_NEEDS_RESTART bit is set in case rebuild failed
+			 */
+			clear_bit(__ICE_RESET_OICR_RECV, pf->state);
+			clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
+			clear_bit(__ICE_PFR_REQ, pf->state);
+			clear_bit(__ICE_CORER_REQ, pf->state);
+			clear_bit(__ICE_GLOBR_REQ, pf->state);
+			ice_reset_all_vfs(pf, true);
 		}
-		clear_bit(__ICE_RESET_RECOVERY_PENDING, pf->state);
-		goto unlock;
+
+		return;
 	}
 
 	/* No pending resets to finish processing. Check for new resets */
-	if (test_and_clear_bit(__ICE_GLOBR_REQ, pf->state))
-		reset_type = ICE_RESET_GLOBR;
-	else if (test_and_clear_bit(__ICE_CORER_REQ, pf->state))
-		reset_type = ICE_RESET_CORER;
-	else if (test_and_clear_bit(__ICE_PFR_REQ, pf->state))
+	if (test_bit(__ICE_PFR_REQ, pf->state))
 		reset_type = ICE_RESET_PFR;
-	else
-		goto unlock;
+	if (test_bit(__ICE_CORER_REQ, pf->state))
+		reset_type = ICE_RESET_CORER;
+	if (test_bit(__ICE_GLOBR_REQ, pf->state))
+		reset_type = ICE_RESET_GLOBR;
+	/* If no valid reset type requested just return */
+	if (reset_type == ICE_RESET_INVAL)
+		return;
 
-	/* reset if not already down or resetting */
+	/* reset if not already down or busy */
 	if (!test_bit(__ICE_DOWN, pf->state) &&
 	    !test_bit(__ICE_CFG_BUSY, pf->state)) {
 		ice_do_reset(pf, reset_type);
 	}
-
-unlock:
-	rtnl_unlock();
 }
 
 /**
- * ice_watchdog_subtask - periodic tasks not using event driven scheduling
- * @pf: board private structure
+ * ice_print_topo_conflict - print topology conflict message
+ * @vsi: the VSI whose topology status is being checked
 */
-static void ice_watchdog_subtask(struct ice_pf *pf)
+static void ice_print_topo_conflict(struct ice_vsi *vsi)
 {
-	int i;
-
-	/* if interface is down do nothing */
-	if (test_bit(__ICE_DOWN, pf->state) ||
-	    test_bit(__ICE_CFG_BUSY, pf->state))
-		return;
-
-	/* make sure we don't do these things too often */
-	if (time_before(jiffies,
-			pf->serv_tmr_prev + pf->serv_tmr_period))
-		return;
-
-	pf->serv_tmr_prev = jiffies;
-
-	/* Update the stats for active netdevs so the network stack
-	 * can look at updated numbers whenever it cares to
-	 */
-	ice_update_pf_stats(pf);
-	for (i = 0; i < pf->num_alloc_vsi; i++)
-		if (pf->vsi[i] && pf->vsi[i]->netdev)
-			ice_update_vsi_stats(pf->vsi[i]);
+	switch (vsi->port_info->phy.link_info.topo_media_conflict) {
+	case ICE_AQ_LINK_TOPO_CONFLICT:
+	case ICE_AQ_LINK_MEDIA_CONFLICT:
+	case ICE_AQ_LINK_TOPO_UNREACH_PRT:
+	case ICE_AQ_LINK_TOPO_UNDRUTIL_PRT:
+	case ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA:
+		netdev_info(vsi->netdev, "Possible mis-configuration of the Ethernet port detected, please use the Intel(R) Ethernet Port Configuration Tool application to address the issue.\n");
+		break;
+	case ICE_AQ_LINK_TOPO_UNSUPP_MEDIA:
+		netdev_info(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
+		break;
+	default:
+		break;
+	}
 }
 
 /**
@@ -602,8 +621,17 @@
  */
 void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
 {
+	struct ice_aqc_get_phy_caps_data *caps;
+	const char *an_advertised;
+	enum ice_status status;
+	const char *fec_req;
 	const char *speed;
+	const char *fec;
 	const char *fc;
+	const char *an;
+
+	if (!vsi)
+		return;
 
 	if (vsi->current_isup == isup)
 		return;
@@ -616,6 +644,12 @@
 	}
 
 	switch (vsi->port_info->phy.link_info.link_speed) {
+	case ICE_AQ_LINK_SPEED_100GB:
+		speed = "100 G";
+		break;
+	case ICE_AQ_LINK_SPEED_50GB:
+		speed = "50 G";
+		break;
 	case ICE_AQ_LINK_SPEED_40GB:
 		speed = "40 G";
 		break;
@@ -647,13 +681,13 @@
 
 	switch (vsi->port_info->fc.current_mode) {
 	case ICE_FC_FULL:
-		fc = "RX/TX";
+		fc = "Rx/Tx";
 		break;
 	case ICE_FC_TX_PAUSE:
-		fc = "TX";
+		fc = "Tx";
 		break;
 	case ICE_FC_RX_PAUSE:
-		fc = "RX";
+		fc = "Rx";
 		break;
 	case ICE_FC_NONE:
 		fc = "None";
@@ -663,8 +697,282 @@
 		break;
 	}
 
-	netdev_info(vsi->netdev, "NIC Link is up %sbps, Flow Control: %s\n",
-		    speed, fc);
+	/* Get FEC mode based on negotiated link info */
+	switch (vsi->port_info->phy.link_info.fec_info) {
+	case ICE_AQ_LINK_25G_RS_528_FEC_EN:
+	case ICE_AQ_LINK_25G_RS_544_FEC_EN:
+		fec = "RS-FEC";
+		break;
+	case ICE_AQ_LINK_25G_KR_FEC_EN:
+		fec = "FC-FEC/BASE-R";
+		break;
+	default:
+		fec = "NONE";
+		break;
+	}
+
+	/* check if autoneg completed, might be false due to not supported */
+	if (vsi->port_info->phy.link_info.an_info & ICE_AQ_AN_COMPLETED)
+		an = "True";
+	else
+		an = "False";
+
+	/* Get FEC mode requested based on PHY caps last SW configuration */
+	caps = kzalloc(sizeof(*caps), GFP_KERNEL);
+	if (!caps) {
+		fec_req = "Unknown";
+		an_advertised = "Unknown";
+		goto done;
+	}
+
+	status = ice_aq_get_phy_caps(vsi->port_info, false,
+				     ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL);
+	if (status)
+		netdev_info(vsi->netdev, "Get phy capability failed.\n");
+
+	an_advertised = ice_is_phy_caps_an_enabled(caps) ? "On" : "Off";
+
+	if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
+	    caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
+		fec_req = "RS-FEC";
+	else if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ ||
+		 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ)
+		fec_req = "FC-FEC/BASE-R";
+	else
+		fec_req = "NONE";
+
+	kfree(caps);
+
+done:
+	netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg Advertised: %s, Autoneg Negotiated: %s, Flow Control: %s\n",
+		    speed, fec_req, fec, an_advertised, an, fc);
+	ice_print_topo_conflict(vsi);
+}
+
+/**
+ * ice_vsi_link_event - update the VSI's netdev
+ * @vsi: the VSI on which the link event occurred
+ * @link_up: whether or not the VSI needs to be set up or down
+ */
+static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
+{
+	if (!vsi)
+		return;
+
+	if (test_bit(__ICE_DOWN, vsi->state) || !vsi->netdev)
+		return;
+
+	if (vsi->type == ICE_VSI_PF) {
+		if (link_up == netif_carrier_ok(vsi->netdev))
+			return;
+
+		if (link_up) {
+			netif_carrier_on(vsi->netdev);
+			netif_tx_wake_all_queues(vsi->netdev);
+		} else {
+			netif_carrier_off(vsi->netdev);
+			netif_tx_stop_all_queues(vsi->netdev);
+		}
+	}
+}
+
+/**
+ * ice_set_dflt_mib - send a default config MIB to the FW
+ * @pf: private PF struct
+ *
+ * This function sends a default configuration MIB to the FW.
+ *
+ * If this function errors out at any point, the driver is still able to
+ * function. The main impact is that LFC may not operate as expected.
+ * Therefore an error state in this function should be treated with a DBG
+ * message and continue on with driver rebuild/reenable.
+ */
+static void ice_set_dflt_mib(struct ice_pf *pf)
+{
+	struct device *dev = ice_pf_to_dev(pf);
+	u8 mib_type, *buf, *lldpmib = NULL;
+	u16 len, typelen, offset = 0;
+	struct ice_lldp_org_tlv *tlv;
+	struct ice_hw *hw;
+	u32 ouisubtype;
+
+	if (!pf) {
+		dev_dbg(dev, "%s NULL pf pointer\n", __func__);
+		return;
+	}
+
+	hw = &pf->hw;
+	mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB;
+	lldpmib = kzalloc(ICE_LLDPDU_SIZE, GFP_KERNEL);
+	if (!lldpmib) {
+		dev_dbg(dev, "%s Failed to allocate MIB memory\n",
+			__func__);
+		return;
+	}
+
+	/* Add ETS CFG TLV */
+	tlv = (struct ice_lldp_org_tlv *)lldpmib;
+	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
+		   ICE_IEEE_ETS_TLV_LEN);
+	tlv->typelen = htons(typelen);
+	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
+		      ICE_IEEE_SUBTYPE_ETS_CFG);
+	tlv->ouisubtype = htonl(ouisubtype);
+
+	buf = tlv->tlvinfo;
+	buf[0] = 0;
+
+	/* ETS CFG all UPs map to TC 0. Next 4 (1 - 4) Octets = 0.
+	 * Octets 5 - 12 are BW values, set octet 5 to 100% BW.
+	 * Octets 13 - 20 are TSA values - leave as zeros
+	 */
+	buf[5] = 0x64;
+	len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
+	offset += len + 2;
+	tlv = (struct ice_lldp_org_tlv *)
+		((char *)tlv + sizeof(tlv->typelen) + len);
+
+	/* Add ETS REC TLV */
+	buf = tlv->tlvinfo;
+	tlv->typelen = htons(typelen);
+
+	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
+		      ICE_IEEE_SUBTYPE_ETS_REC);
+	tlv->ouisubtype = htonl(ouisubtype);
+
+	/* First octet of buf is reserved
+	 * Octets 1 - 4 map UP to TC - all UPs map to zero
+	 * Octets 5 - 12 are BW values - set TC 0 to 100%.
+	 * Octets 13 - 20 are TSA value - leave as zeros
+	 */
+	buf[5] = 0x64;
+	offset += len + 2;
+	tlv = (struct ice_lldp_org_tlv *)
+		((char *)tlv + sizeof(tlv->typelen) + len);
+
+	/* Add PFC CFG TLV */
+	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
+		   ICE_IEEE_PFC_TLV_LEN);
+	tlv->typelen = htons(typelen);
+
+	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
+		      ICE_IEEE_SUBTYPE_PFC_CFG);
+	tlv->ouisubtype = htonl(ouisubtype);
+
+	/* Octet 1 left as all zeros - PFC disabled */
+	buf[0] = 0x08;
+	len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
+	offset += len + 2;
+
+	if (ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, offset, NULL))
+		dev_dbg(dev, "%s Failed to set default LLDP MIB\n", __func__);
+
+	kfree(lldpmib);
+}
+
+/**
+ * ice_link_event - process the link event
+ * @pf: PF that the link event is associated with
+ * @pi: port_info for the port that the link event is associated with
+ * @link_up: true if the physical link is up and false if it is down
+ * @link_speed: current link speed received from the link event
+ *
+ * Returns 0 on success and negative on failure
+ */
+static int
+ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
+	       u16 link_speed)
+{
+	struct device *dev = ice_pf_to_dev(pf);
+	struct ice_phy_info *phy_info;
+	struct ice_vsi *vsi;
+	u16 old_link_speed;
+	bool old_link;
+	int result;
+
+	phy_info = &pi->phy;
+	phy_info->link_info_old = phy_info->link_info;
+
+	old_link = !!(phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
+	old_link_speed = phy_info->link_info_old.link_speed;
+
+	/* update the link info structures and re-enable link events,
+	 * don't bail on failure due to other book keeping needed
+	 */
+	result = ice_update_link_info(pi);
+	if (result)
+		dev_dbg(dev, "Failed to update link status and re-enable link events for port %d\n",
+			pi->lport);
+
+	/* Check if the link state is up after updating link info, and treat
+	 * this event as an UP event since the link is actually UP now.
+	 */
+	if (phy_info->link_info.link_info & ICE_AQ_LINK_UP)
+		link_up = true;
+
+	vsi = ice_get_main_vsi(pf);
+	if (!vsi || !vsi->port_info)
+		return -EINVAL;
+
+	/* turn off PHY if media was removed */
+	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) &&
+	    !(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) {
+		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
+
+		result = ice_aq_set_link_restart_an(pi, false, NULL);
+		if (result) {
+			dev_dbg(dev, "Failed to set link down, VSI %d error %d\n",
+				vsi->vsi_num, result);
+			return result;
+		}
+	}
+
+	/* if the old link up/down and speed is the same as the new */
+	if (link_up == old_link && link_speed == old_link_speed)
+		return result;
+
+	if (ice_is_dcb_active(pf)) {
+		if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
+			ice_dcb_rebuild(pf);
+	} else {
+		if (link_up)
+			ice_set_dflt_mib(pf);
+	}
+	ice_vsi_link_event(vsi, link_up);
+	ice_print_link_msg(vsi, link_up);
+
+	ice_vc_notify_link_state(pf);
+
+	return result;
+}
+
+/**
+ * ice_watchdog_subtask - periodic tasks not using event driven scheduling
+ * @pf: board private structure
+ */
+static void ice_watchdog_subtask(struct ice_pf *pf)
+{
+	int i;
+
+	/* if interface is down do nothing */
+	if (test_bit(__ICE_DOWN, pf->state) ||
+	    test_bit(__ICE_CFG_BUSY, pf->state))
+		return;
+
+	/* make sure we don't do these things too often */
+	if (time_before(jiffies,
+			pf->serv_tmr_prev + pf->serv_tmr_period))
+		return;
+
+	pf->serv_tmr_prev = jiffies;
+
+	/* Update the stats for active netdevs so the network stack
+	 * can look at updated numbers whenever it cares to
+	 */
+	ice_update_pf_stats(pf);
+	ice_for_each_vsi(pf, i)
+		if (pf->vsi[i] && pf->vsi[i]->netdev)
+			ice_update_vsi_stats(pf->vsi[i]);
 }
 
 /**
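ice_set_dflt_mib() above hand-packs LLDP TLVs: each TLV starts with a 16-bit typelen word holding a 7-bit type and a 9-bit length, stored big-endian. A standalone sketch of that encoding; the shift and mask mirror the standard LLDP header split that the driver's ICE_LLDP_TLV_* constants encode, and 25 is taken here as the IEEE ETS configuration TLV length (ICE_IEEE_ETS_TLV_LEN in the driver), so treat the exact constants as illustrative:

```c
#include <stdint.h>
#include <stdio.h>

/* LLDP TLV header: 7-bit type in the high bits, 9-bit length below it. */
#define LLDP_TLV_TYPE_S 9
#define LLDP_TLV_LEN_M  0x01FF
#define TLV_TYPE_ORG    127 /* organizationally specific TLV */

int main(void)
{
	uint16_t ets_cfg_len = 25; /* IEEE ETS CFG TLV information length */
	uint16_t typelen = (TLV_TYPE_ORG << LLDP_TLV_TYPE_S) |
			   (ets_cfg_len & LLDP_TLV_LEN_M);

	/* recover the fields the same way the driver does with its masks */
	printf("typelen=%#06x type=%u len=%u\n", typelen,
	       typelen >> LLDP_TLV_TYPE_S, typelen & LLDP_TLV_LEN_M);
	return 0;
}
```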
@@ -681,101 +989,15 @@
 			       ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL));
 
 	if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
-		dev_dbg(ice_hw_to_dev(pi->hw),
-			"Failed to set link event mask for port %d\n",
+		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to set link event mask for port %d\n",
 			pi->lport);
 		return -EIO;
 	}
 
 	if (ice_aq_get_link_info(pi, true, NULL, NULL)) {
-		dev_dbg(ice_hw_to_dev(pi->hw),
-			"Failed to enable link events for port %d\n",
+		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to enable link events for port %d\n",
 			pi->lport);
 		return -EIO;
-	}
-
-	return 0;
-}
-
-/**
- * ice_vsi_link_event - update the vsi's netdev
- * @vsi: the vsi on which the link event occurred
- * @link_up: whether or not the vsi needs to be set up or down
- */
-static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
-{
-	if (!vsi || test_bit(__ICE_DOWN, vsi->state))
-		return;
-
-	if (vsi->type == ICE_VSI_PF) {
-		if (!vsi->netdev) {
-			dev_dbg(&vsi->back->pdev->dev,
-				"vsi->netdev is not initialized!\n");
-			return;
-		}
-		if (link_up) {
-			netif_carrier_on(vsi->netdev);
-			netif_tx_wake_all_queues(vsi->netdev);
-		} else {
-			netif_carrier_off(vsi->netdev);
-			netif_tx_stop_all_queues(vsi->netdev);
-		}
-	}
-}
-
-/**
- * ice_link_event - process the link event
- * @pf: pf that the link event is associated with
- * @pi: port_info for the port that the link event is associated with
- *
- * Returns -EIO if ice_get_link_status() fails
- * Returns 0 on success
- */
-static int
-ice_link_event(struct ice_pf *pf, struct ice_port_info *pi)
-{
-	u8 new_link_speed, old_link_speed;
-	struct ice_phy_info *phy_info;
-	bool new_link_same_as_old;
-	bool new_link, old_link;
-	u8 lport;
-	u16 v;
-
-	phy_info = &pi->phy;
-	phy_info->link_info_old = phy_info->link_info;
-	/* Force ice_get_link_status() to update link info */
-	phy_info->get_link_info = true;
-
-	old_link = (phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
-	old_link_speed = phy_info->link_info_old.link_speed;
-
-	lport = pi->lport;
-	if (ice_get_link_status(pi, &new_link)) {
-		dev_dbg(&pf->pdev->dev,
-			"Could not get link status for port %d\n", lport);
-		return -EIO;
-	}
-
-	new_link_speed = phy_info->link_info.link_speed;
-
-	new_link_same_as_old = (new_link == old_link &&
-				new_link_speed == old_link_speed);
-
-	ice_for_each_vsi(pf, v) {
-		struct ice_vsi *vsi = pf->vsi[v];
-
-		if (!vsi || !vsi->port_info)
-			continue;
-
-		if (new_link_same_as_old &&
-		    (test_bit(__ICE_DOWN, vsi->state) ||
-		     new_link == netif_carrier_ok(vsi->netdev)))
-			continue;
-
-		if (vsi->port_info->lport == lport) {
-			ice_print_link_msg(vsi, new_link);
-			ice_vsi_link_event(vsi, new_link);
-		}
 	}
 
 	return 0;
.. | .. |
---|
783 | 1005 | |
---|
784 | 1006 | /** |
---|
785 | 1007 | * ice_handle_link_event - handle link event via ARQ |
---|
786 | | - * @pf: pf that the link event is associated with |
---|
787 | | - * |
---|
788 | | - * Return -EINVAL if port_info is null |
---|
789 | | - * Return status on succes |
---|
| 1008 | + * @pf: PF that the link event is associated with |
---|
| 1009 | + * @event: event structure containing link status info |
---|
790 | 1010 | */ |
---|
791 | | -static int ice_handle_link_event(struct ice_pf *pf) |
---|
| 1011 | +static int |
---|
| 1012 | +ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event) |
---|
792 | 1013 | { |
---|
| 1014 | + struct ice_aqc_get_link_status_data *link_data; |
---|
793 | 1015 | struct ice_port_info *port_info; |
---|
794 | 1016 | int status; |
---|
795 | 1017 | |
---|
| 1018 | + link_data = (struct ice_aqc_get_link_status_data *)event->msg_buf; |
---|
796 | 1019 | port_info = pf->hw.port_info; |
---|
797 | 1020 | if (!port_info) |
---|
798 | 1021 | return -EINVAL; |
---|
799 | 1022 | |
---|
800 | | - status = ice_link_event(pf, port_info); |
---|
| 1023 | + status = ice_link_event(pf, port_info, |
---|
| 1024 | + !!(link_data->link_info & ICE_AQ_LINK_UP), |
---|
| 1025 | + le16_to_cpu(link_data->link_speed)); |
---|
801 | 1026 | if (status) |
---|
802 | | - dev_dbg(&pf->pdev->dev, |
---|
803 | | - "Could not process link event, error %d\n", status); |
---|
| 1027 | + dev_dbg(ice_pf_to_dev(pf), "Could not process link event, error %d\n", |
---|
| 1028 | + status); |
---|
804 | 1029 | |
---|
805 | 1030 | return status; |
---|
| 1031 | +} |
---|
| 1032 | + |
---|
| 1033 | +enum ice_aq_task_state { |
---|
| 1034 | + ICE_AQ_TASK_WAITING = 0, |
---|
| 1035 | + ICE_AQ_TASK_COMPLETE, |
---|
| 1036 | + ICE_AQ_TASK_CANCELED, |
---|
| 1037 | +}; |
---|
| 1038 | + |
---|
| 1039 | +struct ice_aq_task { |
---|
| 1040 | + struct hlist_node entry; |
---|
| 1041 | + |
---|
| 1042 | + u16 opcode; |
---|
| 1043 | + struct ice_rq_event_info *event; |
---|
| 1044 | + enum ice_aq_task_state state; |
---|
| 1045 | +}; |
---|
| 1046 | + |
---|
| 1047 | +/** |
---|
| 1048 | + * ice_aq_wait_for_event - Wait for an AdminQ event from firmware
---|
| 1049 | + * @pf: pointer to the PF private structure |
---|
| 1050 | + * @opcode: the opcode to wait for |
---|
| 1051 | + * @timeout: how long to wait, in jiffies |
---|
| 1052 | + * @event: storage for the event info |
---|
| 1053 | + * |
---|
| 1054 | + * Waits for a specific AdminQ completion event on the ARQ for a given PF. The |
---|
| 1055 | + * current thread will be put to sleep until the specified event occurs or |
---|
| 1056 | + * until the given timeout is reached. |
---|
| 1057 | + * |
---|
| 1058 | + * To obtain only the descriptor contents, pass an event without an allocated |
---|
| 1059 | + * msg_buf. If the complete data buffer is desired, allocate the |
---|
| 1060 | + * event->msg_buf with enough space ahead of time. |
---|
| 1061 | + * |
---|
| 1062 | + * Returns: zero on success, or a negative error code on failure. |
---|
| 1063 | + */ |
---|
| 1064 | +int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout, |
---|
| 1065 | + struct ice_rq_event_info *event) |
---|
| 1066 | +{ |
---|
| 1067 | + struct device *dev = ice_pf_to_dev(pf); |
---|
| 1068 | + struct ice_aq_task *task; |
---|
| 1069 | + unsigned long start; |
---|
| 1070 | + long ret; |
---|
| 1071 | + int err; |
---|
| 1072 | + |
---|
| 1073 | + task = kzalloc(sizeof(*task), GFP_KERNEL); |
---|
| 1074 | + if (!task) |
---|
| 1075 | + return -ENOMEM; |
---|
| 1076 | + |
---|
| 1077 | + INIT_HLIST_NODE(&task->entry); |
---|
| 1078 | + task->opcode = opcode; |
---|
| 1079 | + task->event = event; |
---|
| 1080 | + task->state = ICE_AQ_TASK_WAITING; |
---|
| 1081 | + |
---|
| 1082 | + spin_lock_bh(&pf->aq_wait_lock); |
---|
| 1083 | + hlist_add_head(&task->entry, &pf->aq_wait_list); |
---|
| 1084 | + spin_unlock_bh(&pf->aq_wait_lock); |
---|
| 1085 | + |
---|
| 1086 | + start = jiffies; |
---|
| 1087 | + |
---|
| 1088 | + ret = wait_event_interruptible_timeout(pf->aq_wait_queue, task->state, |
---|
| 1089 | + timeout); |
---|
| 1090 | + switch (task->state) { |
---|
| 1091 | + case ICE_AQ_TASK_WAITING: |
---|
| 1092 | + err = ret < 0 ? ret : -ETIMEDOUT; |
---|
| 1093 | + break; |
---|
| 1094 | + case ICE_AQ_TASK_CANCELED: |
---|
| 1095 | + err = ret < 0 ? ret : -ECANCELED; |
---|
| 1096 | + break; |
---|
| 1097 | + case ICE_AQ_TASK_COMPLETE: |
---|
| 1098 | + err = ret < 0 ? ret : 0; |
---|
| 1099 | + break; |
---|
| 1100 | + default: |
---|
| 1101 | + WARN(1, "Unexpected AdminQ wait task state %u", task->state); |
---|
| 1102 | + err = -EINVAL; |
---|
| 1103 | + break; |
---|
| 1104 | + } |
---|
| 1105 | + |
---|
| 1106 | + dev_dbg(dev, "Waited %u msecs (max %u msecs) for firmware response to op 0x%04x\n", |
---|
| 1107 | + jiffies_to_msecs(jiffies - start), |
---|
| 1108 | + jiffies_to_msecs(timeout), |
---|
| 1109 | + opcode); |
---|
| 1110 | + |
---|
| 1111 | + spin_lock_bh(&pf->aq_wait_lock); |
---|
| 1112 | + hlist_del(&task->entry); |
---|
| 1113 | + spin_unlock_bh(&pf->aq_wait_lock); |
---|
| 1114 | + kfree(task); |
---|
| 1115 | + |
---|
| 1116 | + return err; |
---|
| 1117 | +} |
---|
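The wait/wake pair above is generic; for illustration, here is a hedged caller sketch. The opcode shown (ice_aqc_opc_nvm_write_activate) and the 10-second timeout are assumptions picked for the example — a flash-update flow is the kind of consumer this helper targets — and the error handling is deliberately minimal.

```c
/* Hedged caller sketch: block until firmware acknowledges an NVM write
 * activation, or time out. Passing an event with a NULL msg_buf means
 * only the descriptor and message length are captured (see above).
 */
struct ice_rq_event_info event = {};
int err;

err = ice_aq_wait_for_event(pf, ice_aqc_opc_nvm_write_activate,
			    10 * HZ, &event);
if (err)
	dev_warn(ice_pf_to_dev(pf),
		 "no firmware response to NVM activate: %d\n", err);
```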
| 1118 | + |
---|
| 1119 | +/** |
---|
| 1120 | + * ice_aq_check_events - Check if any thread is waiting for an AdminQ event |
---|
| 1121 | + * @pf: pointer to the PF private structure |
---|
| 1122 | + * @opcode: the opcode of the event |
---|
| 1123 | + * @event: the event to check |
---|
| 1124 | + * |
---|
| 1125 | + * Loops over the current list of pending threads waiting for an AdminQ event. |
---|
| 1126 | + * For each matching task, copy the contents of the event into the task |
---|
| 1127 | + * structure and wake up the thread. |
---|
| 1128 | + * |
---|
| 1129 | + * If multiple threads wait for the same opcode, they will all be woken up. |
---|
| 1130 | + * |
---|
| 1131 | + * Note that event->msg_buf will only be duplicated if the event has a buffer |
---|
| 1132 | + * with enough space already allocated. Otherwise, only the descriptor and |
---|
| 1133 | + * message length will be copied. |
---|
| 1134 | + * |
---|
| 1135 | + * Waiters are woken via pf->aq_wait_queue; the function itself returns nothing.
---|
| 1136 | + */ |
---|
| 1137 | +static void ice_aq_check_events(struct ice_pf *pf, u16 opcode, |
---|
| 1138 | + struct ice_rq_event_info *event) |
---|
| 1139 | +{ |
---|
| 1140 | + struct ice_aq_task *task; |
---|
| 1141 | + bool found = false; |
---|
| 1142 | + |
---|
| 1143 | + spin_lock_bh(&pf->aq_wait_lock); |
---|
| 1144 | + hlist_for_each_entry(task, &pf->aq_wait_list, entry) { |
---|
| 1145 | + if (task->state || task->opcode != opcode) |
---|
| 1146 | + continue; |
---|
| 1147 | + |
---|
| 1148 | + memcpy(&task->event->desc, &event->desc, sizeof(event->desc)); |
---|
| 1149 | + task->event->msg_len = event->msg_len; |
---|
| 1150 | + |
---|
| 1151 | + /* Only copy the data buffer if a destination was set */ |
---|
| 1152 | + if (task->event->msg_buf && |
---|
| 1153 | + task->event->buf_len > event->buf_len) { |
---|
| 1154 | + memcpy(task->event->msg_buf, event->msg_buf, |
---|
| 1155 | + event->buf_len); |
---|
| 1156 | + task->event->buf_len = event->buf_len; |
---|
| 1157 | + } |
---|
| 1158 | + |
---|
| 1159 | + task->state = ICE_AQ_TASK_COMPLETE; |
---|
| 1160 | + found = true; |
---|
| 1161 | + } |
---|
| 1162 | + spin_unlock_bh(&pf->aq_wait_lock); |
---|
| 1163 | + |
---|
| 1164 | + if (found) |
---|
| 1165 | + wake_up(&pf->aq_wait_queue); |
---|
| 1166 | +} |
---|
| 1167 | + |
---|
| 1168 | +/** |
---|
| 1169 | + * ice_aq_cancel_waiting_tasks - Immediately cancel all waiting tasks |
---|
| 1170 | + * @pf: the PF private structure |
---|
| 1171 | + * |
---|
| 1172 | + * Set all waiting tasks to ICE_AQ_TASK_CANCELED, and wake up their threads. |
---|
| 1173 | + * This will then cause ice_aq_wait_for_event to exit with -ECANCELED. |
---|
| 1174 | + */ |
---|
| 1175 | +static void ice_aq_cancel_waiting_tasks(struct ice_pf *pf) |
---|
| 1176 | +{ |
---|
| 1177 | + struct ice_aq_task *task; |
---|
| 1178 | + |
---|
| 1179 | + spin_lock_bh(&pf->aq_wait_lock); |
---|
| 1180 | + hlist_for_each_entry(task, &pf->aq_wait_list, entry) |
---|
| 1181 | + task->state = ICE_AQ_TASK_CANCELED; |
---|
| 1182 | + spin_unlock_bh(&pf->aq_wait_lock); |
---|
| 1183 | + |
---|
| 1184 | + wake_up(&pf->aq_wait_queue); |
---|
806 | 1185 | } |
---|
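As a hedged sketch only: a teardown or reset path would be expected to cancel waiters before the AdminQ stops being serviced, so that no caller of ice_aq_wait_for_event() sleeps forever. The surrounding function name below is hypothetical; only ice_aq_cancel_waiting_tasks() is from this patch.

```c
/* Hypothetical quiesce ordering (sketch, not from this patch) */
static void ice_example_quiesce(struct ice_pf *pf)
{
	/* every pending ice_aq_wait_for_event() caller returns -ECANCELED */
	ice_aq_cancel_waiting_tasks(pf);

	/* ... AdminQ servicing may now be stopped safely ... */
}
```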
807 | 1186 | |
---|
808 | 1187 | /** |
---|
.. | .. |
---|
812 | 1191 | */ |
---|
813 | 1192 | static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type) |
---|
814 | 1193 | { |
---|
| 1194 | + struct device *dev = ice_pf_to_dev(pf); |
---|
815 | 1195 | struct ice_rq_event_info event; |
---|
816 | 1196 | struct ice_hw *hw = &pf->hw; |
---|
817 | 1197 | struct ice_ctl_q_info *cq; |
---|
.. | .. |
---|
828 | 1208 | cq = &hw->adminq; |
---|
829 | 1209 | qtype = "Admin"; |
---|
830 | 1210 | break; |
---|
| 1211 | + case ICE_CTL_Q_MAILBOX: |
---|
| 1212 | + cq = &hw->mailboxq; |
---|
| 1213 | + qtype = "Mailbox"; |
---|
| 1214 | + break; |
---|
831 | 1215 | default: |
---|
832 | | - dev_warn(&pf->pdev->dev, "Unknown control queue type 0x%x\n", |
---|
833 | | - q_type); |
---|
| 1216 | + dev_warn(dev, "Unknown control queue type 0x%x\n", q_type); |
---|
834 | 1217 | return 0; |
---|
835 | 1218 | } |
---|
836 | 1219 | |
---|
.. | .. |
---|
842 | 1225 | PF_FW_ARQLEN_ARQCRIT_M)) { |
---|
843 | 1226 | oldval = val; |
---|
844 | 1227 | if (val & PF_FW_ARQLEN_ARQVFE_M) |
---|
845 | | - dev_dbg(&pf->pdev->dev, |
---|
846 | | - "%s Receive Queue VF Error detected\n", qtype); |
---|
| 1228 | + dev_dbg(dev, "%s Receive Queue VF Error detected\n", |
---|
| 1229 | + qtype); |
---|
847 | 1230 | if (val & PF_FW_ARQLEN_ARQOVFL_M) { |
---|
848 | | - dev_dbg(&pf->pdev->dev, |
---|
849 | | - "%s Receive Queue Overflow Error detected\n", |
---|
| 1231 | + dev_dbg(dev, "%s Receive Queue Overflow Error detected\n", |
---|
850 | 1232 | qtype); |
---|
851 | 1233 | } |
---|
852 | 1234 | if (val & PF_FW_ARQLEN_ARQCRIT_M) |
---|
853 | | - dev_dbg(&pf->pdev->dev, |
---|
854 | | - "%s Receive Queue Critical Error detected\n", |
---|
| 1235 | + dev_dbg(dev, "%s Receive Queue Critical Error detected\n", |
---|
855 | 1236 | qtype); |
---|
856 | 1237 | val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M | |
---|
857 | 1238 | PF_FW_ARQLEN_ARQCRIT_M); |
---|
.. | .. |
---|
864 | 1245 | PF_FW_ATQLEN_ATQCRIT_M)) { |
---|
865 | 1246 | oldval = val; |
---|
866 | 1247 | if (val & PF_FW_ATQLEN_ATQVFE_M) |
---|
867 | | - dev_dbg(&pf->pdev->dev, |
---|
868 | | - "%s Send Queue VF Error detected\n", qtype); |
---|
| 1248 | + dev_dbg(dev, "%s Send Queue VF Error detected\n", |
---|
| 1249 | + qtype); |
---|
869 | 1250 | if (val & PF_FW_ATQLEN_ATQOVFL_M) { |
---|
870 | | - dev_dbg(&pf->pdev->dev, |
---|
871 | | - "%s Send Queue Overflow Error detected\n", |
---|
| 1251 | + dev_dbg(dev, "%s Send Queue Overflow Error detected\n", |
---|
872 | 1252 | qtype); |
---|
873 | 1253 | } |
---|
874 | 1254 | if (val & PF_FW_ATQLEN_ATQCRIT_M) |
---|
875 | | - dev_dbg(&pf->pdev->dev, |
---|
876 | | - "%s Send Queue Critical Error detected\n", |
---|
| 1255 | + dev_dbg(dev, "%s Send Queue Critical Error detected\n", |
---|
877 | 1256 | qtype); |
---|
878 | 1257 | val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M | |
---|
879 | 1258 | PF_FW_ATQLEN_ATQCRIT_M); |
---|
.. | .. |
---|
882 | 1261 | } |
---|
883 | 1262 | |
---|
884 | 1263 | event.buf_len = cq->rq_buf_size; |
---|
885 | | - event.msg_buf = devm_kzalloc(&pf->pdev->dev, event.buf_len, |
---|
886 | | - GFP_KERNEL); |
---|
| 1264 | + event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); |
---|
887 | 1265 | if (!event.msg_buf) |
---|
888 | 1266 | return 0; |
---|
889 | 1267 | |
---|
.. | .. |
---|
895 | 1273 | if (ret == ICE_ERR_AQ_NO_WORK) |
---|
896 | 1274 | break; |
---|
897 | 1275 | if (ret) { |
---|
898 | | - dev_err(&pf->pdev->dev, |
---|
899 | | - "%s Receive Queue event error %d\n", qtype, |
---|
900 | | - ret); |
---|
| 1276 | + dev_err(dev, "%s Receive Queue event error %s\n", qtype, |
---|
| 1277 | + ice_stat_str(ret)); |
---|
901 | 1278 | break; |
---|
902 | 1279 | } |
---|
903 | 1280 | |
---|
904 | 1281 | opcode = le16_to_cpu(event.desc.opcode); |
---|
905 | 1282 | |
---|
| 1283 | + /* Notify any thread that might be waiting for this event */ |
---|
| 1284 | + ice_aq_check_events(pf, opcode, &event); |
---|
| 1285 | + |
---|
906 | 1286 | switch (opcode) { |
---|
907 | 1287 | case ice_aqc_opc_get_link_status: |
---|
908 | | - if (ice_handle_link_event(pf)) |
---|
909 | | - dev_err(&pf->pdev->dev, |
---|
910 | | - "Could not handle link event\n"); |
---|
| 1288 | + if (ice_handle_link_event(pf, &event)) |
---|
| 1289 | + dev_err(dev, "Could not handle link event\n"); |
---|
| 1290 | + break; |
---|
| 1291 | + case ice_aqc_opc_event_lan_overflow: |
---|
| 1292 | + ice_vf_lan_overflow_event(pf, &event); |
---|
| 1293 | + break; |
---|
| 1294 | + case ice_mbx_opc_send_msg_to_pf: |
---|
| 1295 | + ice_vc_process_vf_msg(pf, &event); |
---|
| 1296 | + break; |
---|
| 1297 | + case ice_aqc_opc_fw_logging: |
---|
| 1298 | + ice_output_fw_log(hw, &event.desc, event.msg_buf); |
---|
| 1299 | + break; |
---|
| 1300 | + case ice_aqc_opc_lldp_set_mib_change: |
---|
| 1301 | + ice_dcb_process_lldp_set_mib_change(pf, &event); |
---|
911 | 1302 | break; |
---|
912 | 1303 | default: |
---|
913 | | - dev_dbg(&pf->pdev->dev, |
---|
914 | | - "%s Receive Queue unknown event 0x%04x ignored\n", |
---|
| 1304 | + dev_dbg(dev, "%s Receive Queue unknown event 0x%04x ignored\n", |
---|
915 | 1305 | qtype, opcode); |
---|
916 | 1306 | break; |
---|
917 | 1307 | } |
---|
918 | 1308 | } while (pending && (i++ < ICE_DFLT_IRQ_WORK)); |
---|
919 | 1309 | |
---|
920 | | - devm_kfree(&pf->pdev->dev, event.msg_buf); |
---|
| 1310 | + kfree(event.msg_buf); |
---|
921 | 1311 | |
---|
922 | 1312 | return pending && (i == ICE_DFLT_IRQ_WORK); |
---|
923 | 1313 | } |
---|
.. | .. |
---|
965 | 1355 | } |
---|
966 | 1356 | |
---|
967 | 1357 | /** |
---|
| 1358 | + * ice_clean_mailboxq_subtask - clean the MailboxQ rings |
---|
| 1359 | + * @pf: board private structure |
---|
| 1360 | + */ |
---|
| 1361 | +static void ice_clean_mailboxq_subtask(struct ice_pf *pf) |
---|
| 1362 | +{ |
---|
| 1363 | + struct ice_hw *hw = &pf->hw; |
---|
| 1364 | + |
---|
| 1365 | + if (!test_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state)) |
---|
| 1366 | + return; |
---|
| 1367 | + |
---|
| 1368 | + if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX)) |
---|
| 1369 | + return; |
---|
| 1370 | + |
---|
| 1371 | + clear_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state); |
---|
| 1372 | + |
---|
| 1373 | + if (ice_ctrlq_pending(hw, &hw->mailboxq)) |
---|
| 1374 | + __ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX); |
---|
| 1375 | + |
---|
| 1376 | + ice_flush(hw); |
---|
| 1377 | +} |
---|
| 1378 | + |
---|
| 1379 | +/** |
---|
968 | 1380 | * ice_service_task_schedule - schedule the service task to wake up |
---|
969 | 1381 | * @pf: board private structure |
---|
970 | 1382 | * |
---|
971 | 1383 | * If not already scheduled, this puts the task into the work queue. |
---|
972 | 1384 | */ |
---|
973 | | -static void ice_service_task_schedule(struct ice_pf *pf) |
---|
| 1385 | +void ice_service_task_schedule(struct ice_pf *pf) |
---|
974 | 1386 | { |
---|
975 | | - if (!test_bit(__ICE_DOWN, pf->state) && |
---|
976 | | - !test_and_set_bit(__ICE_SERVICE_SCHED, pf->state)) |
---|
| 1387 | + if (!test_bit(__ICE_SERVICE_DIS, pf->state) && |
---|
| 1388 | + !test_and_set_bit(__ICE_SERVICE_SCHED, pf->state) && |
---|
| 1389 | + !test_bit(__ICE_NEEDS_RESTART, pf->state)) |
---|
977 | 1390 | queue_work(ice_wq, &pf->serv_task); |
---|
978 | 1391 | } |
---|
979 | 1392 | |
---|
.. | .. |
---|
991 | 1404 | } |
---|
992 | 1405 | |
---|
993 | 1406 | /** |
---|
| 1407 | + * ice_service_task_stop - stop service task and cancel works |
---|
| 1408 | + * @pf: board private structure |
---|
| 1409 | + * |
---|
| 1410 | + * Return 0 if the __ICE_SERVICE_DIS bit was not already set, |
---|
| 1411 | + * 1 otherwise. |
---|
| 1412 | + */ |
---|
| 1413 | +static int ice_service_task_stop(struct ice_pf *pf) |
---|
| 1414 | +{ |
---|
| 1415 | + int ret; |
---|
| 1416 | + |
---|
| 1417 | + ret = test_and_set_bit(__ICE_SERVICE_DIS, pf->state); |
---|
| 1418 | + |
---|
| 1419 | + if (pf->serv_tmr.function) |
---|
| 1420 | + del_timer_sync(&pf->serv_tmr); |
---|
| 1421 | + if (pf->serv_task.func) |
---|
| 1422 | + cancel_work_sync(&pf->serv_task); |
---|
| 1423 | + |
---|
| 1424 | + clear_bit(__ICE_SERVICE_SCHED, pf->state); |
---|
| 1425 | + return ret; |
---|
| 1426 | +} |
---|
| 1427 | + |
---|
| 1428 | +/** |
---|
| 1429 | + * ice_service_task_restart - restart service task and schedule works |
---|
| 1430 | + * @pf: board private structure |
---|
| 1431 | + * |
---|
| 1432 | + * This function is needed for suspend and resume work (e.g. the WoL scenario)
---|
| 1433 | + */ |
---|
| 1434 | +static void ice_service_task_restart(struct ice_pf *pf) |
---|
| 1435 | +{ |
---|
| 1436 | + clear_bit(__ICE_SERVICE_DIS, pf->state); |
---|
| 1437 | + ice_service_task_schedule(pf); |
---|
| 1438 | +} |
---|
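A hedged sketch of how the stop/restart pair is meant to bracket a suspend/resume cycle; the PM callback names below are assumptions, and only the two service-task helpers are from this patch.

```c
/* Assumed power-management pairing (sketch) */
static int ice_example_suspend(struct ice_pf *pf)
{
	ice_service_task_stop(pf);	/* sets __ICE_SERVICE_DIS, cancels pending work */
	/* ... quiesce the device, arm WoL ... */
	return 0;
}

static int ice_example_resume(struct ice_pf *pf)
{
	/* ... restore device state ... */
	ice_service_task_restart(pf);	/* clears __ICE_SERVICE_DIS, reschedules */
	return 0;
}
```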
| 1439 | + |
---|
| 1440 | +/** |
---|
994 | 1441 | * ice_service_timer - timer callback to schedule service task |
---|
995 | 1442 | * @t: pointer to timer_list |
---|
996 | 1443 | */ |
---|
.. | .. |
---|
1000 | 1447 | |
---|
1001 | 1448 | mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies)); |
---|
1002 | 1449 | ice_service_task_schedule(pf); |
---|
| 1450 | +} |
---|
| 1451 | + |
---|
| 1452 | +/** |
---|
| 1453 | + * ice_handle_mdd_event - handle malicious driver detect event |
---|
| 1454 | + * @pf: pointer to the PF structure |
---|
| 1455 | + * |
---|
| 1456 | + * Called from service task. OICR interrupt handler indicates MDD event. |
---|
| 1457 | + * VF MDD logging is guarded by net_ratelimit. Additional PF and VF log |
---|
| 1458 | + * messages are wrapped by netif_msg_[rx|tx]_err. Since VF Rx MDD events |
---|
| 1459 | + * disable the queue, the PF can be configured to reset the VF using ethtool |
---|
| 1460 | + * private flag mdd-auto-reset-vf. |
---|
| 1461 | + */ |
---|
| 1462 | +static void ice_handle_mdd_event(struct ice_pf *pf) |
---|
| 1463 | +{ |
---|
| 1464 | + struct device *dev = ice_pf_to_dev(pf); |
---|
| 1465 | + struct ice_hw *hw = &pf->hw; |
---|
| 1466 | + unsigned int i; |
---|
| 1467 | + u32 reg; |
---|
| 1468 | + |
---|
| 1469 | + if (!test_and_clear_bit(__ICE_MDD_EVENT_PENDING, pf->state)) { |
---|
| 1470 | + /* Since the VF MDD event logging is rate limited, check if |
---|
| 1471 | + * there are pending MDD events. |
---|
| 1472 | + */ |
---|
| 1473 | + ice_print_vfs_mdd_events(pf); |
---|
| 1474 | + return; |
---|
| 1475 | + } |
---|
| 1476 | + |
---|
| 1477 | + /* find what triggered an MDD event */ |
---|
| 1478 | + reg = rd32(hw, GL_MDET_TX_PQM); |
---|
| 1479 | + if (reg & GL_MDET_TX_PQM_VALID_M) { |
---|
| 1480 | + u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >> |
---|
| 1481 | + GL_MDET_TX_PQM_PF_NUM_S; |
---|
| 1482 | + u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >> |
---|
| 1483 | + GL_MDET_TX_PQM_VF_NUM_S; |
---|
| 1484 | + u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >> |
---|
| 1485 | + GL_MDET_TX_PQM_MAL_TYPE_S; |
---|
| 1486 | + u16 queue = ((reg & GL_MDET_TX_PQM_QNUM_M) >> |
---|
| 1487 | + GL_MDET_TX_PQM_QNUM_S); |
---|
| 1488 | + |
---|
| 1489 | + if (netif_msg_tx_err(pf)) |
---|
| 1490 | + dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n", |
---|
| 1491 | + event, queue, pf_num, vf_num); |
---|
| 1492 | + wr32(hw, GL_MDET_TX_PQM, 0xffffffff); |
---|
| 1493 | + } |
---|
| 1494 | + |
---|
| 1495 | + reg = rd32(hw, GL_MDET_TX_TCLAN); |
---|
| 1496 | + if (reg & GL_MDET_TX_TCLAN_VALID_M) { |
---|
| 1497 | + u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >> |
---|
| 1498 | + GL_MDET_TX_TCLAN_PF_NUM_S; |
---|
| 1499 | + u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >> |
---|
| 1500 | + GL_MDET_TX_TCLAN_VF_NUM_S; |
---|
| 1501 | + u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >> |
---|
| 1502 | + GL_MDET_TX_TCLAN_MAL_TYPE_S; |
---|
| 1503 | + u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >> |
---|
| 1504 | + GL_MDET_TX_TCLAN_QNUM_S); |
---|
| 1505 | + |
---|
| 1506 | + if (netif_msg_tx_err(pf)) |
---|
| 1507 | + dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n", |
---|
| 1508 | + event, queue, pf_num, vf_num); |
---|
| 1509 | + wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff); |
---|
| 1510 | + } |
---|
| 1511 | + |
---|
| 1512 | + reg = rd32(hw, GL_MDET_RX); |
---|
| 1513 | + if (reg & GL_MDET_RX_VALID_M) { |
---|
| 1514 | + u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >> |
---|
| 1515 | + GL_MDET_RX_PF_NUM_S; |
---|
| 1516 | + u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >> |
---|
| 1517 | + GL_MDET_RX_VF_NUM_S; |
---|
| 1518 | + u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >> |
---|
| 1519 | + GL_MDET_RX_MAL_TYPE_S; |
---|
| 1520 | + u16 queue = ((reg & GL_MDET_RX_QNUM_M) >> |
---|
| 1521 | + GL_MDET_RX_QNUM_S); |
---|
| 1522 | + |
---|
| 1523 | + if (netif_msg_rx_err(pf)) |
---|
| 1524 | + dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n", |
---|
| 1525 | + event, queue, pf_num, vf_num); |
---|
| 1526 | + wr32(hw, GL_MDET_RX, 0xffffffff); |
---|
| 1527 | + } |
---|
| 1528 | + |
---|
| 1529 | + /* check to see if this PF caused an MDD event */ |
---|
| 1530 | + reg = rd32(hw, PF_MDET_TX_PQM); |
---|
| 1531 | + if (reg & PF_MDET_TX_PQM_VALID_M) { |
---|
| 1532 | + wr32(hw, PF_MDET_TX_PQM, 0xFFFF); |
---|
| 1533 | + if (netif_msg_tx_err(pf)) |
---|
| 1534 | + dev_info(dev, "Malicious Driver Detection event TX_PQM detected on PF\n"); |
---|
| 1535 | + } |
---|
| 1536 | + |
---|
| 1537 | + reg = rd32(hw, PF_MDET_TX_TCLAN); |
---|
| 1538 | + if (reg & PF_MDET_TX_TCLAN_VALID_M) { |
---|
| 1539 | + wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF); |
---|
| 1540 | + if (netif_msg_tx_err(pf)) |
---|
| 1541 | + dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on PF\n"); |
---|
| 1542 | + } |
---|
| 1543 | + |
---|
| 1544 | + reg = rd32(hw, PF_MDET_RX); |
---|
| 1545 | + if (reg & PF_MDET_RX_VALID_M) { |
---|
| 1546 | + wr32(hw, PF_MDET_RX, 0xFFFF); |
---|
| 1547 | + if (netif_msg_rx_err(pf)) |
---|
| 1548 | + dev_info(dev, "Malicious Driver Detection event RX detected on PF\n"); |
---|
| 1549 | + } |
---|
| 1550 | + |
---|
| 1551 | + /* Check to see if one of the VFs caused an MDD event, and then |
---|
| 1552 | + * increment counters and set print pending |
---|
| 1553 | + */ |
---|
| 1554 | + ice_for_each_vf(pf, i) { |
---|
| 1555 | + struct ice_vf *vf = &pf->vf[i]; |
---|
| 1556 | + |
---|
| 1557 | + reg = rd32(hw, VP_MDET_TX_PQM(i)); |
---|
| 1558 | + if (reg & VP_MDET_TX_PQM_VALID_M) { |
---|
| 1559 | + wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF); |
---|
| 1560 | + vf->mdd_tx_events.count++; |
---|
| 1561 | + set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state); |
---|
| 1562 | + if (netif_msg_tx_err(pf)) |
---|
| 1563 | + dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n", |
---|
| 1564 | + i); |
---|
| 1565 | + } |
---|
| 1566 | + |
---|
| 1567 | + reg = rd32(hw, VP_MDET_TX_TCLAN(i)); |
---|
| 1568 | + if (reg & VP_MDET_TX_TCLAN_VALID_M) { |
---|
| 1569 | + wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF); |
---|
| 1570 | + vf->mdd_tx_events.count++; |
---|
| 1571 | + set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state); |
---|
| 1572 | + if (netif_msg_tx_err(pf)) |
---|
| 1573 | + dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n", |
---|
| 1574 | + i); |
---|
| 1575 | + } |
---|
| 1576 | + |
---|
| 1577 | + reg = rd32(hw, VP_MDET_TX_TDPU(i)); |
---|
| 1578 | + if (reg & VP_MDET_TX_TDPU_VALID_M) { |
---|
| 1579 | + wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF); |
---|
| 1580 | + vf->mdd_tx_events.count++; |
---|
| 1581 | + set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state); |
---|
| 1582 | + if (netif_msg_tx_err(pf)) |
---|
| 1583 | + dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n", |
---|
| 1584 | + i); |
---|
| 1585 | + } |
---|
| 1586 | + |
---|
| 1587 | + reg = rd32(hw, VP_MDET_RX(i)); |
---|
| 1588 | + if (reg & VP_MDET_RX_VALID_M) { |
---|
| 1589 | + wr32(hw, VP_MDET_RX(i), 0xFFFF); |
---|
| 1590 | + vf->mdd_rx_events.count++; |
---|
| 1591 | + set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state); |
---|
| 1592 | + if (netif_msg_rx_err(pf)) |
---|
| 1593 | + dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n", |
---|
| 1594 | + i); |
---|
| 1595 | + |
---|
| 1596 | + /* Since the queue is disabled on VF Rx MDD events, the |
---|
| 1597 | + * PF can be configured to reset the VF through ethtool |
---|
| 1598 | + * private flag mdd-auto-reset-vf. |
---|
| 1599 | + */ |
---|
| 1600 | + if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)) { |
---|
| 1601 | + /* VF MDD event counters will be cleared by |
---|
| 1602 | + * reset, so print the event prior to reset. |
---|
| 1603 | + */ |
---|
| 1604 | + ice_print_vf_rx_mdd_event(vf); |
---|
| 1605 | + mutex_lock(&pf->vf[i].cfg_lock); |
---|
| 1606 | + ice_reset_vf(&pf->vf[i], false); |
---|
| 1607 | + mutex_unlock(&pf->vf[i].cfg_lock); |
---|
| 1608 | + } |
---|
| 1609 | + } |
---|
| 1610 | + } |
---|
| 1611 | + |
---|
| 1612 | + ice_print_vfs_mdd_events(pf); |
---|
| 1613 | +} |
---|
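A side note on the register decoding above: the repeated mask-and-shift pairs can also be written with FIELD_GET() from <linux/bitfield.h>. This is an alternative-style sketch, not what this patch does (later driver revisions did move in this direction); it assumes the GL_MDET_* masks are compile-time constants, which they are.

```c
/* Equivalent decode using FIELD_GET() (illustrative sketch) */
u32 reg = rd32(hw, GL_MDET_TX_PQM);

if (reg & GL_MDET_TX_PQM_VALID_M) {
	u8 pf_num = FIELD_GET(GL_MDET_TX_PQM_PF_NUM_M, reg);
	u16 vf_num = FIELD_GET(GL_MDET_TX_PQM_VF_NUM_M, reg);
	u8 event = FIELD_GET(GL_MDET_TX_PQM_MAL_TYPE_M, reg);
	u16 queue = FIELD_GET(GL_MDET_TX_PQM_QNUM_M, reg);
	/* ... log and clear the register as the function above does ... */
}
```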
| 1614 | + |
---|
| 1615 | +/** |
---|
| 1616 | + * ice_force_phys_link_state - Force the physical link state |
---|
| 1617 | + * @vsi: VSI to force the physical link state to up/down |
---|
| 1618 | + * @link_up: true/false indicates to set the physical link to up/down |
---|
| 1619 | + * |
---|
| 1620 | + * Force the physical link state by getting the current PHY capabilities from |
---|
| 1621 | + * hardware and setting the PHY config based on the determined capabilities. If |
---|
| 1622 | + * link changes a link event will be triggered because both the Enable Automatic |
---|
| 1623 | + * Link Update and LESM Enable bits are set when setting the PHY capabilities. |
---|
| 1624 | + * |
---|
| 1625 | + * Returns 0 on success, negative on failure |
---|
| 1626 | + */ |
---|
| 1627 | +static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up) |
---|
| 1628 | +{ |
---|
| 1629 | + struct ice_aqc_get_phy_caps_data *pcaps; |
---|
| 1630 | + struct ice_aqc_set_phy_cfg_data *cfg; |
---|
| 1631 | + struct ice_port_info *pi; |
---|
| 1632 | + struct device *dev; |
---|
| 1633 | + int retcode; |
---|
| 1634 | + |
---|
| 1635 | + if (!vsi || !vsi->port_info || !vsi->back) |
---|
| 1636 | + return -EINVAL; |
---|
| 1637 | + if (vsi->type != ICE_VSI_PF) |
---|
| 1638 | + return 0; |
---|
| 1639 | + |
---|
| 1640 | + dev = ice_pf_to_dev(vsi->back); |
---|
| 1641 | + |
---|
| 1642 | + pi = vsi->port_info; |
---|
| 1643 | + |
---|
| 1644 | + pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); |
---|
| 1645 | + if (!pcaps) |
---|
| 1646 | + return -ENOMEM; |
---|
| 1647 | + |
---|
| 1648 | + retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps, |
---|
| 1649 | + NULL); |
---|
| 1650 | + if (retcode) { |
---|
| 1651 | + dev_err(dev, "Failed to get phy capabilities, VSI %d error %d\n", |
---|
| 1652 | + vsi->vsi_num, retcode); |
---|
| 1653 | + retcode = -EIO; |
---|
| 1654 | + goto out; |
---|
| 1655 | + } |
---|
| 1656 | + |
---|
| 1657 | + /* No change in link */ |
---|
| 1658 | + if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) && |
---|
| 1659 | + link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP)) |
---|
| 1660 | + goto out; |
---|
| 1661 | + |
---|
| 1662 | + /* Use the current user PHY configuration. The current user PHY |
---|
| 1663 | + * configuration is initialized during probe from PHY capabilities |
---|
| 1664 | + * software mode, and updated on set PHY configuration. |
---|
| 1665 | + */ |
---|
| 1666 | + cfg = kmemdup(&pi->phy.curr_user_phy_cfg, sizeof(*cfg), GFP_KERNEL); |
---|
| 1667 | + if (!cfg) { |
---|
| 1668 | + retcode = -ENOMEM; |
---|
| 1669 | + goto out; |
---|
| 1670 | + } |
---|
| 1671 | + |
---|
| 1672 | + cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; |
---|
| 1673 | + if (link_up) |
---|
| 1674 | + cfg->caps |= ICE_AQ_PHY_ENA_LINK; |
---|
| 1675 | + else |
---|
| 1676 | + cfg->caps &= ~ICE_AQ_PHY_ENA_LINK; |
---|
| 1677 | + |
---|
| 1678 | + retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL); |
---|
| 1679 | + if (retcode) { |
---|
| 1680 | + dev_err(dev, "Failed to set phy config, VSI %d error %d\n", |
---|
| 1681 | + vsi->vsi_num, retcode); |
---|
| 1682 | + retcode = -EIO; |
---|
| 1683 | + } |
---|
| 1684 | + |
---|
| 1685 | + kfree(cfg); |
---|
| 1686 | +out: |
---|
| 1687 | + kfree(pcaps); |
---|
| 1688 | + return retcode; |
---|
| 1689 | +} |
---|
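For context, a hedged sketch of the expected down-direction caller: when the link-down-on-close private flag is set, a close path would force the PHY down (the up direction is handled by ice_configure_phy() later in this patch). The wrapper name below is hypothetical.

```c
/* Assumed close-path usage (sketch) */
static void ice_example_close(struct ice_vsi *vsi)
{
	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags) &&
	    ice_force_phys_link_state(vsi, false))
		dev_dbg(ice_pf_to_dev(vsi->back),
			"failed to force physical link down\n");
}
```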
| 1690 | + |
---|
| 1691 | +/** |
---|
| 1692 | + * ice_init_nvm_phy_type - Initialize the NVM PHY type |
---|
| 1693 | + * @pi: port info structure |
---|
| 1694 | + * |
---|
| 1695 | + * Initialize nvm_phy_type_[low|high] for link lenient mode support |
---|
| 1696 | + */ |
---|
| 1697 | +static int ice_init_nvm_phy_type(struct ice_port_info *pi) |
---|
| 1698 | +{ |
---|
| 1699 | + struct ice_aqc_get_phy_caps_data *pcaps; |
---|
| 1700 | + struct ice_pf *pf = pi->hw->back; |
---|
| 1701 | + enum ice_status status; |
---|
| 1702 | + int err = 0; |
---|
| 1703 | + |
---|
| 1704 | + pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); |
---|
| 1705 | + if (!pcaps) |
---|
| 1706 | + return -ENOMEM; |
---|
| 1707 | + |
---|
| 1708 | + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA, pcaps, |
---|
| 1709 | + NULL); |
---|
| 1710 | + |
---|
| 1711 | + if (status) { |
---|
| 1712 | + dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n"); |
---|
| 1713 | + err = -EIO; |
---|
| 1714 | + goto out; |
---|
| 1715 | + } |
---|
| 1716 | + |
---|
| 1717 | + pf->nvm_phy_type_hi = pcaps->phy_type_high; |
---|
| 1718 | + pf->nvm_phy_type_lo = pcaps->phy_type_low; |
---|
| 1719 | + |
---|
| 1720 | +out: |
---|
| 1721 | + kfree(pcaps); |
---|
| 1722 | + return err; |
---|
| 1723 | +} |
---|
| 1724 | + |
---|
| 1725 | +/** |
---|
| 1726 | + * ice_init_link_dflt_override - Initialize link default override |
---|
| 1727 | + * @pi: port info structure |
---|
| 1728 | + * |
---|
| 1729 | + * Initialize link default override and PHY total port shutdown during probe |
---|
| 1730 | + */ |
---|
| 1731 | +static void ice_init_link_dflt_override(struct ice_port_info *pi) |
---|
| 1732 | +{ |
---|
| 1733 | + struct ice_link_default_override_tlv *ldo; |
---|
| 1734 | + struct ice_pf *pf = pi->hw->back; |
---|
| 1735 | + |
---|
| 1736 | + ldo = &pf->link_dflt_override; |
---|
| 1737 | + if (ice_get_link_default_override(ldo, pi)) |
---|
| 1738 | + return; |
---|
| 1739 | + |
---|
| 1740 | + if (!(ldo->options & ICE_LINK_OVERRIDE_PORT_DIS)) |
---|
| 1741 | + return; |
---|
| 1742 | + |
---|
| 1743 | + /* Enable Total Port Shutdown (override/replace link-down-on-close |
---|
| 1744 | + * ethtool private flag) for ports with Port Disable bit set. |
---|
| 1745 | + */ |
---|
| 1746 | + set_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags); |
---|
| 1747 | + set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags); |
---|
| 1748 | +} |
---|
| 1749 | + |
---|
| 1750 | +/** |
---|
| 1751 | + * ice_init_phy_cfg_dflt_override - Initialize PHY cfg default override settings |
---|
| 1752 | + * @pi: port info structure |
---|
| 1753 | + * |
---|
| 1754 | + * If default override is enabled, initialize the user PHY cfg speed and FEC
---|
| 1755 | + * settings using the default override mask from the NVM. |
---|
| 1756 | + * |
---|
| 1757 | + * The PHY should only be configured with the default override settings the |
---|
| 1758 | + * first time media is available. The __ICE_LINK_DEFAULT_OVERRIDE_PENDING state |
---|
| 1759 | + * is used to indicate that the user PHY cfg default override is initialized |
---|
| 1760 | + * and the PHY has not been configured with the default override settings. The |
---|
| 1761 | + * state is set here, and cleared in ice_configure_phy the first time the PHY is |
---|
| 1762 | + * configured. |
---|
| 1763 | + */ |
---|
| 1764 | +static void ice_init_phy_cfg_dflt_override(struct ice_port_info *pi) |
---|
| 1765 | +{ |
---|
| 1766 | + struct ice_link_default_override_tlv *ldo; |
---|
| 1767 | + struct ice_aqc_set_phy_cfg_data *cfg; |
---|
| 1768 | + struct ice_phy_info *phy = &pi->phy; |
---|
| 1769 | + struct ice_pf *pf = pi->hw->back; |
---|
| 1770 | + |
---|
| 1771 | + ldo = &pf->link_dflt_override; |
---|
| 1772 | + |
---|
| 1773 | + /* If link default override is enabled, use it to mask NVM PHY capabilities
---|
| 1774 | + * for speed and FEC default configuration. |
---|
| 1775 | + */ |
---|
| 1776 | + cfg = &phy->curr_user_phy_cfg; |
---|
| 1777 | + |
---|
| 1778 | + if (ldo->phy_type_low || ldo->phy_type_high) { |
---|
| 1779 | + cfg->phy_type_low = pf->nvm_phy_type_lo & |
---|
| 1780 | + cpu_to_le64(ldo->phy_type_low); |
---|
| 1781 | + cfg->phy_type_high = pf->nvm_phy_type_hi & |
---|
| 1782 | + cpu_to_le64(ldo->phy_type_high); |
---|
| 1783 | + } |
---|
| 1784 | + cfg->link_fec_opt = ldo->fec_options; |
---|
| 1785 | + phy->curr_user_fec_req = ICE_FEC_AUTO; |
---|
| 1786 | + |
---|
| 1787 | + set_bit(__ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state); |
---|
| 1788 | +} |
---|
| 1789 | + |
---|
| 1790 | +/** |
---|
| 1791 | + * ice_init_phy_user_cfg - Initialize the PHY user configuration |
---|
| 1792 | + * @pi: port info structure |
---|
| 1793 | + * |
---|
| 1794 | + * Initialize the current user PHY configuration, and the requested speed, FEC,
---|
| 1795 | + * and FC modes to their defaults. The PHY defaults come from the get PHY
---|
| 1796 | + * capabilities report for topology with media, so call this only once media is
---|
| 1797 | + * available; an error is returned if it is not. The PHY initialization completed
---|
| 1798 | + * state is set here.
---|
| 1799 | + *
---|
| 1800 | + * These values are used whenever the PHY configuration is set, and the user
---|
| 1801 | + * PHY configuration is updated on each set PHY configuration call.
---|
| 1802 | + * Returns 0 on success, negative on failure.
---|
| 1803 | + */ |
---|
| 1804 | +static int ice_init_phy_user_cfg(struct ice_port_info *pi) |
---|
| 1805 | +{ |
---|
| 1806 | + struct ice_aqc_get_phy_caps_data *pcaps; |
---|
| 1807 | + struct ice_phy_info *phy = &pi->phy; |
---|
| 1808 | + struct ice_pf *pf = pi->hw->back; |
---|
| 1809 | + enum ice_status status; |
---|
| 1810 | + struct ice_vsi *vsi; |
---|
| 1811 | + int err = 0; |
---|
| 1812 | + |
---|
| 1813 | + if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) |
---|
| 1814 | + return -EIO; |
---|
| 1815 | + |
---|
| 1816 | + vsi = ice_get_main_vsi(pf); |
---|
| 1817 | + if (!vsi) |
---|
| 1818 | + return -EINVAL; |
---|
| 1819 | + |
---|
| 1820 | + pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); |
---|
| 1821 | + if (!pcaps) |
---|
| 1822 | + return -ENOMEM; |
---|
| 1823 | + |
---|
| 1824 | + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps, |
---|
| 1825 | + NULL); |
---|
| 1826 | + if (status) { |
---|
| 1827 | + dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n"); |
---|
| 1828 | + err = -EIO; |
---|
| 1829 | + goto err_out; |
---|
| 1830 | + } |
---|
| 1831 | + |
---|
| 1832 | + ice_copy_phy_caps_to_cfg(pi, pcaps, &pi->phy.curr_user_phy_cfg); |
---|
| 1833 | + |
---|
| 1834 | + /* check if lenient mode is supported and enabled */ |
---|
| 1835 | + if (ice_fw_supports_link_override(&vsi->back->hw) && |
---|
| 1836 | + !(pcaps->module_compliance_enforcement & |
---|
| 1837 | + ICE_AQC_MOD_ENFORCE_STRICT_MODE)) { |
---|
| 1838 | + set_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags); |
---|
| 1839 | + |
---|
| 1840 | + /* if link default override is enabled, initialize user PHY |
---|
| 1841 | + * configuration with link default override values |
---|
| 1842 | + */ |
---|
| 1843 | + if (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN) { |
---|
| 1844 | + ice_init_phy_cfg_dflt_override(pi); |
---|
| 1845 | + goto out; |
---|
| 1846 | + } |
---|
| 1847 | + } |
---|
| 1848 | + |
---|
| 1849 | + /* if link default override is not enabled, initialize PHY using |
---|
| 1850 | + * topology with media |
---|
| 1851 | + */ |
---|
| 1852 | + phy->curr_user_fec_req = ice_caps_to_fec_mode(pcaps->caps, |
---|
| 1853 | + pcaps->link_fec_options); |
---|
| 1854 | + phy->curr_user_fc_req = ice_caps_to_fc_mode(pcaps->caps); |
---|
| 1855 | + |
---|
| 1856 | +out: |
---|
| 1857 | + phy->curr_user_speed_req = ICE_AQ_LINK_SPEED_M; |
---|
| 1858 | + set_bit(__ICE_PHY_INIT_COMPLETE, pf->state); |
---|
| 1859 | +err_out: |
---|
| 1860 | + kfree(pcaps); |
---|
| 1861 | + return err; |
---|
| 1862 | +} |
---|
| 1863 | + |
---|
| 1864 | +/** |
---|
| 1865 | + * ice_configure_phy - configure PHY |
---|
| 1866 | + * @vsi: VSI of PHY |
---|
| 1867 | + * |
---|
| 1868 | + * Set the PHY configuration. If the current PHY configuration is the same as |
---|
| 1869 | + * the curr_user_phy_cfg, then do nothing to avoid link flap. Otherwise,
---|
| 1870 | + * configure based on the get PHY capabilities report for topology with media.
---|
| 1871 | + */ |
---|
| 1872 | +static int ice_configure_phy(struct ice_vsi *vsi) |
---|
| 1873 | +{ |
---|
| 1874 | + struct device *dev = ice_pf_to_dev(vsi->back); |
---|
| 1875 | + struct ice_aqc_get_phy_caps_data *pcaps; |
---|
| 1876 | + struct ice_aqc_set_phy_cfg_data *cfg; |
---|
| 1877 | + struct ice_port_info *pi; |
---|
| 1878 | + enum ice_status status; |
---|
| 1879 | + int err = 0; |
---|
| 1880 | + |
---|
| 1881 | + pi = vsi->port_info; |
---|
| 1882 | + if (!pi) |
---|
| 1883 | + return -EINVAL; |
---|
| 1884 | + |
---|
| 1885 | + /* Ensure we have media as we cannot configure a medialess port */ |
---|
| 1886 | + if (!(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) |
---|
| 1887 | + return -EPERM; |
---|
| 1888 | + |
---|
| 1889 | + ice_print_topo_conflict(vsi); |
---|
| 1890 | + |
---|
| 1891 | + if (vsi->port_info->phy.link_info.topo_media_conflict == |
---|
| 1892 | + ICE_AQ_LINK_TOPO_UNSUPP_MEDIA) |
---|
| 1893 | + return -EPERM; |
---|
| 1894 | + |
---|
| 1895 | + if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) |
---|
| 1896 | + return ice_force_phys_link_state(vsi, true); |
---|
| 1897 | + |
---|
| 1898 | + pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); |
---|
| 1899 | + if (!pcaps) |
---|
| 1900 | + return -ENOMEM; |
---|
| 1901 | + |
---|
| 1902 | + /* Get current PHY config */ |
---|
| 1903 | + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps, |
---|
| 1904 | + NULL); |
---|
| 1905 | + if (status) { |
---|
| 1906 | + dev_err(dev, "Failed to get PHY configuration, VSI %d error %s\n", |
---|
| 1907 | + vsi->vsi_num, ice_stat_str(status)); |
---|
| 1908 | + err = -EIO; |
---|
| 1909 | + goto done; |
---|
| 1910 | + } |
---|
| 1911 | + |
---|
| 1912 | + /* If PHY enable link is configured and configuration has not changed, |
---|
| 1913 | + * there's nothing to do |
---|
| 1914 | + */ |
---|
| 1915 | + if (pcaps->caps & ICE_AQC_PHY_EN_LINK && |
---|
| 1916 | + ice_phy_caps_equals_cfg(pcaps, &pi->phy.curr_user_phy_cfg)) |
---|
| 1917 | + goto done; |
---|
| 1918 | + |
---|
| 1919 | + /* Use PHY topology as baseline for configuration */ |
---|
| 1920 | + memset(pcaps, 0, sizeof(*pcaps)); |
---|
| 1921 | + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps, |
---|
| 1922 | + NULL); |
---|
| 1923 | + if (status) { |
---|
| 1924 | + dev_err(dev, "Failed to get PHY topology, VSI %d error %s\n", |
---|
| 1925 | + vsi->vsi_num, ice_stat_str(status)); |
---|
| 1926 | + err = -EIO; |
---|
| 1927 | + goto done; |
---|
| 1928 | + } |
---|
| 1929 | + |
---|
| 1930 | + cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); |
---|
| 1931 | + if (!cfg) { |
---|
| 1932 | + err = -ENOMEM; |
---|
| 1933 | + goto done; |
---|
| 1934 | + } |
---|
| 1935 | + |
---|
| 1936 | + ice_copy_phy_caps_to_cfg(pi, pcaps, cfg); |
---|
| 1937 | + |
---|
| 1938 | + /* Speed - If default override pending, use curr_user_phy_cfg set in |
---|
| 1939 | + * ice_init_phy_cfg_dflt_override().
---|
| 1940 | + */ |
---|
| 1941 | + if (test_and_clear_bit(__ICE_LINK_DEFAULT_OVERRIDE_PENDING, |
---|
| 1942 | + vsi->back->state)) { |
---|
| 1943 | + cfg->phy_type_low = pi->phy.curr_user_phy_cfg.phy_type_low; |
---|
| 1944 | + cfg->phy_type_high = pi->phy.curr_user_phy_cfg.phy_type_high; |
---|
| 1945 | + } else { |
---|
| 1946 | + u64 phy_low = 0, phy_high = 0; |
---|
| 1947 | + |
---|
| 1948 | + ice_update_phy_type(&phy_low, &phy_high, |
---|
| 1949 | + pi->phy.curr_user_speed_req); |
---|
| 1950 | + cfg->phy_type_low = pcaps->phy_type_low & cpu_to_le64(phy_low); |
---|
| 1951 | + cfg->phy_type_high = pcaps->phy_type_high & |
---|
| 1952 | + cpu_to_le64(phy_high); |
---|
| 1953 | + } |
---|
| 1954 | + |
---|
| 1955 | + /* Can't provide what was requested; use PHY capabilities */ |
---|
| 1956 | + if (!cfg->phy_type_low && !cfg->phy_type_high) { |
---|
| 1957 | + cfg->phy_type_low = pcaps->phy_type_low; |
---|
| 1958 | + cfg->phy_type_high = pcaps->phy_type_high; |
---|
| 1959 | + } |
---|
| 1960 | + |
---|
| 1961 | + /* FEC */ |
---|
| 1962 | + ice_cfg_phy_fec(pi, cfg, pi->phy.curr_user_fec_req); |
---|
| 1963 | + |
---|
| 1964 | + /* Can't provide what was requested; use PHY capabilities */ |
---|
| 1965 | + if (cfg->link_fec_opt != |
---|
| 1966 | + (cfg->link_fec_opt & pcaps->link_fec_options)) { |
---|
| 1967 | + cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC; |
---|
| 1968 | + cfg->link_fec_opt = pcaps->link_fec_options; |
---|
| 1969 | + } |
---|
| 1970 | + |
---|
| 1971 | + /* Flow Control - always supported; no need to check against |
---|
| 1972 | + * capabilities |
---|
| 1973 | + */ |
---|
| 1974 | + ice_cfg_phy_fc(pi, cfg, pi->phy.curr_user_fc_req); |
---|
| 1975 | + |
---|
| 1976 | + /* Enable link and link update */ |
---|
| 1977 | + cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK; |
---|
| 1978 | + |
---|
| 1979 | + status = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL); |
---|
| 1980 | + if (status) { |
---|
| 1981 | + dev_err(dev, "Failed to set phy config, VSI %d error %s\n", |
---|
| 1982 | + vsi->vsi_num, ice_stat_str(status)); |
---|
| 1983 | + err = -EIO; |
---|
| 1984 | + } |
---|
| 1985 | + |
---|
| 1986 | + kfree(cfg); |
---|
| 1987 | +done: |
---|
| 1988 | + kfree(pcaps); |
---|
| 1989 | + return err; |
---|
| 1990 | +} |
---|
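Both "Can't provide what was requested" fallbacks above rely on the same subset test, isolated below for clarity. The helper is illustrative, not part of the driver: a request is honorable only if every bit it sets is also present in the reported capabilities, e.g. requested 0x3 against supported 0x1 fails and triggers the wholesale fallback to the capability set.

```c
/* Illustrative subset test (not in the driver) */
static bool ice_example_req_is_subset(u8 requested, u8 supported)
{
	return requested == (requested & supported);
}
```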
| 1991 | + |
---|
| 1992 | +/** |
---|
| 1993 | + * ice_check_media_subtask - Check for media |
---|
| 1994 | + * @pf: pointer to PF struct |
---|
| 1995 | + * |
---|
| 1996 | + * If media is available, then initialize the PHY user configuration if it has
---|
| 1997 | + * not been done yet, and configure the PHY if the interface is up.
---|
| 1998 | + */ |
---|
| 1999 | +static void ice_check_media_subtask(struct ice_pf *pf) |
---|
| 2000 | +{ |
---|
| 2001 | + struct ice_port_info *pi; |
---|
| 2002 | + struct ice_vsi *vsi; |
---|
| 2003 | + int err; |
---|
| 2004 | + |
---|
| 2005 | + /* No need to check for media if it's already present */ |
---|
| 2006 | + if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags)) |
---|
| 2007 | + return; |
---|
| 2008 | + |
---|
| 2009 | + vsi = ice_get_main_vsi(pf); |
---|
| 2010 | + if (!vsi) |
---|
| 2011 | + return; |
---|
| 2012 | + |
---|
| 2013 | + /* Refresh link info and check if media is present */ |
---|
| 2014 | + pi = vsi->port_info; |
---|
| 2015 | + err = ice_update_link_info(pi); |
---|
| 2016 | + if (err) |
---|
| 2017 | + return; |
---|
| 2018 | + |
---|
| 2019 | + if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) { |
---|
| 2020 | + if (!test_bit(__ICE_PHY_INIT_COMPLETE, pf->state)) |
---|
| 2021 | + ice_init_phy_user_cfg(pi); |
---|
| 2022 | + |
---|
| 2023 | + /* PHY settings are reset on media insertion, reconfigure |
---|
| 2024 | + * PHY to preserve settings. |
---|
| 2025 | + */ |
---|
| 2026 | + if (test_bit(__ICE_DOWN, vsi->state) && |
---|
| 2027 | + test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) |
---|
| 2028 | + return; |
---|
| 2029 | + |
---|
| 2030 | + err = ice_configure_phy(vsi); |
---|
| 2031 | + if (!err) |
---|
| 2032 | + clear_bit(ICE_FLAG_NO_MEDIA, pf->flags); |
---|
| 2033 | + |
---|
| 2034 | + /* A Link Status Event will be generated; the event handler |
---|
| 2035 | + * will complete bringing the interface up |
---|
| 2036 | + */ |
---|
| 2037 | + } |
---|
1003 | 2038 | } |
---|
1004 | 2039 | |
---|
1005 | 2040 | /** |
---|
.. | .. |
---|
1016 | 2051 | /* process reset requests first */ |
---|
1017 | 2052 | ice_reset_subtask(pf); |
---|
1018 | 2053 | |
---|
1019 | | - /* bail if a reset/recovery cycle is pending */ |
---|
1020 | | - if (ice_is_reset_recovery_pending(pf->state) || |
---|
1021 | | - test_bit(__ICE_SUSPENDED, pf->state)) { |
---|
| 2054 | + /* bail if a reset/recovery cycle is pending or rebuild failed */ |
---|
| 2055 | + if (ice_is_reset_in_progress(pf->state) || |
---|
| 2056 | + test_bit(__ICE_SUSPENDED, pf->state) || |
---|
| 2057 | + test_bit(__ICE_NEEDS_RESTART, pf->state)) { |
---|
1022 | 2058 | ice_service_task_complete(pf); |
---|
1023 | 2059 | return; |
---|
1024 | 2060 | } |
---|
1025 | 2061 | |
---|
1026 | | - ice_sync_fltr_subtask(pf); |
---|
1027 | | - ice_watchdog_subtask(pf); |
---|
1028 | 2062 | ice_clean_adminq_subtask(pf); |
---|
| 2063 | + ice_check_media_subtask(pf); |
---|
| 2064 | + ice_check_for_hang_subtask(pf); |
---|
| 2065 | + ice_sync_fltr_subtask(pf); |
---|
| 2066 | + ice_handle_mdd_event(pf); |
---|
| 2067 | + ice_watchdog_subtask(pf); |
---|
1029 | 2068 | |
---|
| 2069 | + if (ice_is_safe_mode(pf)) { |
---|
| 2070 | + ice_service_task_complete(pf); |
---|
| 2071 | + return; |
---|
| 2072 | + } |
---|
| 2073 | + |
---|
| 2074 | + ice_process_vflr_event(pf); |
---|
| 2075 | + ice_clean_mailboxq_subtask(pf); |
---|
| 2076 | + ice_sync_arfs_fltrs(pf); |
---|
1030 | 2077 | /* Clear __ICE_SERVICE_SCHED flag to allow scheduling next event */ |
---|
1031 | 2078 | ice_service_task_complete(pf); |
---|
1032 | 2079 | |
---|
.. | .. |
---|
1035 | 2082 | * schedule the service task now. |
---|
1036 | 2083 | */ |
---|
1037 | 2084 | if (time_after(jiffies, (start_time + pf->serv_tmr_period)) || |
---|
| 2085 | + test_bit(__ICE_MDD_EVENT_PENDING, pf->state) || |
---|
| 2086 | + test_bit(__ICE_VFLR_EVENT_PENDING, pf->state) || |
---|
| 2087 | + test_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state) || |
---|
1038 | 2088 | test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state)) |
---|
1039 | 2089 | mod_timer(&pf->serv_tmr, jiffies); |
---|
1040 | 2090 | } |
---|
1041 | 2091 | |
---|
1042 | 2092 | /** |
---|
1043 | 2093 | * ice_set_ctrlq_len - helper function to set controlq length |
---|
1044 | | - * @hw: pointer to the hw instance |
---|
| 2094 | + * @hw: pointer to the HW instance |
---|
1045 | 2095 | */ |
---|
1046 | 2096 | static void ice_set_ctrlq_len(struct ice_hw *hw) |
---|
1047 | 2097 | { |
---|
.. | .. |
---|
1049 | 2099 | hw->adminq.num_sq_entries = ICE_AQ_LEN; |
---|
1050 | 2100 | hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN; |
---|
1051 | 2101 | hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN; |
---|
| 2102 | + hw->mailboxq.num_rq_entries = PF_MBX_ARQLEN_ARQLEN_M; |
---|
| 2103 | + hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN; |
---|
| 2104 | + hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN; |
---|
| 2105 | + hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN; |
---|
| 2106 | +} |
---|
| 2107 | + |
---|
| 2108 | +/** |
---|
| 2109 | + * ice_schedule_reset - schedule a reset |
---|
| 2110 | + * @pf: board private structure |
---|
| 2111 | + * @reset: reset being requested |
---|
| 2112 | + */ |
---|
| 2113 | +int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset) |
---|
| 2114 | +{ |
---|
| 2115 | + struct device *dev = ice_pf_to_dev(pf); |
---|
| 2116 | + |
---|
| 2117 | + /* bail out if earlier reset has failed */ |
---|
| 2118 | + if (test_bit(__ICE_RESET_FAILED, pf->state)) { |
---|
| 2119 | + dev_dbg(dev, "earlier reset has failed\n"); |
---|
| 2120 | + return -EIO; |
---|
| 2121 | + } |
---|
| 2122 | + /* bail if reset/recovery already in progress */ |
---|
| 2123 | + if (ice_is_reset_in_progress(pf->state)) { |
---|
| 2124 | + dev_dbg(dev, "Reset already in progress\n"); |
---|
| 2125 | + return -EBUSY; |
---|
| 2126 | + } |
---|
| 2127 | + |
---|
| 2128 | + switch (reset) { |
---|
| 2129 | + case ICE_RESET_PFR: |
---|
| 2130 | + set_bit(__ICE_PFR_REQ, pf->state); |
---|
| 2131 | + break; |
---|
| 2132 | + case ICE_RESET_CORER: |
---|
| 2133 | + set_bit(__ICE_CORER_REQ, pf->state); |
---|
| 2134 | + break; |
---|
| 2135 | + case ICE_RESET_GLOBR: |
---|
| 2136 | + set_bit(__ICE_GLOBR_REQ, pf->state); |
---|
| 2137 | + break; |
---|
| 2138 | + default: |
---|
| 2139 | + return -EINVAL; |
---|
| 2140 | + } |
---|
| 2141 | + |
---|
| 2142 | + ice_service_task_schedule(pf); |
---|
| 2143 | + return 0; |
---|
1052 | 2144 | } |
---|
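ice_schedule_reset() only records the request and kicks the service task; the reset itself runs later from ice_reset_subtask(). A hedged caller sketch follows, with a hypothetical error-recovery trigger:

```c
/* Hypothetical error-recovery call site (sketch) */
if (ice_schedule_reset(pf, ICE_RESET_PFR))
	dev_err(ice_pf_to_dev(pf),
		"PF reset could not be scheduled (failed or already in progress)\n");
```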
1053 | 2145 | |
---|
1054 | 2146 | /** |
---|
.. | .. |
---|
1059 | 2151 | * This is a callback function used by the irq_set_affinity_notifier function |
---|
1060 | 2152 | * so that we may register to receive changes to the irq affinity masks. |
---|
1061 | 2153 | */ |
---|
1062 | | -static void ice_irq_affinity_notify(struct irq_affinity_notify *notify, |
---|
1063 | | - const cpumask_t *mask) |
---|
| 2154 | +static void |
---|
| 2155 | +ice_irq_affinity_notify(struct irq_affinity_notify *notify, |
---|
| 2156 | + const cpumask_t *mask) |
---|
1064 | 2157 | { |
---|
1065 | 2158 | struct ice_q_vector *q_vector = |
---|
1066 | 2159 | container_of(notify, struct ice_q_vector, affinity_notify); |
---|
.. | .. |
---|
1079 | 2172 | static void ice_irq_affinity_release(struct kref __always_unused *ref) {} |
---|
1080 | 2173 | |
---|
1081 | 2174 | /** |
---|
1082 | | - * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI |
---|
1083 | | - * @vsi: the VSI being un-configured |
---|
1084 | | - */ |
---|
1085 | | -static void ice_vsi_dis_irq(struct ice_vsi *vsi) |
---|
1086 | | -{ |
---|
1087 | | - struct ice_pf *pf = vsi->back; |
---|
1088 | | - struct ice_hw *hw = &pf->hw; |
---|
1089 | | - int base = vsi->base_vector; |
---|
1090 | | - u32 val; |
---|
1091 | | - int i; |
---|
1092 | | - |
---|
1093 | | - /* disable interrupt causation from each queue */ |
---|
1094 | | - if (vsi->tx_rings) { |
---|
1095 | | - ice_for_each_txq(vsi, i) { |
---|
1096 | | - if (vsi->tx_rings[i]) { |
---|
1097 | | - u16 reg; |
---|
1098 | | - |
---|
1099 | | - reg = vsi->tx_rings[i]->reg_idx; |
---|
1100 | | - val = rd32(hw, QINT_TQCTL(reg)); |
---|
1101 | | - val &= ~QINT_TQCTL_CAUSE_ENA_M; |
---|
1102 | | - wr32(hw, QINT_TQCTL(reg), val); |
---|
1103 | | - } |
---|
1104 | | - } |
---|
1105 | | - } |
---|
1106 | | - |
---|
1107 | | - if (vsi->rx_rings) { |
---|
1108 | | - ice_for_each_rxq(vsi, i) { |
---|
1109 | | - if (vsi->rx_rings[i]) { |
---|
1110 | | - u16 reg; |
---|
1111 | | - |
---|
1112 | | - reg = vsi->rx_rings[i]->reg_idx; |
---|
1113 | | - val = rd32(hw, QINT_RQCTL(reg)); |
---|
1114 | | - val &= ~QINT_RQCTL_CAUSE_ENA_M; |
---|
1115 | | - wr32(hw, QINT_RQCTL(reg), val); |
---|
1116 | | - } |
---|
1117 | | - } |
---|
1118 | | - } |
---|
1119 | | - |
---|
1120 | | - /* disable each interrupt */ |
---|
1121 | | - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) { |
---|
1122 | | - for (i = vsi->base_vector; |
---|
1123 | | - i < (vsi->num_q_vectors + vsi->base_vector); i++) |
---|
1124 | | - wr32(hw, GLINT_DYN_CTL(i), 0); |
---|
1125 | | - |
---|
1126 | | - ice_flush(hw); |
---|
1127 | | - for (i = 0; i < vsi->num_q_vectors; i++) |
---|
1128 | | - synchronize_irq(pf->msix_entries[i + base].vector); |
---|
1129 | | - } |
---|
1130 | | -} |
---|
1131 | | - |
---|
1132 | | -/** |
---|
1133 | 2175 | * ice_vsi_ena_irq - Enable IRQ for the given VSI |
---|
1134 | 2176 | * @vsi: the VSI being configured |
---|
1135 | 2177 | */ |
---|
1136 | 2178 | static int ice_vsi_ena_irq(struct ice_vsi *vsi) |
---|
1137 | 2179 | { |
---|
1138 | | - struct ice_pf *pf = vsi->back; |
---|
1139 | | - struct ice_hw *hw = &pf->hw; |
---|
| 2180 | + struct ice_hw *hw = &vsi->back->hw; |
---|
| 2181 | + int i; |
---|
1140 | 2182 | |
---|
1141 | | - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) { |
---|
1142 | | - int i; |
---|
1143 | | - |
---|
1144 | | - for (i = 0; i < vsi->num_q_vectors; i++) |
---|
1145 | | - ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]); |
---|
1146 | | - } |
---|
| 2183 | + ice_for_each_q_vector(vsi, i) |
---|
| 2184 | + ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]); |
---|
1147 | 2185 | |
---|
1148 | 2186 | ice_flush(hw); |
---|
1149 | 2187 | return 0; |
---|
1150 | | -} |
---|
1151 | | - |
---|
1152 | | -/** |
---|
1153 | | - * ice_vsi_delete - delete a VSI from the switch |
---|
1154 | | - * @vsi: pointer to VSI being removed |
---|
1155 | | - */ |
---|
1156 | | -static void ice_vsi_delete(struct ice_vsi *vsi) |
---|
1157 | | -{ |
---|
1158 | | - struct ice_pf *pf = vsi->back; |
---|
1159 | | - struct ice_vsi_ctx ctxt; |
---|
1160 | | - enum ice_status status; |
---|
1161 | | - |
---|
1162 | | - ctxt.vsi_num = vsi->vsi_num; |
---|
1163 | | - |
---|
1164 | | - memcpy(&ctxt.info, &vsi->info, sizeof(struct ice_aqc_vsi_props)); |
---|
1165 | | - |
---|
1166 | | - status = ice_aq_free_vsi(&pf->hw, &ctxt, false, NULL); |
---|
1167 | | - if (status) |
---|
1168 | | - dev_err(&pf->pdev->dev, "Failed to delete VSI %i in FW\n", |
---|
1169 | | - vsi->vsi_num); |
---|
1170 | 2188 | } |
---|
1171 | 2189 | |
---|
1172 | 2190 | /** |
---|
.. | .. |
---|
1179 | 2197 | int q_vectors = vsi->num_q_vectors; |
---|
1180 | 2198 | struct ice_pf *pf = vsi->back; |
---|
1181 | 2199 | int base = vsi->base_vector; |
---|
| 2200 | + struct device *dev; |
---|
1182 | 2201 | int rx_int_idx = 0; |
---|
1183 | 2202 | int tx_int_idx = 0; |
---|
1184 | 2203 | int vector, err; |
---|
1185 | 2204 | int irq_num; |
---|
1186 | 2205 | |
---|
| 2206 | + dev = ice_pf_to_dev(pf); |
---|
1187 | 2207 | for (vector = 0; vector < q_vectors; vector++) { |
---|
1188 | 2208 | struct ice_q_vector *q_vector = vsi->q_vectors[vector]; |
---|
1189 | 2209 | |
---|
.. | .. |
---|
1203 | 2223 | /* skip this unused q_vector */ |
---|
1204 | 2224 | continue; |
---|
1205 | 2225 | } |
---|
1206 | | - err = devm_request_irq(&pf->pdev->dev, |
---|
1207 | | - pf->msix_entries[base + vector].vector, |
---|
1208 | | - vsi->irq_handler, 0, q_vector->name, |
---|
1209 | | - q_vector); |
---|
| 2226 | + err = devm_request_irq(dev, irq_num, vsi->irq_handler, 0, |
---|
| 2227 | + q_vector->name, q_vector); |
---|
1210 | 2228 | if (err) { |
---|
1211 | | - netdev_err(vsi->netdev, |
---|
1212 | | - "MSIX request_irq failed, error: %d\n", err); |
---|
| 2229 | + netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n", |
---|
| 2230 | + err); |
---|
1213 | 2231 | goto free_q_irqs; |
---|
1214 | 2232 | } |
---|
1215 | 2233 | |
---|
1216 | 2234 | /* register for affinity change notifications */ |
---|
1217 | | - q_vector->affinity_notify.notify = ice_irq_affinity_notify; |
---|
1218 | | - q_vector->affinity_notify.release = ice_irq_affinity_release; |
---|
1219 | | - irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify); |
---|
| 2235 | + if (!IS_ENABLED(CONFIG_RFS_ACCEL)) { |
---|
| 2236 | + struct irq_affinity_notify *affinity_notify; |
---|
| 2237 | + |
---|
| 2238 | + affinity_notify = &q_vector->affinity_notify; |
---|
| 2239 | + affinity_notify->notify = ice_irq_affinity_notify; |
---|
| 2240 | + affinity_notify->release = ice_irq_affinity_release; |
---|
| 2241 | + irq_set_affinity_notifier(irq_num, affinity_notify); |
---|
| 2242 | + } |
---|
1220 | 2243 | |
---|
1221 | 2244 | /* assign the mask for this irq */ |
---|
1222 | 2245 | irq_set_affinity_hint(irq_num, &q_vector->affinity_mask); |
---|
.. | .. |
---|
1228 | 2251 | free_q_irqs: |
---|
1229 | 2252 | while (vector) { |
---|
1230 | 2253 | vector--; |
---|
1231 | | - irq_num = pf->msix_entries[base + vector].vector, |
---|
1232 | | - irq_set_affinity_notifier(irq_num, NULL); |
---|
| 2254 | + irq_num = pf->msix_entries[base + vector].vector; |
---|
| 2255 | + if (!IS_ENABLED(CONFIG_RFS_ACCEL)) |
---|
| 2256 | + irq_set_affinity_notifier(irq_num, NULL); |
---|
1233 | 2257 | irq_set_affinity_hint(irq_num, NULL); |
---|
1234 | | - devm_free_irq(&pf->pdev->dev, irq_num, &vsi->q_vectors[vector]); |
---|
| 2258 | + devm_free_irq(dev, irq_num, &vsi->q_vectors[vector]); |
---|
1235 | 2259 | } |
---|
1236 | 2260 | return err; |
---|
1237 | 2261 | } |
---|
1238 | 2262 | |
---|
1239 | 2263 | /** |
---|
1240 | | - * ice_vsi_set_rss_params - Setup RSS capabilities per VSI type |
---|
1241 | | - * @vsi: the VSI being configured |
---|
1242 | | - */ |
---|
1243 | | -static void ice_vsi_set_rss_params(struct ice_vsi *vsi) |
---|
1244 | | -{ |
---|
1245 | | - struct ice_hw_common_caps *cap; |
---|
1246 | | - struct ice_pf *pf = vsi->back; |
---|
1247 | | - |
---|
1248 | | - if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { |
---|
1249 | | - vsi->rss_size = 1; |
---|
1250 | | - return; |
---|
1251 | | - } |
---|
1252 | | - |
---|
1253 | | - cap = &pf->hw.func_caps.common_cap; |
---|
1254 | | - switch (vsi->type) { |
---|
1255 | | - case ICE_VSI_PF: |
---|
1256 | | - /* PF VSI will inherit RSS instance of PF */ |
---|
1257 | | - vsi->rss_table_size = cap->rss_table_size; |
---|
1258 | | - vsi->rss_size = min_t(int, num_online_cpus(), |
---|
1259 | | - BIT(cap->rss_table_entry_width)); |
---|
1260 | | - vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF; |
---|
1261 | | - break; |
---|
1262 | | - default: |
---|
1263 | | - dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type); |
---|
1264 | | - break; |
---|
1265 | | - } |
---|
1266 | | -} |
---|
1267 | | - |
---|
1268 | | -/** |
---|
1269 | | - * ice_vsi_setup_q_map - Setup a VSI queue map |
---|
1270 | | - * @vsi: the VSI being configured |
---|
1271 | | - * @ctxt: VSI context structure |
---|
1272 | | - */ |
---|
1273 | | -static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) |
---|
1274 | | -{ |
---|
1275 | | - u16 offset = 0, qmap = 0, numq_tc; |
---|
1276 | | - u16 pow = 0, max_rss = 0, qcount; |
---|
1277 | | - u16 qcount_tx = vsi->alloc_txq; |
---|
1278 | | - u16 qcount_rx = vsi->alloc_rxq; |
---|
1279 | | - bool ena_tc0 = false; |
---|
1280 | | - int i; |
---|
1281 | | - |
---|
1282 | | - /* at least TC0 should be enabled by default */ |
---|
1283 | | - if (vsi->tc_cfg.numtc) { |
---|
1284 | | - if (!(vsi->tc_cfg.ena_tc & BIT(0))) |
---|
1285 | | - ena_tc0 = true; |
---|
1286 | | - } else { |
---|
1287 | | - ena_tc0 = true; |
---|
1288 | | - } |
---|
1289 | | - |
---|
1290 | | - if (ena_tc0) { |
---|
1291 | | - vsi->tc_cfg.numtc++; |
---|
1292 | | - vsi->tc_cfg.ena_tc |= 1; |
---|
1293 | | - } |
---|
1294 | | - |
---|
1295 | | - numq_tc = qcount_rx / vsi->tc_cfg.numtc; |
---|
1296 | | - |
---|
1297 | | - /* TC mapping is a function of the number of Rx queues assigned to the |
---|
1298 | | - * VSI for each traffic class and the offset of these queues. |
---|
1299 | | - * The first 10 bits are for queue offset for TC0, next 4 bits for number of
---|
1300 | | - * queues allocated to TC0. Number of queues is a power-of-2.
---|
1301 | | - * |
---|
1302 | | - * If TC is not enabled, the queue offset is set to 0, and allocate one |
---|
1303 | | - * queue, this way, traffic for the given TC will be sent to the default |
---|
1304 | | - * queue. |
---|
1305 | | - * |
---|
1306 | | - * Setup number and offset of Rx queues for all TCs for the VSI |
---|
1307 | | - */ |
---|
1308 | | - |
---|
1309 | | - /* qcount will change if RSS is enabled */ |
---|
1310 | | - if (test_bit(ICE_FLAG_RSS_ENA, vsi->back->flags)) { |
---|
1311 | | - if (vsi->type == ICE_VSI_PF) |
---|
1312 | | - max_rss = ICE_MAX_LG_RSS_QS; |
---|
1313 | | - else |
---|
1314 | | - max_rss = ICE_MAX_SMALL_RSS_QS; |
---|
1315 | | - |
---|
1316 | | - qcount = min_t(int, numq_tc, max_rss); |
---|
1317 | | - qcount = min_t(int, qcount, vsi->rss_size); |
---|
1318 | | - } else { |
---|
1319 | | - qcount = numq_tc; |
---|
1320 | | - } |
---|
1321 | | - |
---|
1322 | | - /* find the (rounded up) power-of-2 of qcount */ |
---|
1323 | | - pow = order_base_2(qcount); |
---|
1324 | | - |
---|
1325 | | - for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) { |
---|
1326 | | - if (!(vsi->tc_cfg.ena_tc & BIT(i))) { |
---|
1327 | | - /* TC is not enabled */ |
---|
1328 | | - vsi->tc_cfg.tc_info[i].qoffset = 0; |
---|
1329 | | - vsi->tc_cfg.tc_info[i].qcount = 1; |
---|
1330 | | - ctxt->info.tc_mapping[i] = 0; |
---|
1331 | | - continue; |
---|
1332 | | - } |
---|
1333 | | - |
---|
1334 | | - /* TC is enabled */ |
---|
1335 | | - vsi->tc_cfg.tc_info[i].qoffset = offset; |
---|
1336 | | - vsi->tc_cfg.tc_info[i].qcount = qcount; |
---|
1337 | | - |
---|
1338 | | - qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) & |
---|
1339 | | - ICE_AQ_VSI_TC_Q_OFFSET_M) | |
---|
1340 | | - ((pow << ICE_AQ_VSI_TC_Q_NUM_S) & |
---|
1341 | | - ICE_AQ_VSI_TC_Q_NUM_M); |
---|
1342 | | - offset += qcount; |
---|
1343 | | - ctxt->info.tc_mapping[i] = cpu_to_le16(qmap); |
---|
1344 | | - } |
---|
1345 | | - |
---|
1346 | | - vsi->num_txq = qcount_tx; |
---|
1347 | | - vsi->num_rxq = offset; |
---|
1348 | | - |
---|
1349 | | - /* Rx queue mapping */ |
---|
1350 | | - ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG); |
---|
1351 | | - /* q_mapping buffer holds the info for the first queue allocated for |
---|
1352 | | - * this VSI in the PF space and also the number of queues associated |
---|
1353 | | - * with this VSI. |
---|
1354 | | - */ |
---|
1355 | | - ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]); |
---|
1356 | | - ctxt->info.q_mapping[1] = cpu_to_le16(vsi->num_rxq); |
---|
1357 | | -} |
---|
1358 | | - |
---|
1359 | | -/** |
---|
1360 | | - * ice_set_dflt_vsi_ctx - Set default VSI context before adding a VSI |
---|
1361 | | - * @ctxt: the VSI context being set |
---|
| 2264 | + * ice_xdp_alloc_setup_rings - Allocate and setup Tx rings for XDP |
---|
| 2265 | + * @vsi: VSI to setup Tx rings used by XDP |
---|
1362 | 2266 | * |
---|
1363 | | - * This initializes a default VSI context for all sections except the Queues. |
---|
| 2267 | + * Return 0 on success and negative value on error |
---|
1364 | 2268 | */ |
---|
1365 | | -static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt) |
---|
| 2269 | +static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi) |
---|
1366 | 2270 | { |
---|
1367 | | - u32 table = 0; |
---|
1368 | | - |
---|
1369 | | - memset(&ctxt->info, 0, sizeof(ctxt->info)); |
---|
1370 | | - /* VSI's should be allocated from shared pool */ |
---|
1371 | | - ctxt->alloc_from_pool = true; |
---|
1372 | | - /* Src pruning enabled by default */ |
---|
1373 | | - ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE; |
---|
1374 | | - /* Traffic from VSI can be sent to LAN */ |
---|
1375 | | - ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA; |
---|
1376 | | - |
---|
1377 | | - /* By default bits 3 and 4 in vlan_flags are 0's which results in legacy |
---|
1378 | | - * behavior (show VLAN, DEI, and UP) in descriptor. Also, allow all |
---|
1379 | | - * packets untagged/tagged. |
---|
1380 | | - */ |
---|
1381 | | - ctxt->info.vlan_flags = ((ICE_AQ_VSI_VLAN_MODE_ALL & |
---|
1382 | | - ICE_AQ_VSI_VLAN_MODE_M) >> |
---|
1383 | | - ICE_AQ_VSI_VLAN_MODE_S); |
---|
1384 | | - |
---|
1385 | | - /* Have 1:1 UP mapping for both ingress/egress tables */ |
---|
1386 | | - table |= ICE_UP_TABLE_TRANSLATE(0, 0); |
---|
1387 | | - table |= ICE_UP_TABLE_TRANSLATE(1, 1); |
---|
1388 | | - table |= ICE_UP_TABLE_TRANSLATE(2, 2); |
---|
1389 | | - table |= ICE_UP_TABLE_TRANSLATE(3, 3); |
---|
1390 | | - table |= ICE_UP_TABLE_TRANSLATE(4, 4); |
---|
1391 | | - table |= ICE_UP_TABLE_TRANSLATE(5, 5); |
---|
1392 | | - table |= ICE_UP_TABLE_TRANSLATE(6, 6); |
---|
1393 | | - table |= ICE_UP_TABLE_TRANSLATE(7, 7); |
---|
1394 | | - ctxt->info.ingress_table = cpu_to_le32(table); |
---|
1395 | | - ctxt->info.egress_table = cpu_to_le32(table); |
---|
1396 | | - /* Have 1:1 UP mapping for outer to inner UP table */ |
---|
1397 | | - ctxt->info.outer_up_table = cpu_to_le32(table); |
---|
1398 | | - /* No Outer tag support outer_tag_flags remains to zero */ |
---|
1399 | | -} |
---|
1400 | | - |
---|
1401 | | -/** |
---|
1402 | | - * ice_set_rss_vsi_ctx - Set RSS VSI context before adding a VSI |
---|
1403 | | - * @ctxt: the VSI context being set |
---|
1404 | | - * @vsi: the VSI being configured |
---|
1405 | | - */ |
---|
1406 | | -static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi) |
---|
1407 | | -{ |
---|
1408 | | - u8 lut_type, hash_type; |
---|
1409 | | - |
---|
1410 | | - switch (vsi->type) { |
---|
1411 | | - case ICE_VSI_PF: |
---|
1412 | | - /* PF VSI will inherit RSS instance of PF */ |
---|
1413 | | - lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF; |
---|
1414 | | - hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ; |
---|
1415 | | - break; |
---|
1416 | | - default: |
---|
1417 | | - dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n", |
---|
1418 | | - vsi->type); |
---|
1419 | | - return; |
---|
1420 | | - } |
---|
1421 | | - |
---|
1422 | | - ctxt->info.q_opt_rss = ((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) & |
---|
1423 | | - ICE_AQ_VSI_Q_OPT_RSS_LUT_M) | |
---|
1424 | | - ((hash_type << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) & |
---|
1425 | | - ICE_AQ_VSI_Q_OPT_RSS_HASH_M); |
---|
1426 | | -} |
---|
1427 | | - |
---|
1428 | | -/** |
---|
1429 | | - * ice_vsi_add - Create a new VSI or fetch preallocated VSI |
---|
1430 | | - * @vsi: the VSI being configured |
---|
1431 | | - * |
---|
1432 | | - * This initializes a VSI context depending on the VSI type to be added and |
---|
1433 | | - * passes it down to the add_vsi aq command to create a new VSI. |
---|
1434 | | - */ |
---|
1435 | | -static int ice_vsi_add(struct ice_vsi *vsi) |
---|
1436 | | -{ |
---|
1437 | | - struct ice_vsi_ctx ctxt = { 0 }; |
---|
1438 | | - struct ice_pf *pf = vsi->back; |
---|
1439 | | - struct ice_hw *hw = &pf->hw; |
---|
1440 | | - int ret = 0; |
---|
1441 | | - |
---|
1442 | | - switch (vsi->type) { |
---|
1443 | | - case ICE_VSI_PF: |
---|
1444 | | - ctxt.flags = ICE_AQ_VSI_TYPE_PF; |
---|
1445 | | - break; |
---|
1446 | | - default: |
---|
1447 | | - return -ENODEV; |
---|
1448 | | - } |
---|
1449 | | - |
---|
1450 | | - ice_set_dflt_vsi_ctx(&ctxt); |
---|
1451 | | - /* if the switch is in VEB mode, allow VSI loopback */ |
---|
1452 | | - if (vsi->vsw->bridge_mode == BRIDGE_MODE_VEB) |
---|
1453 | | - ctxt.info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB; |
---|
1454 | | - |
---|
1455 | | - /* Set LUT type and HASH type if RSS is enabled */ |
---|
1456 | | - if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) |
---|
1457 | | - ice_set_rss_vsi_ctx(&ctxt, vsi); |
---|
1458 | | - |
---|
1459 | | - ctxt.info.sw_id = vsi->port_info->sw_id; |
---|
1460 | | - ice_vsi_setup_q_map(vsi, &ctxt); |
---|
1461 | | - |
---|
1462 | | - ret = ice_aq_add_vsi(hw, &ctxt, NULL); |
---|
1463 | | - if (ret) { |
---|
1464 | | - dev_err(&vsi->back->pdev->dev, |
---|
1465 | | - "Add VSI AQ call failed, err %d\n", ret); |
---|
1466 | | - return -EIO; |
---|
1467 | | - } |
---|
1468 | | - vsi->info = ctxt.info; |
---|
1469 | | - vsi->vsi_num = ctxt.vsi_num; |
---|
1470 | | - |
---|
1471 | | - return ret; |
---|
1472 | | -} |
---|
1473 | | - |
---|
1474 | | -/** |
---|
1475 | | - * ice_vsi_release_msix - Clear the queue to Interrupt mapping in HW |
---|
1476 | | - * @vsi: the VSI being cleaned up |
---|
1477 | | - */ |
---|
1478 | | -static void ice_vsi_release_msix(struct ice_vsi *vsi) |
---|
1479 | | -{ |
---|
1480 | | - struct ice_pf *pf = vsi->back; |
---|
1481 | | - u16 vector = vsi->base_vector; |
---|
1482 | | - struct ice_hw *hw = &pf->hw; |
---|
1483 | | - u32 txq = 0; |
---|
1484 | | - u32 rxq = 0; |
---|
1485 | | - int i, q; |
---|
1486 | | - |
---|
1487 | | - for (i = 0; i < vsi->num_q_vectors; i++, vector++) { |
---|
1488 | | - struct ice_q_vector *q_vector = vsi->q_vectors[i]; |
---|
1489 | | - |
---|
1490 | | - wr32(hw, GLINT_ITR(ICE_RX_ITR, vector), 0); |
---|
1491 | | - wr32(hw, GLINT_ITR(ICE_TX_ITR, vector), 0); |
---|
1492 | | - for (q = 0; q < q_vector->num_ring_tx; q++) { |
---|
1493 | | - wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0); |
---|
1494 | | - txq++; |
---|
1495 | | - } |
---|
1496 | | - |
---|
1497 | | - for (q = 0; q < q_vector->num_ring_rx; q++) { |
---|
1498 | | - wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), 0); |
---|
1499 | | - rxq++; |
---|
1500 | | - } |
---|
1501 | | - } |
---|
1502 | | - |
---|
1503 | | - ice_flush(hw); |
---|
1504 | | -} |
---|
1505 | | - |
---|
1506 | | -/** |
---|
1507 | | - * ice_vsi_clear_rings - Deallocates the Tx and Rx rings for VSI |
---|
1508 | | - * @vsi: the VSI having rings deallocated |
---|
1509 | | - */ |
---|
1510 | | -static void ice_vsi_clear_rings(struct ice_vsi *vsi) |
---|
1511 | | -{ |
---|
| 2271 | + struct device *dev = ice_pf_to_dev(vsi->back); |
---|
1512 | 2272 | int i; |
---|
1513 | 2273 | |
---|
1514 | | - if (vsi->tx_rings) { |
---|
1515 | | - for (i = 0; i < vsi->alloc_txq; i++) { |
---|
1516 | | - if (vsi->tx_rings[i]) { |
---|
1517 | | - kfree_rcu(vsi->tx_rings[i], rcu); |
---|
1518 | | - vsi->tx_rings[i] = NULL; |
---|
1519 | | - } |
---|
1520 | | - } |
---|
1521 | | - } |
---|
1522 | | - if (vsi->rx_rings) { |
---|
1523 | | - for (i = 0; i < vsi->alloc_rxq; i++) { |
---|
1524 | | - if (vsi->rx_rings[i]) { |
---|
1525 | | - kfree_rcu(vsi->rx_rings[i], rcu); |
---|
1526 | | - vsi->rx_rings[i] = NULL; |
---|
1527 | | - } |
---|
1528 | | - } |
---|
1529 | | - } |
---|
1530 | | -} |
---|
| 2274 | + for (i = 0; i < vsi->num_xdp_txq; i++) { |
---|
| 2275 | + u16 xdp_q_idx = vsi->alloc_txq + i; |
---|
| 2276 | + struct ice_ring *xdp_ring; |
---|
1531 | 2277 | |
---|
1532 | | -/** |
---|
1533 | | - * ice_vsi_alloc_rings - Allocates Tx and Rx rings for the VSI |
---|
1534 | | - * @vsi: VSI which is having rings allocated |
---|
1535 | | - */ |
---|
1536 | | -static int ice_vsi_alloc_rings(struct ice_vsi *vsi) |
---|
1537 | | -{ |
---|
1538 | | - struct ice_pf *pf = vsi->back; |
---|
1539 | | - int i; |
---|
| 2278 | + xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL); |
---|
1540 | 2279 | |
---|
1541 | | - /* Allocate tx_rings */ |
---|
1542 | | - for (i = 0; i < vsi->alloc_txq; i++) { |
---|
1543 | | - struct ice_ring *ring; |
---|
| 2280 | + if (!xdp_ring) |
---|
| 2281 | + goto free_xdp_rings; |
---|
1544 | 2282 | |
---|
1545 | | - /* allocate with kzalloc(), free with kfree_rcu() */ |
---|
1546 | | - ring = kzalloc(sizeof(*ring), GFP_KERNEL); |
---|
1547 | | - |
---|
1548 | | - if (!ring) |
---|
1549 | | - goto err_out; |
---|
1550 | | - |
---|
1551 | | - ring->q_index = i; |
---|
1552 | | - ring->reg_idx = vsi->txq_map[i]; |
---|
1553 | | - ring->ring_active = false; |
---|
1554 | | - ring->vsi = vsi; |
---|
1555 | | - ring->netdev = vsi->netdev; |
---|
1556 | | - ring->dev = &pf->pdev->dev; |
---|
1557 | | - ring->count = vsi->num_desc; |
---|
1558 | | - |
---|
1559 | | - vsi->tx_rings[i] = ring; |
---|
1560 | | - } |
---|
1561 | | - |
---|
1562 | | - /* Allocate rx_rings */ |
---|
1563 | | - for (i = 0; i < vsi->alloc_rxq; i++) { |
---|
1564 | | - struct ice_ring *ring; |
---|
1565 | | - |
---|
1566 | | - /* allocate with kzalloc(), free with kfree_rcu() */ |
---|
1567 | | - ring = kzalloc(sizeof(*ring), GFP_KERNEL); |
---|
1568 | | - if (!ring) |
---|
1569 | | - goto err_out; |
---|
1570 | | - |
---|
1571 | | - ring->q_index = i; |
---|
1572 | | - ring->reg_idx = vsi->rxq_map[i]; |
---|
1573 | | - ring->ring_active = false; |
---|
1574 | | - ring->vsi = vsi; |
---|
1575 | | - ring->netdev = vsi->netdev; |
---|
1576 | | - ring->dev = &pf->pdev->dev; |
---|
1577 | | - ring->count = vsi->num_desc; |
---|
1578 | | - vsi->rx_rings[i] = ring; |
---|
| 2283 | + xdp_ring->q_index = xdp_q_idx; |
---|
| 2284 | + xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx]; |
---|
| 2285 | + xdp_ring->ring_active = false; |
---|
| 2286 | + xdp_ring->vsi = vsi; |
---|
| 2287 | + xdp_ring->netdev = NULL; |
---|
| 2288 | + xdp_ring->dev = dev; |
---|
| 2289 | + xdp_ring->count = vsi->num_tx_desc; |
---|
| 2290 | + WRITE_ONCE(vsi->xdp_rings[i], xdp_ring); |
---|
| 2291 | + if (ice_setup_tx_ring(xdp_ring)) |
---|
| 2292 | + goto free_xdp_rings; |
---|
| 2293 | + ice_set_ring_xdp(xdp_ring); |
---|
| 2294 | + xdp_ring->xsk_pool = ice_xsk_pool(xdp_ring); |
---|
1579 | 2295 | } |
---|
1580 | 2296 | |
---|
1581 | 2297 | return 0; |
---|
1582 | 2298 | |
---|
1583 | | -err_out: |
---|
1584 | | - ice_vsi_clear_rings(vsi); |
---|
| 2299 | +free_xdp_rings: |
---|
| 2300 | + for (; i >= 0; i--) |
---|
| 2301 | + if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc) |
---|
| 2302 | + ice_free_tx_ring(vsi->xdp_rings[i]); |
---|
1585 | 2303 | return -ENOMEM; |
---|
1586 | 2304 | } |
---|
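The new ice_xdp_alloc_setup_rings() places XDP Tx rings after the VSI's regular Tx queues: XDP ring i takes q_index alloc_txq + i and pulls its hardware queue from the same txq_map. A small sketch of that indexing, with made-up map contents:

```c
/* Sketch of how ice_xdp_alloc_setup_rings() indexes XDP Tx rings:
 * XDP queue i occupies txq_map slot alloc_txq + i, i.e. XDP rings are
 * appended after the regular Tx queues in the VSI's queue map. The
 * txq_map contents below are invented hardware queue IDs.
 */
#include <stdio.h>

int main(void)
{
	unsigned int alloc_txq = 4;	/* regular Tx queues */
	unsigned int num_xdp_txq = 4;	/* one XDP ring per Rx queue */
	unsigned int txq_map[8] = { 10, 11, 12, 13, 40, 41, 42, 43 };
	unsigned int i;

	for (i = 0; i < num_xdp_txq; i++) {
		unsigned int xdp_q_idx = alloc_txq + i;

		printf("XDP ring %u -> q_index %u, HW queue %u\n",
		       i, xdp_q_idx, txq_map[xdp_q_idx]);
	}
	return 0;
}
```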
1587 | 2305 | |
---|
1588 | 2306 | /** |
---|
1589 | | - * ice_vsi_free_irq - Free the irq association with the OS |
---|
1590 | | - * @vsi: the VSI being configured |
---|
| 2307 | + * ice_vsi_assign_bpf_prog - set or clear bpf prog pointer on VSI |
---|
| 2308 | + * @vsi: VSI to set the bpf prog on |
---|
| 2309 | + * @prog: the bpf prog pointer |
---|
1591 | 2310 | */ |
---|
1592 | | -static void ice_vsi_free_irq(struct ice_vsi *vsi) |
---|
| 2311 | +static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog) |
---|
1593 | 2312 | { |
---|
1594 | | - struct ice_pf *pf = vsi->back; |
---|
1595 | | - int base = vsi->base_vector; |
---|
| 2313 | + struct bpf_prog *old_prog; |
---|
| 2314 | + int i; |
---|
1596 | 2315 | |
---|
1597 | | - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) { |
---|
1598 | | - int i; |
---|
| 2316 | + old_prog = xchg(&vsi->xdp_prog, prog); |
---|
| 2317 | + if (old_prog) |
---|
| 2318 | + bpf_prog_put(old_prog); |
---|
1599 | 2319 | |
---|
1600 | | - if (!vsi->q_vectors || !vsi->irqs_ready) |
---|
1601 | | - return; |
---|
1602 | | - |
---|
1603 | | - vsi->irqs_ready = false; |
---|
1604 | | - for (i = 0; i < vsi->num_q_vectors; i++) { |
---|
1605 | | - u16 vector = i + base; |
---|
1606 | | - int irq_num; |
---|
1607 | | - |
---|
1608 | | - irq_num = pf->msix_entries[vector].vector; |
---|
1609 | | - |
---|
1610 | | - /* free only the irqs that were actually requested */ |
---|
1611 | | - if (!vsi->q_vectors[i] || |
---|
1612 | | - !(vsi->q_vectors[i]->num_ring_tx || |
---|
1613 | | - vsi->q_vectors[i]->num_ring_rx)) |
---|
1614 | | - continue; |
---|
1615 | | - |
---|
1616 | | - /* clear the affinity notifier in the IRQ descriptor */ |
---|
1617 | | - irq_set_affinity_notifier(irq_num, NULL); |
---|
1618 | | - |
---|
1619 | | - /* clear the affinity_mask in the IRQ descriptor */ |
---|
1620 | | - irq_set_affinity_hint(irq_num, NULL); |
---|
1621 | | - synchronize_irq(irq_num); |
---|
1622 | | - devm_free_irq(&pf->pdev->dev, irq_num, |
---|
1623 | | - vsi->q_vectors[i]); |
---|
1624 | | - } |
---|
1625 | | - ice_vsi_release_msix(vsi); |
---|
1626 | | - } |
---|
| 2320 | + ice_for_each_rxq(vsi, i) |
---|
| 2321 | + WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog); |
---|
1627 | 2322 | } |
---|
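ice_vsi_assign_bpf_prog() swaps the program pointer with xchg() so concurrent readers always observe either the old or the new program, then drops the reference on the displaced one. A userspace model of the same swap-then-put pattern, using a GCC atomic builtin and a toy refcount in place of the bpf_prog machinery:

```c
/* Userspace model of ice_vsi_assign_bpf_prog(): atomically exchange
 * the program pointer and put a reference on whatever was installed
 * before. struct prog is a stand-in for struct bpf_prog.
 */
#include <stdio.h>
#include <stdlib.h>

struct prog {
	int refcnt;
	const char *name;
};

static void prog_put(struct prog *p)
{
	if (p && --p->refcnt == 0) {
		printf("freeing %s\n", p->name);
		free(p);
	}
}

static struct prog *active;	/* vsi->xdp_prog analogue */

static void assign_prog(struct prog *new)
{
	struct prog *old;

	/* single atomic swap: readers see old or new, never junk */
	old = __atomic_exchange_n(&active, new, __ATOMIC_SEQ_CST);
	prog_put(old);
}

int main(void)
{
	struct prog *a = malloc(sizeof(*a));
	struct prog *b = malloc(sizeof(*b));

	a->refcnt = 1; a->name = "prog_a";
	b->refcnt = 1; b->name = "prog_b";

	assign_prog(a);		/* old == NULL, nothing to put */
	assign_prog(b);		/* prog_a is put and freed */
	assign_prog(NULL);	/* detach, prog_b freed */
	return 0;
}
```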
1628 | 2323 | |
---|
1629 | 2324 | /** |
---|
1630 | | - * ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW |
---|
1631 | | - * @vsi: the VSI being configured |
---|
| 2325 | + * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP |
---|
| 2326 | + * @vsi: VSI to bring up Tx rings used by XDP |
---|
| 2327 | + * @prog: bpf program that will be assigned to VSI |
---|
| 2328 | + * |
---|
| 2329 | + * Return 0 on success and negative value on error |
---|
1632 | 2330 | */ |
---|
1633 | | -static void ice_vsi_cfg_msix(struct ice_vsi *vsi) |
---|
| 2331 | +int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog) |
---|
1634 | 2332 | { |
---|
| 2333 | + u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; |
---|
| 2334 | + int xdp_rings_rem = vsi->num_xdp_txq; |
---|
1635 | 2335 | struct ice_pf *pf = vsi->back; |
---|
1636 | | - u16 vector = vsi->base_vector; |
---|
1637 | | - struct ice_hw *hw = &pf->hw; |
---|
1638 | | - u32 txq = 0, rxq = 0; |
---|
1639 | | - int i, q, itr; |
---|
1640 | | - u8 itr_gran; |
---|
| 2336 | + struct ice_qs_cfg xdp_qs_cfg = { |
---|
| 2337 | + .qs_mutex = &pf->avail_q_mutex, |
---|
| 2338 | + .pf_map = pf->avail_txqs, |
---|
| 2339 | + .pf_map_size = pf->max_pf_txqs, |
---|
| 2340 | + .q_count = vsi->num_xdp_txq, |
---|
| 2341 | + .scatter_count = ICE_MAX_SCATTER_TXQS, |
---|
| 2342 | + .vsi_map = vsi->txq_map, |
---|
| 2343 | + .vsi_map_offset = vsi->alloc_txq, |
---|
| 2344 | + .mapping_mode = ICE_VSI_MAP_CONTIG |
---|
| 2345 | + }; |
---|
| 2346 | + enum ice_status status; |
---|
| 2347 | + struct device *dev; |
---|
| 2348 | + int i, v_idx; |
---|
1641 | 2349 | |
---|
1642 | | - for (i = 0; i < vsi->num_q_vectors; i++, vector++) { |
---|
1643 | | - struct ice_q_vector *q_vector = vsi->q_vectors[i]; |
---|
| 2350 | + dev = ice_pf_to_dev(pf); |
---|
| 2351 | + vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq, |
---|
| 2352 | + sizeof(*vsi->xdp_rings), GFP_KERNEL); |
---|
| 2353 | + if (!vsi->xdp_rings) |
---|
| 2354 | + return -ENOMEM; |
---|
1644 | 2355 | |
---|
1645 | | - itr_gran = hw->itr_gran_200; |
---|
| 2356 | + vsi->xdp_mapping_mode = xdp_qs_cfg.mapping_mode; |
---|
| 2357 | + if (__ice_vsi_get_qs(&xdp_qs_cfg)) |
---|
| 2358 | + goto err_map_xdp; |
---|
1646 | 2359 | |
---|
1647 | | - if (q_vector->num_ring_rx) { |
---|
1648 | | - q_vector->rx.itr = |
---|
1649 | | - ITR_TO_REG(vsi->rx_rings[rxq]->rx_itr_setting, |
---|
1650 | | - itr_gran); |
---|
1651 | | - q_vector->rx.latency_range = ICE_LOW_LATENCY; |
---|
| 2360 | + if (ice_xdp_alloc_setup_rings(vsi)) |
---|
| 2361 | + goto clear_xdp_rings; |
---|
| 2362 | + |
---|
| 2363 | + /* follow the logic from ice_vsi_map_rings_to_vectors */ |
---|
| 2364 | + ice_for_each_q_vector(vsi, v_idx) { |
---|
| 2365 | + struct ice_q_vector *q_vector = vsi->q_vectors[v_idx]; |
---|
| 2366 | + int xdp_rings_per_v, q_id, q_base; |
---|
| 2367 | + |
---|
| 2368 | + xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem, |
---|
| 2369 | + vsi->num_q_vectors - v_idx); |
---|
| 2370 | + q_base = vsi->num_xdp_txq - xdp_rings_rem; |
---|
| 2371 | + |
---|
| 2372 | + for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) { |
---|
| 2373 | + struct ice_ring *xdp_ring = vsi->xdp_rings[q_id]; |
---|
| 2374 | + |
---|
| 2375 | + xdp_ring->q_vector = q_vector; |
---|
| 2376 | + xdp_ring->next = q_vector->tx.ring; |
---|
| 2377 | + q_vector->tx.ring = xdp_ring; |
---|
| 2378 | + } |
---|
| 2379 | + xdp_rings_rem -= xdp_rings_per_v; |
---|
| 2380 | + } |
---|
| 2381 | + |
---|
| 2382 | + /* omit the scheduler update if in reset path; XDP queues will be |
---|
| 2383 | + * taken into account at the end of ice_vsi_rebuild, where |
---|
| 2384 | + * ice_cfg_vsi_lan is being called |
---|
| 2385 | + */ |
---|
| 2386 | + if (ice_is_reset_in_progress(pf->state)) |
---|
| 2387 | + return 0; |
---|
| 2388 | + |
---|
| 2389 | + /* tell the Tx scheduler that right now we have |
---|
| 2390 | + * additional queues |
---|
| 2391 | + */ |
---|
| 2392 | + for (i = 0; i < vsi->tc_cfg.numtc; i++) |
---|
| 2393 | + max_txqs[i] = vsi->num_txq + vsi->num_xdp_txq; |
---|
| 2394 | + |
---|
| 2395 | + status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, |
---|
| 2396 | + max_txqs); |
---|
| 2397 | + if (status) { |
---|
| 2398 | + dev_err(dev, "Failed VSI LAN queue config for XDP, error: %s\n", |
---|
| 2399 | + ice_stat_str(status)); |
---|
| 2400 | + goto clear_xdp_rings; |
---|
| 2401 | + } |
---|
| 2402 | + |
---|
| 2403 | + /* assign the prog only when it's not already present on VSI; |
---|
| 2404 | + * this path is reached from both the ethtool -L and ndo_bpf flows;
---|
| 2405 | + * a VSI rebuild under ethtool -L could expose us to
---|
| 2406 | + * bpf_prog refcount issues, as we would swap the same
---|
| 2407 | + * bpf_prog pointer out of vsi->xdp_prog and call bpf_prog_put
---|
| 2408 | + * on it as if it were an 'old_prog'; for ndo_bpf
---|
| 2409 | + * this is harmless because dev_xdp_install bumps the refcount
---|
| 2410 | + * before calling the op exposed by the driver;
---|
| 2411 | + */ |
---|
| 2412 | + if (!ice_is_xdp_ena_vsi(vsi)) |
---|
| 2413 | + ice_vsi_assign_bpf_prog(vsi, prog); |
---|
| 2414 | + |
---|
| 2415 | + return 0; |
---|
| 2416 | +clear_xdp_rings: |
---|
| 2417 | + for (i = 0; i < vsi->num_xdp_txq; i++) |
---|
| 2418 | + if (vsi->xdp_rings[i]) { |
---|
| 2419 | + kfree_rcu(vsi->xdp_rings[i], rcu); |
---|
| 2420 | + vsi->xdp_rings[i] = NULL; |
---|
1652 | 2421 | } |
---|
1653 | 2422 | |
---|
1654 | | - if (q_vector->num_ring_tx) { |
---|
1655 | | - q_vector->tx.itr = |
---|
1656 | | - ITR_TO_REG(vsi->tx_rings[txq]->tx_itr_setting, |
---|
1657 | | - itr_gran); |
---|
1658 | | - q_vector->tx.latency_range = ICE_LOW_LATENCY; |
---|
1659 | | - } |
---|
1660 | | - wr32(hw, GLINT_ITR(ICE_RX_ITR, vector), q_vector->rx.itr); |
---|
1661 | | - wr32(hw, GLINT_ITR(ICE_TX_ITR, vector), q_vector->tx.itr); |
---|
| 2423 | +err_map_xdp: |
---|
| 2424 | + mutex_lock(&pf->avail_q_mutex); |
---|
| 2425 | + for (i = 0; i < vsi->num_xdp_txq; i++) { |
---|
| 2426 | + clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs); |
---|
| 2427 | + vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX; |
---|
| 2428 | + } |
---|
| 2429 | + mutex_unlock(&pf->avail_q_mutex); |
---|
1662 | 2430 | |
---|
1663 | | - /* Both Transmit Queue Interrupt Cause Control register |
---|
1664 | | - * and Receive Queue Interrupt Cause control register |
---|
1665 | | - * expects MSIX_INDX field to be the vector index |
---|
1666 | | - * within the function space and not the absolute |
---|
1667 | | - * vector index across PF or across device. |
---|
1668 | | - * For SR-IOV VF VSIs queue vector index always starts |
---|
1669 | | - * with 1 since first vector index(0) is used for OICR |
---|
1670 | | - * in VF space. Since VMDq and other PF VSIs are within
---|
1671 | | - * the PF function space, use the vector index that's
---|
1672 | | - * tracked for this PF. |
---|
1673 | | - */ |
---|
1674 | | - for (q = 0; q < q_vector->num_ring_tx; q++) { |
---|
1675 | | - u32 val; |
---|
| 2431 | + devm_kfree(dev, vsi->xdp_rings); |
---|
| 2432 | + return -ENOMEM; |
---|
| 2433 | +} |
---|
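The mapping loop in ice_prepare_xdp_rings() spreads XDP rings over the VSI's vectors with DIV_ROUND_UP(remaining rings, remaining vectors), the same scheme as ice_vsi_map_rings_to_vectors, so per-vector counts differ by at most one and every ring lands on exactly one vector. A runnable model of the distribution (10 rings over 4 vectors, chosen arbitrarily):

```c
/* Model of the ring-to-vector spread in ice_prepare_xdp_rings(): at
 * each vector, hand out ceil(remaining_rings / remaining_vectors)
 * rings, then shrink the remainder.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int num_xdp_txq = 10, num_q_vectors = 4;
	int xdp_rings_rem = num_xdp_txq;
	int v_idx;

	for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
		int per_v = DIV_ROUND_UP(xdp_rings_rem,
					 num_q_vectors - v_idx);
		int q_base = num_xdp_txq - xdp_rings_rem;

		printf("vector %d: rings %d..%d\n",
		       v_idx, q_base, q_base + per_v - 1);
		xdp_rings_rem -= per_v;
	}
	return 0;
}
```

With these inputs the output is 3/3/2/2 rings per vector, the most even split possible.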
1676 | 2434 | |
---|
1677 | | - itr = ICE_TX_ITR; |
---|
1678 | | - val = QINT_TQCTL_CAUSE_ENA_M | |
---|
1679 | | - (itr << QINT_TQCTL_ITR_INDX_S) | |
---|
1680 | | - (vector << QINT_TQCTL_MSIX_INDX_S); |
---|
1681 | | - wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val); |
---|
1682 | | - txq++; |
---|
| 2435 | +/** |
---|
| 2436 | + * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings |
---|
| 2437 | + * @vsi: VSI to remove XDP rings |
---|
| 2438 | + * |
---|
| 2439 | + * Detach XDP rings from irq vectors, clean up the PF bitmap and free |
---|
| 2440 | + * resources |
---|
| 2441 | + */ |
---|
| 2442 | +int ice_destroy_xdp_rings(struct ice_vsi *vsi) |
---|
| 2443 | +{ |
---|
| 2444 | + u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; |
---|
| 2445 | + struct ice_pf *pf = vsi->back; |
---|
| 2446 | + int i, v_idx; |
---|
| 2447 | + |
---|
| 2448 | + /* q_vectors are freed in reset path so there's no point in detaching |
---|
| 2449 | + * rings; if the rebuild is triggered by something other than a reset,
---|
| 2450 | + * the reset bits in pf->state won't be set, so additionally check the
---|
| 2451 | + * first q_vector against NULL
---|
| 2452 | + */ |
---|
| 2453 | + if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0]) |
---|
| 2454 | + goto free_qmap; |
---|
| 2455 | + |
---|
| 2456 | + ice_for_each_q_vector(vsi, v_idx) { |
---|
| 2457 | + struct ice_q_vector *q_vector = vsi->q_vectors[v_idx]; |
---|
| 2458 | + struct ice_ring *ring; |
---|
| 2459 | + |
---|
| 2460 | + ice_for_each_ring(ring, q_vector->tx) |
---|
| 2461 | + if (!ring->tx_buf || !ice_ring_is_xdp(ring)) |
---|
| 2462 | + break; |
---|
| 2463 | + |
---|
| 2464 | + /* restore the value of last node prior to XDP setup */ |
---|
| 2465 | + q_vector->tx.ring = ring; |
---|
| 2466 | + } |
---|
| 2467 | + |
---|
| 2468 | +free_qmap: |
---|
| 2469 | + mutex_lock(&pf->avail_q_mutex); |
---|
| 2470 | + for (i = 0; i < vsi->num_xdp_txq; i++) { |
---|
| 2471 | + clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs); |
---|
| 2472 | + vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX; |
---|
| 2473 | + } |
---|
| 2474 | + mutex_unlock(&pf->avail_q_mutex); |
---|
| 2475 | + |
---|
| 2476 | + for (i = 0; i < vsi->num_xdp_txq; i++) |
---|
| 2477 | + if (vsi->xdp_rings[i]) { |
---|
| 2478 | + if (vsi->xdp_rings[i]->desc) { |
---|
| 2479 | + synchronize_rcu(); |
---|
| 2480 | + ice_free_tx_ring(vsi->xdp_rings[i]); |
---|
| 2481 | + } |
---|
| 2482 | + kfree_rcu(vsi->xdp_rings[i], rcu); |
---|
| 2483 | + vsi->xdp_rings[i] = NULL; |
---|
1683 | 2484 | } |
---|
1684 | 2485 | |
---|
1685 | | - for (q = 0; q < q_vector->num_ring_rx; q++) { |
---|
1686 | | - u32 val; |
---|
| 2486 | + devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings); |
---|
| 2487 | + vsi->xdp_rings = NULL; |
---|
1687 | 2488 | |
---|
1688 | | - itr = ICE_RX_ITR; |
---|
1689 | | - val = QINT_RQCTL_CAUSE_ENA_M | |
---|
1690 | | - (itr << QINT_RQCTL_ITR_INDX_S) | |
---|
1691 | | - (vector << QINT_RQCTL_MSIX_INDX_S); |
---|
1692 | | - wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val); |
---|
1693 | | - rxq++; |
---|
| 2489 | + if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0]) |
---|
| 2490 | + return 0; |
---|
| 2491 | + |
---|
| 2492 | + ice_vsi_assign_bpf_prog(vsi, NULL); |
---|
| 2493 | + |
---|
| 2494 | + /* notify Tx scheduler that we destroyed XDP queues and bring |
---|
| 2495 | + * back the old number of child nodes |
---|
| 2496 | + */ |
---|
| 2497 | + for (i = 0; i < vsi->tc_cfg.numtc; i++) |
---|
| 2498 | + max_txqs[i] = vsi->num_txq; |
---|
| 2499 | + |
---|
| 2500 | + /* change number of XDP Tx queues to 0 */ |
---|
| 2501 | + vsi->num_xdp_txq = 0; |
---|
| 2502 | + |
---|
| 2503 | + return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, |
---|
| 2504 | + max_txqs); |
---|
| 2505 | +} |
---|
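On teardown, each XDP queue's slot in the PF-wide avail_txqs bitmap is released under avail_q_mutex and the VSI's map entry is poisoned with ICE_INVAL_Q_INDEX. A sketch of that bookkeeping on a plain uint64_t bitmap; the queue IDs are invented and kept below 64 so one word suffices:

```c
/* Model of the teardown bookkeeping in ice_destroy_xdp_rings(): clear
 * each XDP queue's bit in the availability bitmap, then poison the
 * VSI's map entry with an invalid index.
 */
#include <stdint.h>
#include <stdio.h>

#define INVAL_Q_INDEX 0xffff

int main(void)
{
	uint64_t avail_txqs = 0;	/* bit set == queue in use */
	uint16_t txq_map[8] = { 10, 11, 12, 13, 40, 41, 42, 43 };
	unsigned int alloc_txq = 4, num_xdp_txq = 4, i;

	/* claim all eight queues first (what setup did) */
	for (i = 0; i < 8; i++)
		avail_txqs |= 1ULL << txq_map[i];

	/* release only the XDP slots, as the teardown loop does */
	for (i = 0; i < num_xdp_txq; i++) {
		avail_txqs &= ~(1ULL << txq_map[i + alloc_txq]);
		txq_map[i + alloc_txq] = INVAL_Q_INDEX;
	}

	printf("bitmap after release: %#llx\n",
	       (unsigned long long)avail_txqs);
	return 0;
}
```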
| 2506 | + |
---|
| 2507 | +/** |
---|
| 2508 | + * ice_xdp_setup_prog - Add or remove XDP eBPF program |
---|
| 2509 | + * @vsi: VSI to setup XDP for |
---|
| 2510 | + * @prog: XDP program |
---|
| 2511 | + * @extack: netlink extended ack |
---|
| 2512 | + */ |
---|
| 2513 | +static int |
---|
| 2514 | +ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog, |
---|
| 2515 | + struct netlink_ext_ack *extack) |
---|
| 2516 | +{ |
---|
| 2517 | + int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD; |
---|
| 2518 | + bool if_running = netif_running(vsi->netdev); |
---|
| 2519 | + int ret = 0, xdp_ring_err = 0; |
---|
| 2520 | + |
---|
| 2521 | + if (frame_size > vsi->rx_buf_len) { |
---|
| 2522 | + NL_SET_ERR_MSG_MOD(extack, "MTU too large for loading XDP"); |
---|
| 2523 | + return -EOPNOTSUPP; |
---|
| 2524 | + } |
---|
| 2525 | + |
---|
| 2526 | + /* need to stop netdev while setting up the program for Rx rings */ |
---|
| 2527 | + if (if_running && !test_and_set_bit(__ICE_DOWN, vsi->state)) { |
---|
| 2528 | + ret = ice_down(vsi); |
---|
| 2529 | + if (ret) { |
---|
| 2530 | + NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed"); |
---|
| 2531 | + return ret; |
---|
1694 | 2532 | } |
---|
1695 | 2533 | } |
---|
1696 | 2534 | |
---|
1697 | | - ice_flush(hw); |
---|
| 2535 | + if (!ice_is_xdp_ena_vsi(vsi) && prog) { |
---|
| 2536 | + vsi->num_xdp_txq = vsi->alloc_rxq; |
---|
| 2537 | + xdp_ring_err = ice_prepare_xdp_rings(vsi, prog); |
---|
| 2538 | + if (xdp_ring_err) |
---|
| 2539 | + NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed"); |
---|
| 2540 | + } else if (ice_is_xdp_ena_vsi(vsi) && !prog) { |
---|
| 2541 | + xdp_ring_err = ice_destroy_xdp_rings(vsi); |
---|
| 2542 | + if (xdp_ring_err) |
---|
| 2543 | + NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed"); |
---|
| 2544 | + } else { |
---|
| 2545 | + /* safe to call even when prog == vsi->xdp_prog as |
---|
| 2546 | + * dev_xdp_install in net/core/dev.c incremented prog's |
---|
| 2547 | + * refcount so corresponding bpf_prog_put won't cause |
---|
| 2548 | + * underflow |
---|
| 2549 | + */ |
---|
| 2550 | + ice_vsi_assign_bpf_prog(vsi, prog); |
---|
| 2551 | + } |
---|
| 2552 | + |
---|
| 2553 | + if (if_running) |
---|
| 2554 | + ret = ice_up(vsi); |
---|
| 2555 | + |
---|
| 2556 | + if (!ret && prog && vsi->xsk_pools) { |
---|
| 2557 | + int i; |
---|
| 2558 | + |
---|
| 2559 | + ice_for_each_rxq(vsi, i) { |
---|
| 2560 | + struct ice_ring *rx_ring = vsi->rx_rings[i]; |
---|
| 2561 | + |
---|
| 2562 | + if (rx_ring->xsk_pool) |
---|
| 2563 | + napi_schedule(&rx_ring->q_vector->napi); |
---|
| 2564 | + } |
---|
| 2565 | + } |
---|
| 2566 | + |
---|
| 2567 | + return (ret || xdp_ring_err) ? -ENOMEM : 0; |
---|
| 2568 | +} |
---|
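ice_xdp_setup_prog() refuses to attach a program when MTU plus the Ethernet header/padding overhead no longer fits in one Rx buffer, since an XDP frame must be contiguous. A sketch of that gate; the 3072-byte buffer and the overhead breakdown are assumptions for illustration, not the driver's exact ICE_ETH_PKT_HDR_PAD value:

```c
/* Sketch of the MTU gate in ice_xdp_setup_prog(): frame_size =
 * MTU + header overhead must fit rx_buf_len or attach is rejected.
 */
#include <stdio.h>

int main(void)
{
	int eth_pkt_hdr_pad = 14 + 4 + 8; /* ETH_HLEN + FCS + 2 VLAN tags (assumed) */
	int rx_buf_len = 3072;		  /* typical XDP Rx buffer (assumed) */
	int mtus[] = { 1500, 3000, 9000 };
	unsigned int i;

	for (i = 0; i < sizeof(mtus) / sizeof(mtus[0]); i++) {
		int frame_size = mtus[i] + eth_pkt_hdr_pad;

		printf("MTU %4d -> frame %4d: %s\n", mtus[i], frame_size,
		       frame_size > rx_buf_len ?
		       "rejected (-EOPNOTSUPP)" : "ok");
	}
	return 0;
}
```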
| 2569 | + |
---|
| 2570 | +/** |
---|
| 2571 | + * ice_xdp_safe_mode - XDP handler for safe mode |
---|
| 2572 | + * @dev: netdevice |
---|
| 2573 | + * @xdp: XDP command |
---|
| 2574 | + */ |
---|
| 2575 | +static int ice_xdp_safe_mode(struct net_device __always_unused *dev, |
---|
| 2576 | + struct netdev_bpf *xdp) |
---|
| 2577 | +{ |
---|
| 2578 | + NL_SET_ERR_MSG_MOD(xdp->extack, |
---|
| 2579 | + "Please provide working DDP firmware package in order to use XDP\n" |
---|
| 2580 | + "Refer to Documentation/networking/device_drivers/ethernet/intel/ice.rst"); |
---|
| 2581 | + return -EOPNOTSUPP; |
---|
| 2582 | +} |
---|
| 2583 | + |
---|
| 2584 | +/** |
---|
| 2585 | + * ice_xdp - implements XDP handler |
---|
| 2586 | + * @dev: netdevice |
---|
| 2587 | + * @xdp: XDP command |
---|
| 2588 | + */ |
---|
| 2589 | +static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp) |
---|
| 2590 | +{ |
---|
| 2591 | + struct ice_netdev_priv *np = netdev_priv(dev); |
---|
| 2592 | + struct ice_vsi *vsi = np->vsi; |
---|
| 2593 | + |
---|
| 2594 | + if (vsi->type != ICE_VSI_PF) { |
---|
| 2595 | + NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF VSI"); |
---|
| 2596 | + return -EINVAL; |
---|
| 2597 | + } |
---|
| 2598 | + |
---|
| 2599 | + switch (xdp->command) { |
---|
| 2600 | + case XDP_SETUP_PROG: |
---|
| 2601 | + return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack); |
---|
| 2602 | + case XDP_SETUP_XSK_POOL: |
---|
| 2603 | + return ice_xsk_pool_setup(vsi, xdp->xsk.pool, |
---|
| 2604 | + xdp->xsk.queue_id); |
---|
| 2605 | + default: |
---|
| 2606 | + return -EINVAL; |
---|
| 2607 | + } |
---|
1698 | 2608 | } |
---|
1699 | 2609 | |
---|
1700 | 2610 | /** |
---|
.. | .. |
---|
1706 | 2616 | struct ice_hw *hw = &pf->hw; |
---|
1707 | 2617 | u32 val; |
---|
1708 | 2618 | |
---|
| 2619 | + /* Disable anti-spoof detection interrupt to prevent spurious event |
---|
| 2620 | + * interrupts during a function reset. Anti-spoof functionality is
---|
| 2621 | + * still supported. |
---|
| 2622 | + */ |
---|
| 2623 | + val = rd32(hw, GL_MDCK_TX_TDPU); |
---|
| 2624 | + val |= GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M; |
---|
| 2625 | + wr32(hw, GL_MDCK_TX_TDPU, val); |
---|
| 2626 | + |
---|
1709 | 2627 | /* clear things first */ |
---|
1710 | 2628 | wr32(hw, PFINT_OICR_ENA, 0); /* disable all */ |
---|
1711 | 2629 | rd32(hw, PFINT_OICR); /* read to clear */ |
---|
.. | .. |
---|
1714 | 2632 | PFINT_OICR_MAL_DETECT_M | |
---|
1715 | 2633 | PFINT_OICR_GRST_M | |
---|
1716 | 2634 | PFINT_OICR_PCI_EXCEPTION_M | |
---|
| 2635 | + PFINT_OICR_VFLR_M | |
---|
1717 | 2636 | PFINT_OICR_HMC_ERR_M | |
---|
1718 | 2637 | PFINT_OICR_PE_CRITERR_M); |
---|
1719 | 2638 | |
---|
.. | .. |
---|
1734 | 2653 | struct ice_pf *pf = (struct ice_pf *)data; |
---|
1735 | 2654 | struct ice_hw *hw = &pf->hw; |
---|
1736 | 2655 | irqreturn_t ret = IRQ_NONE; |
---|
| 2656 | + struct device *dev; |
---|
1737 | 2657 | u32 oicr, ena_mask; |
---|
1738 | 2658 | |
---|
| 2659 | + dev = ice_pf_to_dev(pf); |
---|
1739 | 2660 | set_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state); |
---|
| 2661 | + set_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state); |
---|
1740 | 2662 | |
---|
1741 | 2663 | oicr = rd32(hw, PFINT_OICR); |
---|
1742 | 2664 | ena_mask = rd32(hw, PFINT_OICR_ENA); |
---|
1743 | 2665 | |
---|
| 2666 | + if (oicr & PFINT_OICR_SWINT_M) { |
---|
| 2667 | + ena_mask &= ~PFINT_OICR_SWINT_M; |
---|
| 2668 | + pf->sw_int_count++; |
---|
| 2669 | + } |
---|
| 2670 | + |
---|
| 2671 | + if (oicr & PFINT_OICR_MAL_DETECT_M) { |
---|
| 2672 | + ena_mask &= ~PFINT_OICR_MAL_DETECT_M; |
---|
| 2673 | + set_bit(__ICE_MDD_EVENT_PENDING, pf->state); |
---|
| 2674 | + } |
---|
| 2675 | + if (oicr & PFINT_OICR_VFLR_M) { |
---|
| 2676 | + /* disable any further VFLR event notifications */ |
---|
| 2677 | + if (test_bit(__ICE_VF_RESETS_DISABLED, pf->state)) { |
---|
| 2678 | + u32 reg = rd32(hw, PFINT_OICR_ENA); |
---|
| 2679 | + |
---|
| 2680 | + reg &= ~PFINT_OICR_VFLR_M; |
---|
| 2681 | + wr32(hw, PFINT_OICR_ENA, reg); |
---|
| 2682 | + } else { |
---|
| 2683 | + ena_mask &= ~PFINT_OICR_VFLR_M; |
---|
| 2684 | + set_bit(__ICE_VFLR_EVENT_PENDING, pf->state); |
---|
| 2685 | + } |
---|
| 2686 | + } |
---|
| 2687 | + |
---|
1744 | 2688 | if (oicr & PFINT_OICR_GRST_M) { |
---|
1745 | 2689 | u32 reset; |
---|
| 2690 | + |
---|
1746 | 2691 | /* we have a reset warning */ |
---|
1747 | 2692 | ena_mask &= ~PFINT_OICR_GRST_M; |
---|
1748 | 2693 | reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >> |
---|
.. | .. |
---|
1752 | 2697 | pf->corer_count++; |
---|
1753 | 2698 | else if (reset == ICE_RESET_GLOBR) |
---|
1754 | 2699 | pf->globr_count++; |
---|
1755 | | - else |
---|
| 2700 | + else if (reset == ICE_RESET_EMPR) |
---|
1756 | 2701 | pf->empr_count++; |
---|
| 2702 | + else |
---|
| 2703 | + dev_dbg(dev, "Invalid reset type %d\n", reset); |
---|
1757 | 2704 | |
---|
1758 | 2705 | /* If a reset cycle isn't already in progress, we set a bit in |
---|
1759 | 2706 | * pf->state so that the service task can start a reset/rebuild. |
---|
1760 | 2707 | * We also make note of which reset happened so that peer |
---|
1761 | 2708 | * devices/drivers can be informed. |
---|
1762 | 2709 | */ |
---|
1763 | | - if (!test_and_set_bit(__ICE_RESET_RECOVERY_PENDING, |
---|
1764 | | - pf->state)) { |
---|
| 2710 | + if (!test_and_set_bit(__ICE_RESET_OICR_RECV, pf->state)) { |
---|
1765 | 2711 | if (reset == ICE_RESET_CORER) |
---|
1766 | 2712 | set_bit(__ICE_CORER_RECV, pf->state); |
---|
1767 | 2713 | else if (reset == ICE_RESET_GLOBR) |
---|
.. | .. |
---|
1775 | 2721 | * is received and set back to false after the driver |
---|
1776 | 2722 | * has determined that the hardware is out of reset. |
---|
1777 | 2723 | * |
---|
1778 | | - * __ICE_RESET_RECOVERY_PENDING in pf->state indicates |
---|
| 2724 | + * __ICE_RESET_OICR_RECV in pf->state indicates |
---|
1779 | 2725 | * that a post reset rebuild is required before the |
---|
1780 | 2726 | * driver is operational again. This is set above. |
---|
1781 | 2727 | * |
---|
.. | .. |
---|
1788 | 2734 | |
---|
1789 | 2735 | if (oicr & PFINT_OICR_HMC_ERR_M) { |
---|
1790 | 2736 | ena_mask &= ~PFINT_OICR_HMC_ERR_M; |
---|
1791 | | - dev_dbg(&pf->pdev->dev, |
---|
1792 | | - "HMC Error interrupt - info 0x%x, data 0x%x\n", |
---|
| 2737 | + dev_dbg(dev, "HMC Error interrupt - info 0x%x, data 0x%x\n", |
---|
1793 | 2738 | rd32(hw, PFHMC_ERRORINFO), |
---|
1794 | 2739 | rd32(hw, PFHMC_ERRORDATA)); |
---|
1795 | 2740 | } |
---|
1796 | 2741 | |
---|
1797 | | - /* Report and mask off any remaining unexpected interrupts */ |
---|
| 2742 | + /* Report any remaining unexpected interrupts */ |
---|
1798 | 2743 | oicr &= ena_mask; |
---|
1799 | 2744 | if (oicr) { |
---|
1800 | | - dev_dbg(&pf->pdev->dev, "unhandled interrupt oicr=0x%08x\n", |
---|
1801 | | - oicr); |
---|
| 2745 | + dev_dbg(dev, "unhandled interrupt oicr=0x%08x\n", oicr); |
---|
1802 | 2746 | /* If a critical error is pending there is no choice but to |
---|
1803 | 2747 | * reset the device. |
---|
1804 | 2748 | */ |
---|
.. | .. |
---|
1808 | 2752 | set_bit(__ICE_PFR_REQ, pf->state); |
---|
1809 | 2753 | ice_service_task_schedule(pf); |
---|
1810 | 2754 | } |
---|
1811 | | - ena_mask &= ~oicr; |
---|
1812 | 2755 | } |
---|
1813 | 2756 | ret = IRQ_HANDLED; |
---|
1814 | 2757 | |
---|
1815 | | - /* re-enable interrupt causes that are not handled during this pass */ |
---|
1816 | | - wr32(hw, PFINT_OICR_ENA, ena_mask); |
---|
1817 | | - if (!test_bit(__ICE_DOWN, pf->state)) { |
---|
1818 | | - ice_service_task_schedule(pf); |
---|
1819 | | - ice_irq_dynamic_ena(hw, NULL, NULL); |
---|
1820 | | - } |
---|
| 2758 | + ice_service_task_schedule(pf); |
---|
| 2759 | + ice_irq_dynamic_ena(hw, NULL, NULL); |
---|
1821 | 2760 | |
---|
1822 | 2761 | return ret; |
---|
1823 | 2762 | } |
---|
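ice_misc_intr() follows a read-then-subtract pattern: each recognized cause is knocked out of the enabled mask, and after `oicr &= ena_mask` whatever bits survive are unexpected and get logged. A compact model with invented bit positions:

```c
/* Model of the OICR bookkeeping in ice_misc_intr(): read the cause
 * bits, clear each recognized cause from the enabled mask, and report
 * whatever remains as unhandled.
 */
#include <stdint.h>
#include <stdio.h>

#define OICR_SWINT   (1u << 0)
#define OICR_MAL     (1u << 1)
#define OICR_GRST    (1u << 2)
#define OICR_UNKNOWN (1u << 7)	/* a cause the handler doesn't know */

int main(void)
{
	uint32_t oicr = OICR_SWINT | OICR_GRST | OICR_UNKNOWN;
	uint32_t ena_mask = 0xff;	/* everything currently enabled */

	if (oicr & OICR_SWINT)
		ena_mask &= ~OICR_SWINT;	/* handled: count sw irq */
	if (oicr & OICR_MAL)
		ena_mask &= ~OICR_MAL;		/* handled: flag MDD event */
	if (oicr & OICR_GRST)
		ena_mask &= ~OICR_GRST;		/* handled: note reset type */

	oicr &= ena_mask;			/* keep only unexpected bits */
	if (oicr)
		printf("unhandled interrupt oicr=0x%08x\n", oicr);
	return 0;
}
```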
1824 | 2763 | |
---|
1825 | 2764 | /** |
---|
1826 | | - * ice_vsi_map_rings_to_vectors - Map VSI rings to interrupt vectors |
---|
1827 | | - * @vsi: the VSI being configured |
---|
1828 | | - * |
---|
1829 | | - * This function maps descriptor rings to the queue-specific vectors allotted |
---|
1830 | | - * through the MSI-X enabling code. On a constrained vector budget, we map Tx |
---|
1831 | | - * and Rx rings to the vector as "efficiently" as possible. |
---|
| 2765 | + * ice_dis_ctrlq_interrupts - disable control queue interrupts |
---|
| 2766 | + * @hw: pointer to HW structure |
---|
1832 | 2767 | */ |
---|
1833 | | -static void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi) |
---|
| 2768 | +static void ice_dis_ctrlq_interrupts(struct ice_hw *hw) |
---|
1834 | 2769 | { |
---|
1835 | | - int q_vectors = vsi->num_q_vectors; |
---|
1836 | | - int tx_rings_rem, rx_rings_rem; |
---|
1837 | | - int v_id; |
---|
| 2770 | + /* disable Admin queue Interrupt causes */ |
---|
| 2771 | + wr32(hw, PFINT_FW_CTL, |
---|
| 2772 | + rd32(hw, PFINT_FW_CTL) & ~PFINT_FW_CTL_CAUSE_ENA_M); |
---|
1838 | 2773 | |
---|
1839 | | - /* initially assigning remaining rings count to VSIs num queue value */ |
---|
1840 | | - tx_rings_rem = vsi->num_txq; |
---|
1841 | | - rx_rings_rem = vsi->num_rxq; |
---|
| 2774 | + /* disable Mailbox queue Interrupt causes */ |
---|
| 2775 | + wr32(hw, PFINT_MBX_CTL, |
---|
| 2776 | + rd32(hw, PFINT_MBX_CTL) & ~PFINT_MBX_CTL_CAUSE_ENA_M); |
---|
1842 | 2777 | |
---|
1843 | | - for (v_id = 0; v_id < q_vectors; v_id++) { |
---|
1844 | | - struct ice_q_vector *q_vector = vsi->q_vectors[v_id]; |
---|
1845 | | - int tx_rings_per_v, rx_rings_per_v, q_id, q_base; |
---|
| 2778 | + /* disable Control queue Interrupt causes */ |
---|
| 2779 | + wr32(hw, PFINT_OICR_CTL, |
---|
| 2780 | + rd32(hw, PFINT_OICR_CTL) & ~PFINT_OICR_CTL_CAUSE_ENA_M); |
---|
1846 | 2781 | |
---|
1847 | | - /* Tx rings mapping to vector */ |
---|
1848 | | - tx_rings_per_v = DIV_ROUND_UP(tx_rings_rem, q_vectors - v_id); |
---|
1849 | | - q_vector->num_ring_tx = tx_rings_per_v; |
---|
1850 | | - q_vector->tx.ring = NULL; |
---|
1851 | | - q_base = vsi->num_txq - tx_rings_rem; |
---|
1852 | | - |
---|
1853 | | - for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) { |
---|
1854 | | - struct ice_ring *tx_ring = vsi->tx_rings[q_id]; |
---|
1855 | | - |
---|
1856 | | - tx_ring->q_vector = q_vector; |
---|
1857 | | - tx_ring->next = q_vector->tx.ring; |
---|
1858 | | - q_vector->tx.ring = tx_ring; |
---|
1859 | | - } |
---|
1860 | | - tx_rings_rem -= tx_rings_per_v; |
---|
1861 | | - |
---|
1862 | | - /* Rx rings mapping to vector */ |
---|
1863 | | - rx_rings_per_v = DIV_ROUND_UP(rx_rings_rem, q_vectors - v_id); |
---|
1864 | | - q_vector->num_ring_rx = rx_rings_per_v; |
---|
1865 | | - q_vector->rx.ring = NULL; |
---|
1866 | | - q_base = vsi->num_rxq - rx_rings_rem; |
---|
1867 | | - |
---|
1868 | | - for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) { |
---|
1869 | | - struct ice_ring *rx_ring = vsi->rx_rings[q_id]; |
---|
1870 | | - |
---|
1871 | | - rx_ring->q_vector = q_vector; |
---|
1872 | | - rx_ring->next = q_vector->rx.ring; |
---|
1873 | | - q_vector->rx.ring = rx_ring; |
---|
1874 | | - } |
---|
1875 | | - rx_rings_rem -= rx_rings_per_v; |
---|
1876 | | - } |
---|
1877 | | -} |
---|
1878 | | - |
---|
1879 | | -/** |
---|
1880 | | - * ice_vsi_set_num_qs - Set num queues, descriptors and vectors for a VSI |
---|
1881 | | - * @vsi: the VSI being configured |
---|
1882 | | - * |
---|
1883 | | - * Return 0 on success and a negative value on error |
---|
1884 | | - */ |
---|
1885 | | -static void ice_vsi_set_num_qs(struct ice_vsi *vsi) |
---|
1886 | | -{ |
---|
1887 | | - struct ice_pf *pf = vsi->back; |
---|
1888 | | - |
---|
1889 | | - switch (vsi->type) { |
---|
1890 | | - case ICE_VSI_PF: |
---|
1891 | | - vsi->alloc_txq = pf->num_lan_tx; |
---|
1892 | | - vsi->alloc_rxq = pf->num_lan_rx; |
---|
1893 | | - vsi->num_desc = ALIGN(ICE_DFLT_NUM_DESC, ICE_REQ_DESC_MULTIPLE); |
---|
1894 | | - vsi->num_q_vectors = max_t(int, pf->num_lan_rx, pf->num_lan_tx); |
---|
1895 | | - break; |
---|
1896 | | - default: |
---|
1897 | | - dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n", |
---|
1898 | | - vsi->type); |
---|
1899 | | - break; |
---|
1900 | | - } |
---|
1901 | | -} |
---|
1902 | | - |
---|
1903 | | -/** |
---|
1904 | | - * ice_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi |
---|
1905 | | - * @vsi: VSI pointer |
---|
1906 | | - * @alloc_qvectors: a bool to specify if q_vectors need to be allocated. |
---|
1907 | | - * |
---|
1908 | | - * On error: returns error code (negative) |
---|
1909 | | - * On success: returns 0 |
---|
1910 | | - */ |
---|
1911 | | -static int ice_vsi_alloc_arrays(struct ice_vsi *vsi, bool alloc_qvectors) |
---|
1912 | | -{ |
---|
1913 | | - struct ice_pf *pf = vsi->back; |
---|
1914 | | - |
---|
1915 | | - /* allocate memory for both Tx and Rx ring pointers */ |
---|
1916 | | - vsi->tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq, |
---|
1917 | | - sizeof(struct ice_ring *), GFP_KERNEL); |
---|
1918 | | - if (!vsi->tx_rings) |
---|
1919 | | - goto err_txrings; |
---|
1920 | | - |
---|
1921 | | - vsi->rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq, |
---|
1922 | | - sizeof(struct ice_ring *), GFP_KERNEL); |
---|
1923 | | - if (!vsi->rx_rings) |
---|
1924 | | - goto err_rxrings; |
---|
1925 | | - |
---|
1926 | | - if (alloc_qvectors) { |
---|
1927 | | - /* allocate memory for q_vector pointers */ |
---|
1928 | | - vsi->q_vectors = devm_kcalloc(&pf->pdev->dev, |
---|
1929 | | - vsi->num_q_vectors, |
---|
1930 | | - sizeof(struct ice_q_vector *), |
---|
1931 | | - GFP_KERNEL); |
---|
1932 | | - if (!vsi->q_vectors) |
---|
1933 | | - goto err_vectors; |
---|
1934 | | - } |
---|
1935 | | - |
---|
1936 | | - return 0; |
---|
1937 | | - |
---|
1938 | | -err_vectors: |
---|
1939 | | - devm_kfree(&pf->pdev->dev, vsi->rx_rings); |
---|
1940 | | -err_rxrings: |
---|
1941 | | - devm_kfree(&pf->pdev->dev, vsi->tx_rings); |
---|
1942 | | -err_txrings: |
---|
1943 | | - return -ENOMEM; |
---|
1944 | | -} |
---|
1945 | | - |
---|
1946 | | -/** |
---|
1947 | | - * ice_msix_clean_rings - MSIX mode Interrupt Handler |
---|
1948 | | - * @irq: interrupt number |
---|
1949 | | - * @data: pointer to a q_vector |
---|
1950 | | - */ |
---|
1951 | | -static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data) |
---|
1952 | | -{ |
---|
1953 | | - struct ice_q_vector *q_vector = (struct ice_q_vector *)data; |
---|
1954 | | - |
---|
1955 | | - if (!q_vector->tx.ring && !q_vector->rx.ring) |
---|
1956 | | - return IRQ_HANDLED; |
---|
1957 | | - |
---|
1958 | | - napi_schedule(&q_vector->napi); |
---|
1959 | | - |
---|
1960 | | - return IRQ_HANDLED; |
---|
1961 | | -} |
---|
1962 | | - |
---|
1963 | | -/** |
---|
1964 | | - * ice_vsi_alloc - Allocates the next available struct vsi in the PF |
---|
1965 | | - * @pf: board private structure |
---|
1966 | | - * @type: type of VSI |
---|
1967 | | - * |
---|
1968 | | - * returns a pointer to a VSI on success, NULL on failure. |
---|
1969 | | - */ |
---|
1970 | | -static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type) |
---|
1971 | | -{ |
---|
1972 | | - struct ice_vsi *vsi = NULL; |
---|
1973 | | - |
---|
1974 | | - /* Need to protect the allocation of the VSIs at the PF level */ |
---|
1975 | | - mutex_lock(&pf->sw_mutex); |
---|
1976 | | - |
---|
1977 | | - /* If we have already allocated our maximum number of VSIs, |
---|
1978 | | - * pf->next_vsi will be ICE_NO_VSI. If not, pf->next_vsi index |
---|
1979 | | - * is available to be populated |
---|
1980 | | - */ |
---|
1981 | | - if (pf->next_vsi == ICE_NO_VSI) { |
---|
1982 | | - dev_dbg(&pf->pdev->dev, "out of VSI slots!\n"); |
---|
1983 | | - goto unlock_pf; |
---|
1984 | | - } |
---|
1985 | | - |
---|
1986 | | - vsi = devm_kzalloc(&pf->pdev->dev, sizeof(*vsi), GFP_KERNEL); |
---|
1987 | | - if (!vsi) |
---|
1988 | | - goto unlock_pf; |
---|
1989 | | - |
---|
1990 | | - vsi->type = type; |
---|
1991 | | - vsi->back = pf; |
---|
1992 | | - set_bit(__ICE_DOWN, vsi->state); |
---|
1993 | | - vsi->idx = pf->next_vsi; |
---|
1994 | | - vsi->work_lmt = ICE_DFLT_IRQ_WORK; |
---|
1995 | | - |
---|
1996 | | - ice_vsi_set_num_qs(vsi); |
---|
1997 | | - |
---|
1998 | | - switch (vsi->type) { |
---|
1999 | | - case ICE_VSI_PF: |
---|
2000 | | - if (ice_vsi_alloc_arrays(vsi, true)) |
---|
2001 | | - goto err_rings; |
---|
2002 | | - |
---|
2003 | | - /* Setup default MSIX irq handler for VSI */ |
---|
2004 | | - vsi->irq_handler = ice_msix_clean_rings; |
---|
2005 | | - break; |
---|
2006 | | - default: |
---|
2007 | | - dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type); |
---|
2008 | | - goto unlock_pf; |
---|
2009 | | - } |
---|
2010 | | - |
---|
2011 | | - /* fill VSI slot in the PF struct */ |
---|
2012 | | - pf->vsi[pf->next_vsi] = vsi; |
---|
2013 | | - |
---|
2014 | | - /* prepare pf->next_vsi for next use */ |
---|
2015 | | - pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi, |
---|
2016 | | - pf->next_vsi); |
---|
2017 | | - goto unlock_pf; |
---|
2018 | | - |
---|
2019 | | -err_rings: |
---|
2020 | | - devm_kfree(&pf->pdev->dev, vsi); |
---|
2021 | | - vsi = NULL; |
---|
2022 | | -unlock_pf: |
---|
2023 | | - mutex_unlock(&pf->sw_mutex); |
---|
2024 | | - return vsi; |
---|
| 2782 | + ice_flush(hw); |
---|
2025 | 2783 | } |
---|
2026 | 2784 | |
---|
2027 | 2785 | /** |
---|
.. | .. |
---|
2030 | 2788 | */ |
---|
2031 | 2789 | static void ice_free_irq_msix_misc(struct ice_pf *pf) |
---|
2032 | 2790 | { |
---|
2033 | | - /* disable OICR interrupt */ |
---|
2034 | | - wr32(&pf->hw, PFINT_OICR_ENA, 0); |
---|
2035 | | - ice_flush(&pf->hw); |
---|
| 2791 | + struct ice_hw *hw = &pf->hw; |
---|
2036 | 2792 | |
---|
2037 | | - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags) && pf->msix_entries) { |
---|
| 2793 | + ice_dis_ctrlq_interrupts(hw); |
---|
| 2794 | + |
---|
| 2795 | + /* disable OICR interrupt */ |
---|
| 2796 | + wr32(hw, PFINT_OICR_ENA, 0); |
---|
| 2797 | + ice_flush(hw); |
---|
| 2798 | + |
---|
| 2799 | + if (pf->msix_entries) { |
---|
2038 | 2800 | synchronize_irq(pf->msix_entries[pf->oicr_idx].vector); |
---|
2039 | | - devm_free_irq(&pf->pdev->dev, |
---|
| 2801 | + devm_free_irq(ice_pf_to_dev(pf), |
---|
2040 | 2802 | pf->msix_entries[pf->oicr_idx].vector, pf); |
---|
2041 | 2803 | } |
---|
2042 | 2804 | |
---|
| 2805 | + pf->num_avail_sw_msix += 1; |
---|
2043 | 2806 | ice_free_res(pf->irq_tracker, pf->oicr_idx, ICE_RES_MISC_VEC_ID); |
---|
| 2807 | +} |
---|
| 2808 | + |
---|
| 2809 | +/** |
---|
| 2810 | + * ice_ena_ctrlq_interrupts - enable control queue interrupts |
---|
| 2811 | + * @hw: pointer to HW structure |
---|
| 2812 | + * @reg_idx: HW vector index to associate the control queue interrupts with |
---|
| 2813 | + */ |
---|
| 2814 | +static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx) |
---|
| 2815 | +{ |
---|
| 2816 | + u32 val; |
---|
| 2817 | + |
---|
| 2818 | + val = ((reg_idx & PFINT_OICR_CTL_MSIX_INDX_M) | |
---|
| 2819 | + PFINT_OICR_CTL_CAUSE_ENA_M); |
---|
| 2820 | + wr32(hw, PFINT_OICR_CTL, val); |
---|
| 2821 | + |
---|
| 2822 | + /* enable Admin queue Interrupt causes */ |
---|
| 2823 | + val = ((reg_idx & PFINT_FW_CTL_MSIX_INDX_M) | |
---|
| 2824 | + PFINT_FW_CTL_CAUSE_ENA_M); |
---|
| 2825 | + wr32(hw, PFINT_FW_CTL, val); |
---|
| 2826 | + |
---|
| 2827 | + /* enable Mailbox queue Interrupt causes */ |
---|
| 2828 | + val = ((reg_idx & PFINT_MBX_CTL_MSIX_INDX_M) | |
---|
| 2829 | + PFINT_MBX_CTL_CAUSE_ENA_M); |
---|
| 2830 | + wr32(hw, PFINT_MBX_CTL, val); |
---|
| 2831 | + |
---|
| 2832 | + ice_flush(hw); |
---|
2044 | 2833 | } |
---|
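The new ice_dis_ctrlq_interrupts()/ice_ena_ctrlq_interrupts() pair uses the usual read-modify-write idiom: disable clears only the CAUSE_ENA bit while preserving the rest of the register, and enable writes the OICR vector index into the MSIX_INDX field alongside the enable bit. A sketch against a fake register; the field layout is illustrative, not the real PFINT_*_CTL encoding:

```c
/* Model of the CTL register handling in ice_ena_ctrlq_interrupts()
 * and ice_dis_ctrlq_interrupts(): compose index | enable on enable,
 * RMW-clear just the enable bit on disable.
 */
#include <stdint.h>
#include <stdio.h>

#define CTL_MSIX_INDX_M 0x7ffu		/* assumed low 11 bits */
#define CTL_CAUSE_ENA_M (1u << 30)

static uint32_t pfint_fw_ctl;		/* fake register backing store */

static uint32_t rd32(void) { return pfint_fw_ctl; }
static void wr32(uint32_t v) { pfint_fw_ctl = v; }

int main(void)
{
	uint16_t reg_idx = 5;		/* OICR vector index */

	/* enable: associate the control queue with vector 5 */
	wr32((reg_idx & CTL_MSIX_INDX_M) | CTL_CAUSE_ENA_M);
	printf("enabled:  0x%08x\n", rd32());

	/* disable: RMW so only the enable bit changes */
	wr32(rd32() & ~CTL_CAUSE_ENA_M);
	printf("disabled: 0x%08x\n", rd32());
	return 0;
}
```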
2045 | 2834 | |
---|
2046 | 2835 | /** |
---|
.. | .. |
---|
2048 | 2837 | * @pf: board private structure |
---|
2049 | 2838 | * |
---|
2050 | 2839 | * This sets up the handler for MSIX 0, which is used to manage the |
---|
2051 | | - * non-queue interrupts, e.g. AdminQ and errors. This is not used |
---|
| 2840 | + * non-queue interrupts, e.g. AdminQ and errors. This is not used |
---|
2052 | 2841 | * when in MSI or Legacy interrupt mode. |
---|
2053 | 2842 | */ |
---|
2054 | 2843 | static int ice_req_irq_msix_misc(struct ice_pf *pf) |
---|
2055 | 2844 | { |
---|
| 2845 | + struct device *dev = ice_pf_to_dev(pf); |
---|
2056 | 2846 | struct ice_hw *hw = &pf->hw; |
---|
2057 | 2847 | int oicr_idx, err = 0; |
---|
2058 | | - u8 itr_gran; |
---|
2059 | | - u32 val; |
---|
2060 | 2848 | |
---|
2061 | 2849 | if (!pf->int_name[0]) |
---|
2062 | 2850 | snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc", |
---|
2063 | | - dev_driver_string(&pf->pdev->dev), |
---|
2064 | | - dev_name(&pf->pdev->dev)); |
---|
| 2851 | + dev_driver_string(dev), dev_name(dev)); |
---|
2065 | 2852 | |
---|
2066 | 2853 | /* Do not request IRQ but do enable OICR interrupt since settings are |
---|
2067 | 2854 | * lost during reset. Note that this function is called only during |
---|
2068 | 2855 | * rebuild path and not while reset is in progress. |
---|
2069 | 2856 | */ |
---|
2070 | | - if (ice_is_reset_recovery_pending(pf->state)) |
---|
| 2857 | + if (ice_is_reset_in_progress(pf->state)) |
---|
2071 | 2858 | goto skip_req_irq; |
---|
2072 | 2859 | |
---|
2073 | 2860 | /* reserve one vector in irq_tracker for misc interrupts */ |
---|
.. | .. |
---|
2075 | 2862 | if (oicr_idx < 0) |
---|
2076 | 2863 | return oicr_idx; |
---|
2077 | 2864 | |
---|
2078 | | - pf->oicr_idx = oicr_idx; |
---|
| 2865 | + pf->num_avail_sw_msix -= 1; |
---|
| 2866 | + pf->oicr_idx = (u16)oicr_idx; |
---|
2079 | 2867 | |
---|
2080 | | - err = devm_request_irq(&pf->pdev->dev, |
---|
2081 | | - pf->msix_entries[pf->oicr_idx].vector, |
---|
| 2868 | + err = devm_request_irq(dev, pf->msix_entries[pf->oicr_idx].vector, |
---|
2082 | 2869 | ice_misc_intr, 0, pf->int_name, pf); |
---|
2083 | 2870 | if (err) { |
---|
2084 | | - dev_err(&pf->pdev->dev, |
---|
2085 | | - "devm_request_irq for %s failed: %d\n", |
---|
| 2871 | + dev_err(dev, "devm_request_irq for %s failed: %d\n", |
---|
2086 | 2872 | pf->int_name, err); |
---|
2087 | 2873 | ice_free_res(pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID); |
---|
| 2874 | + pf->num_avail_sw_msix += 1; |
---|
2088 | 2875 | return err; |
---|
2089 | 2876 | } |
---|
2090 | 2877 | |
---|
2091 | 2878 | skip_req_irq: |
---|
2092 | 2879 | ice_ena_misc_vector(pf); |
---|
2093 | 2880 | |
---|
2094 | | - val = ((pf->oicr_idx & PFINT_OICR_CTL_MSIX_INDX_M) | |
---|
2095 | | - PFINT_OICR_CTL_CAUSE_ENA_M); |
---|
2096 | | - wr32(hw, PFINT_OICR_CTL, val); |
---|
2097 | | - |
---|
2098 | | - /* This enables Admin queue Interrupt causes */ |
---|
2099 | | - val = ((pf->oicr_idx & PFINT_FW_CTL_MSIX_INDX_M) | |
---|
2100 | | - PFINT_FW_CTL_CAUSE_ENA_M); |
---|
2101 | | - wr32(hw, PFINT_FW_CTL, val); |
---|
2102 | | - |
---|
2103 | | - itr_gran = hw->itr_gran_200; |
---|
2104 | | - |
---|
| 2881 | + ice_ena_ctrlq_interrupts(hw, pf->oicr_idx); |
---|
2105 | 2882 | wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_idx), |
---|
2106 | | - ITR_TO_REG(ICE_ITR_8K, itr_gran)); |
---|
| 2883 | + ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S); |
---|
2107 | 2884 | |
---|
2108 | 2885 | ice_flush(hw); |
---|
2109 | 2886 | ice_irq_dynamic_ena(hw, NULL, NULL); |
---|
.. | .. |
---|
2112 | 2889 | } |
---|
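The ITR write at the end of ice_req_irq_msix_misc() now aligns the microsecond interval to the hardware granularity and shifts it into register units, replacing the old itr_gran plumbing. A sketch under the assumption of 2 µs granularity (ICE_ITR_GRAN_S == 1); the ICE_ITR_8K value below is approximate:

```c
/* Sketch of ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S: mask the
 * microsecond interval down to the granularity, then convert to
 * register units of 2 us each (assumed for this HW generation).
 */
#include <stdio.h>

#define ITR_GRAN_S 1			/* 2 us per register unit (assumed) */
#define ITR_REG_ALIGN(usecs) ((usecs) & ~((1 << ITR_GRAN_S) - 1))

int main(void)
{
	int itr_8k = 124;	/* ~8000 ints/sec -> ~124 us between interrupts */
	int reg = ITR_REG_ALIGN(itr_8k) >> ITR_GRAN_S;

	printf("%d us -> aligned %d us -> register value %d\n",
	       itr_8k, ITR_REG_ALIGN(itr_8k), reg);
	return 0;
}
```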
2113 | 2890 | |
---|
2114 | 2891 | /** |
---|
2115 | | - * ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI |
---|
2116 | | - * @vsi: the VSI getting queues |
---|
| 2892 | + * ice_napi_add - register NAPI handler for the VSI |
---|
| 2893 | + * @vsi: VSI for which NAPI handler is to be registered |
---|
2117 | 2894 | * |
---|
2118 | | - * Return 0 on success and a negative value on error |
---|
| 2895 | + * This function is only called in the driver's load path. Registering the NAPI |
---|
| 2896 | + * handler is done in ice_vsi_alloc_q_vector() for all other cases (e.g. resume,
---|
| 2897 | + * reset/rebuild, etc.) |
---|
2119 | 2898 | */ |
---|
2120 | | -static int ice_vsi_get_qs_contig(struct ice_vsi *vsi) |
---|
2121 | | -{ |
---|
2122 | | - struct ice_pf *pf = vsi->back; |
---|
2123 | | - int offset, ret = 0; |
---|
2124 | | - |
---|
2125 | | - mutex_lock(&pf->avail_q_mutex); |
---|
2126 | | - /* look for contiguous block of queues for tx */ |
---|
2127 | | - offset = bitmap_find_next_zero_area(pf->avail_txqs, ICE_MAX_TXQS, |
---|
2128 | | - 0, vsi->alloc_txq, 0); |
---|
2129 | | - if (offset < ICE_MAX_TXQS) { |
---|
2130 | | - int i; |
---|
2131 | | - |
---|
2132 | | - bitmap_set(pf->avail_txqs, offset, vsi->alloc_txq); |
---|
2133 | | - for (i = 0; i < vsi->alloc_txq; i++) |
---|
2134 | | - vsi->txq_map[i] = i + offset; |
---|
2135 | | - } else { |
---|
2136 | | - ret = -ENOMEM; |
---|
2137 | | - vsi->tx_mapping_mode = ICE_VSI_MAP_SCATTER; |
---|
2138 | | - } |
---|
2139 | | - |
---|
2140 | | - /* look for contiguous block of queues for rx */ |
---|
2141 | | - offset = bitmap_find_next_zero_area(pf->avail_rxqs, ICE_MAX_RXQS, |
---|
2142 | | - 0, vsi->alloc_rxq, 0); |
---|
2143 | | - if (offset < ICE_MAX_RXQS) { |
---|
2144 | | - int i; |
---|
2145 | | - |
---|
2146 | | - bitmap_set(pf->avail_rxqs, offset, vsi->alloc_rxq); |
---|
2147 | | - for (i = 0; i < vsi->alloc_rxq; i++) |
---|
2148 | | - vsi->rxq_map[i] = i + offset; |
---|
2149 | | - } else { |
---|
2150 | | - ret = -ENOMEM; |
---|
2151 | | - vsi->rx_mapping_mode = ICE_VSI_MAP_SCATTER; |
---|
2152 | | - } |
---|
2153 | | - mutex_unlock(&pf->avail_q_mutex); |
---|
2154 | | - |
---|
2155 | | - return ret; |
---|
2156 | | -} |
---|
2157 | | - |
---|
2158 | | -/** |
---|
2159 | | - * ice_vsi_get_qs_scatter - Assign a scattered queues to VSI |
---|
2160 | | - * @vsi: the VSI getting queues |
---|
2161 | | - * |
---|
2162 | | - * Return 0 on success and a negative value on error |
---|
2163 | | - */ |
---|
2164 | | -static int ice_vsi_get_qs_scatter(struct ice_vsi *vsi) |
---|
2165 | | -{ |
---|
2166 | | - struct ice_pf *pf = vsi->back; |
---|
2167 | | - int i, index = 0; |
---|
2168 | | - |
---|
2169 | | - mutex_lock(&pf->avail_q_mutex); |
---|
2170 | | - |
---|
2171 | | - if (vsi->tx_mapping_mode == ICE_VSI_MAP_SCATTER) { |
---|
2172 | | - for (i = 0; i < vsi->alloc_txq; i++) { |
---|
2173 | | - index = find_next_zero_bit(pf->avail_txqs, |
---|
2174 | | - ICE_MAX_TXQS, index); |
---|
2175 | | - if (index < ICE_MAX_TXQS) { |
---|
2176 | | - set_bit(index, pf->avail_txqs); |
---|
2177 | | - vsi->txq_map[i] = index; |
---|
2178 | | - } else { |
---|
2179 | | - goto err_scatter_tx; |
---|
2180 | | - } |
---|
2181 | | - } |
---|
2182 | | - } |
---|
2183 | | - |
---|
2184 | | - if (vsi->rx_mapping_mode == ICE_VSI_MAP_SCATTER) { |
---|
2185 | | - for (i = 0; i < vsi->alloc_rxq; i++) { |
---|
2186 | | - index = find_next_zero_bit(pf->avail_rxqs, |
---|
2187 | | - ICE_MAX_RXQS, index); |
---|
2188 | | - if (index < ICE_MAX_RXQS) { |
---|
2189 | | - set_bit(index, pf->avail_rxqs); |
---|
2190 | | - vsi->rxq_map[i] = index; |
---|
2191 | | - } else { |
---|
2192 | | - goto err_scatter_rx; |
---|
2193 | | - } |
---|
2194 | | - } |
---|
2195 | | - } |
---|
2196 | | - |
---|
2197 | | - mutex_unlock(&pf->avail_q_mutex); |
---|
2198 | | - return 0; |
---|
2199 | | - |
---|
2200 | | -err_scatter_rx: |
---|
2201 | | - /* unflag any queues we have grabbed (i is failed position) */ |
---|
2202 | | - for (index = 0; index < i; index++) { |
---|
2203 | | - clear_bit(vsi->rxq_map[index], pf->avail_rxqs); |
---|
2204 | | - vsi->rxq_map[index] = 0; |
---|
2205 | | - } |
---|
2206 | | - i = vsi->alloc_txq; |
---|
2207 | | -err_scatter_tx: |
---|
2208 | | - /* i is either position of failed attempt or vsi->alloc_txq */ |
---|
2209 | | - for (index = 0; index < i; index++) { |
---|
2210 | | - clear_bit(vsi->txq_map[index], pf->avail_txqs); |
---|
2211 | | - vsi->txq_map[index] = 0; |
---|
2212 | | - } |
---|
2213 | | - |
---|
2214 | | - mutex_unlock(&pf->avail_q_mutex); |
---|
2215 | | - return -ENOMEM; |
---|
2216 | | -} |
---|
2217 | | - |
---|
2218 | | -/** |
---|
2219 | | - * ice_vsi_get_qs - Assign queues from PF to VSI |
---|
2220 | | - * @vsi: the VSI to assign queues to |
---|
2221 | | - * |
---|
2222 | | - * Returns 0 on success and a negative value on error |
---|
2223 | | - */ |
---|
2224 | | -static int ice_vsi_get_qs(struct ice_vsi *vsi) |
---|
2225 | | -{ |
---|
2226 | | - int ret = 0; |
---|
2227 | | - |
---|
2228 | | - vsi->tx_mapping_mode = ICE_VSI_MAP_CONTIG; |
---|
2229 | | - vsi->rx_mapping_mode = ICE_VSI_MAP_CONTIG; |
---|
2230 | | - |
---|
2231 | | - /* NOTE: ice_vsi_get_qs_contig() will set the rx/tx mapping |
---|
2232 | | - * modes individually to scatter if assigning contiguous queues |
---|
2233 | | - * to rx or tx fails |
---|
2234 | | - */ |
---|
2235 | | - ret = ice_vsi_get_qs_contig(vsi); |
---|
2236 | | - if (ret < 0) { |
---|
2237 | | - if (vsi->tx_mapping_mode == ICE_VSI_MAP_SCATTER) |
---|
2238 | | - vsi->alloc_txq = max_t(u16, vsi->alloc_txq, |
---|
2239 | | - ICE_MAX_SCATTER_TXQS); |
---|
2240 | | - if (vsi->rx_mapping_mode == ICE_VSI_MAP_SCATTER) |
---|
2241 | | - vsi->alloc_rxq = max_t(u16, vsi->alloc_rxq, |
---|
2242 | | - ICE_MAX_SCATTER_RXQS); |
---|
2243 | | - ret = ice_vsi_get_qs_scatter(vsi); |
---|
2244 | | - } |
---|
2245 | | - |
---|
2246 | | - return ret; |
---|
2247 | | -} |
---|
2248 | | - |
---|
2249 | | -/** |
---|
2250 | | - * ice_vsi_put_qs - Release queues from VSI to PF |
---|
2251 | | - * @vsi: the VSI thats going to release queues |
---|
2252 | | - */ |
---|
2253 | | -static void ice_vsi_put_qs(struct ice_vsi *vsi) |
---|
2254 | | -{ |
---|
2255 | | - struct ice_pf *pf = vsi->back; |
---|
2256 | | - int i; |
---|
2257 | | - |
---|
2258 | | - mutex_lock(&pf->avail_q_mutex); |
---|
2259 | | - |
---|
2260 | | - for (i = 0; i < vsi->alloc_txq; i++) { |
---|
2261 | | - clear_bit(vsi->txq_map[i], pf->avail_txqs); |
---|
2262 | | - vsi->txq_map[i] = ICE_INVAL_Q_INDEX; |
---|
2263 | | - } |
---|
2264 | | - |
---|
2265 | | - for (i = 0; i < vsi->alloc_rxq; i++) { |
---|
2266 | | - clear_bit(vsi->rxq_map[i], pf->avail_rxqs); |
---|
2267 | | - vsi->rxq_map[i] = ICE_INVAL_Q_INDEX; |
---|
2268 | | - } |
---|
2269 | | - |
---|
2270 | | - mutex_unlock(&pf->avail_q_mutex); |
---|
2271 | | -} |
---|
2272 | | - |
---|
2273 | | -/** |
---|
2274 | | - * ice_free_q_vector - Free memory allocated for a specific interrupt vector |
---|
2275 | | - * @vsi: VSI having the memory freed |
---|
2276 | | - * @v_idx: index of the vector to be freed |
---|
2277 | | - */ |
---|
2278 | | -static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx) |
---|
2279 | | -{ |
---|
2280 | | - struct ice_q_vector *q_vector; |
---|
2281 | | - struct ice_ring *ring; |
---|
2282 | | - |
---|
2283 | | - if (!vsi->q_vectors[v_idx]) { |
---|
2284 | | - dev_dbg(&vsi->back->pdev->dev, "Queue vector at index %d not found\n", |
---|
2285 | | - v_idx); |
---|
2286 | | - return; |
---|
2287 | | - } |
---|
2288 | | - q_vector = vsi->q_vectors[v_idx]; |
---|
2289 | | - |
---|
2290 | | - ice_for_each_ring(ring, q_vector->tx) |
---|
2291 | | - ring->q_vector = NULL; |
---|
2292 | | - ice_for_each_ring(ring, q_vector->rx) |
---|
2293 | | - ring->q_vector = NULL; |
---|
2294 | | - |
---|
2295 | | - /* only VSI with an associated netdev is set up with NAPI */ |
---|
2296 | | - if (vsi->netdev) |
---|
2297 | | - netif_napi_del(&q_vector->napi); |
---|
2298 | | - |
---|
2299 | | - devm_kfree(&vsi->back->pdev->dev, q_vector); |
---|
2300 | | - vsi->q_vectors[v_idx] = NULL; |
---|
2301 | | -} |
---|
2302 | | - |
---|
2303 | | -/** |
---|
2304 | | - * ice_vsi_free_q_vectors - Free memory allocated for interrupt vectors |
---|
2305 | | - * @vsi: the VSI having memory freed |
---|
2306 | | - */ |
---|
2307 | | -static void ice_vsi_free_q_vectors(struct ice_vsi *vsi) |
---|
| 2899 | +static void ice_napi_add(struct ice_vsi *vsi) |
---|
2308 | 2900 | { |
---|
2309 | 2901 | int v_idx; |
---|
2310 | 2902 | |
---|
2311 | | - for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) |
---|
2312 | | - ice_free_q_vector(vsi, v_idx); |
---|
| 2903 | + if (!vsi->netdev) |
---|
| 2904 | + return; |
---|
| 2905 | + |
---|
| 2906 | + ice_for_each_q_vector(vsi, v_idx) |
---|
| 2907 | + netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi, |
---|
| 2908 | + ice_napi_poll, NAPI_POLL_WEIGHT); |
---|
2313 | 2909 | } |
---|
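For orientation, the registration above follows the standard NAPI shape, sketched here with hypothetical my_* names: one napi_struct per vector, with a poll callback that calls napi_complete_done() when it finishes under budget so the interrupt path is re-armed.

#include <linux/netdevice.h>

struct my_q_vector {
	struct napi_struct napi;
};

static int my_napi_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;	/* clean Rx/Tx descriptors here, up to budget */

	if (work_done < budget)
		napi_complete_done(napi, work_done);	/* re-enable interrupts */
	return work_done;
}

static void my_napi_add(struct net_device *netdev, struct my_q_vector *q)
{
	/* same four-argument form used in the hunk above */
	netif_napi_add(netdev, &q->napi, my_napi_poll, NAPI_POLL_WEIGHT);
}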
2314 | 2910 | |
---|
2315 | 2911 | /** |
---|
2316 | | - * ice_cfg_netdev - Setup the netdev flags |
---|
2317 | | - * @vsi: the VSI being configured |
---|
2318 | | - * |
---|
2319 | | - * Returns 0 on success, negative value on failure |
---|
| 2912 | + * ice_set_ops - set netdev and ethtool ops for the given netdev |
---|
| 2913 | + * @netdev: netdev instance |
---|
2320 | 2914 | */ |
---|
2321 | | -static int ice_cfg_netdev(struct ice_vsi *vsi) |
---|
| 2915 | +static void ice_set_ops(struct net_device *netdev) |
---|
2322 | 2916 | { |
---|
| 2917 | + struct ice_pf *pf = ice_netdev_to_pf(netdev); |
---|
| 2918 | + |
---|
| 2919 | + if (ice_is_safe_mode(pf)) { |
---|
| 2920 | + netdev->netdev_ops = &ice_netdev_safe_mode_ops; |
---|
| 2921 | + ice_set_ethtool_safe_mode_ops(netdev); |
---|
| 2922 | + return; |
---|
| 2923 | + } |
---|
| 2924 | + |
---|
| 2925 | + netdev->netdev_ops = &ice_netdev_ops; |
---|
| 2926 | + netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic; |
---|
| 2927 | + ice_set_ethtool_ops(netdev); |
---|
| 2928 | +} |
---|
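A minimal sketch of the two-table dispatch ice_set_ops() performs, with hypothetical callbacks: the safe-mode table deliberately wires up only basic operations, so a device restricted to safe mode never advertises callbacks it cannot honor.

#include <linux/netdevice.h>

static int my_open(struct net_device *netdev) { return 0; }
static int my_stop(struct net_device *netdev) { return 0; }

/* reduced table: open/stop only, no offload-dependent callbacks */
static const struct net_device_ops my_safe_mode_ops = {
	.ndo_open	= my_open,
	.ndo_stop	= my_stop,
};

static const struct net_device_ops my_full_ops = {
	.ndo_open	= my_open,
	.ndo_stop	= my_stop,
	/* plus VLAN, TC, XDP, ... callbacks in a full driver */
};

static void my_set_ops(struct net_device *netdev, bool safe_mode)
{
	netdev->netdev_ops = safe_mode ? &my_safe_mode_ops : &my_full_ops;
}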
| 2929 | + |
---|
| 2930 | +/** |
---|
| 2931 | + * ice_set_netdev_features - set features for the given netdev |
---|
| 2932 | + * @netdev: netdev instance |
---|
| 2933 | + */ |
---|
| 2934 | +static void ice_set_netdev_features(struct net_device *netdev) |
---|
| 2935 | +{ |
---|
| 2936 | + struct ice_pf *pf = ice_netdev_to_pf(netdev); |
---|
2323 | 2937 | netdev_features_t csumo_features; |
---|
2324 | 2938 | netdev_features_t vlano_features; |
---|
2325 | 2939 | netdev_features_t dflt_features; |
---|
2326 | 2940 | netdev_features_t tso_features; |
---|
2327 | | - struct ice_netdev_priv *np; |
---|
2328 | | - struct net_device *netdev; |
---|
2329 | | - u8 mac_addr[ETH_ALEN]; |
---|
2330 | 2941 | |
---|
2331 | | - netdev = alloc_etherdev_mqs(sizeof(struct ice_netdev_priv), |
---|
2332 | | - vsi->alloc_txq, vsi->alloc_rxq); |
---|
2333 | | - if (!netdev) |
---|
2334 | | - return -ENOMEM; |
---|
2335 | | - |
---|
2336 | | - vsi->netdev = netdev; |
---|
2337 | | - np = netdev_priv(netdev); |
---|
2338 | | - np->vsi = vsi; |
---|
| 2942 | + if (ice_is_safe_mode(pf)) { |
---|
| 2943 | + /* safe mode */ |
---|
| 2944 | + netdev->features = NETIF_F_SG | NETIF_F_HIGHDMA; |
---|
| 2945 | + netdev->hw_features = netdev->features; |
---|
| 2946 | + return; |
---|
| 2947 | + } |
---|
2339 | 2948 | |
---|
2340 | 2949 | dflt_features = NETIF_F_SG | |
---|
2341 | 2950 | NETIF_F_HIGHDMA | |
---|
| 2951 | + NETIF_F_NTUPLE | |
---|
2342 | 2952 | NETIF_F_RXHASH; |
---|
2343 | 2953 | |
---|
2344 | 2954 | csumo_features = NETIF_F_RXCSUM | |
---|
2345 | 2955 | NETIF_F_IP_CSUM | |
---|
| 2956 | + NETIF_F_SCTP_CRC | |
---|
2346 | 2957 | NETIF_F_IPV6_CSUM; |
---|
2347 | 2958 | |
---|
2348 | 2959 | vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER | |
---|
2349 | 2960 | NETIF_F_HW_VLAN_CTAG_TX | |
---|
2350 | 2961 | NETIF_F_HW_VLAN_CTAG_RX; |
---|
2351 | 2962 | |
---|
2352 | | - tso_features = NETIF_F_TSO; |
---|
| 2963 | + tso_features = NETIF_F_TSO | |
---|
| 2964 | + NETIF_F_TSO_ECN | |
---|
| 2965 | + NETIF_F_TSO6 | |
---|
| 2966 | + NETIF_F_GSO_GRE | |
---|
| 2967 | + NETIF_F_GSO_UDP_TUNNEL | |
---|
| 2968 | + NETIF_F_GSO_GRE_CSUM | |
---|
| 2969 | + NETIF_F_GSO_UDP_TUNNEL_CSUM | |
---|
| 2970 | + NETIF_F_GSO_PARTIAL | |
---|
| 2971 | + NETIF_F_GSO_IPXIP4 | |
---|
| 2972 | + NETIF_F_GSO_IPXIP6 | |
---|
| 2973 | + NETIF_F_GSO_UDP_L4; |
---|
2353 | 2974 | |
---|
| 2975 | + netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM | |
---|
| 2976 | + NETIF_F_GSO_GRE_CSUM; |
---|
2354 | 2977 | /* set features that user can change */ |
---|
2355 | 2978 | netdev->hw_features = dflt_features | csumo_features | |
---|
2356 | 2979 | vlano_features | tso_features; |
---|
| 2980 | + |
---|
| 2981 | + /* add support for HW_CSUM on packets with MPLS header */ |
---|
| 2982 | + netdev->mpls_features = NETIF_F_HW_CSUM; |
---|
2357 | 2983 | |
---|
2358 | 2984 | /* enable features */ |
---|
2359 | 2985 | netdev->features |= netdev->hw_features; |
---|
.. | .. |
---|
2362 | 2988 | tso_features; |
---|
2363 | 2989 | netdev->vlan_features |= dflt_features | csumo_features | |
---|
2364 | 2990 | tso_features; |
---|
| 2991 | +} |
---|
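A short sketch of the three feature sets populated above, under the usual netdev semantics (my_* hypothetical): hw_features is the user-toggleable superset (ethtool -K), features is what is currently enabled, and vlan_features is what is still offered to VLAN devices stacked on top.

#include <linux/netdevice.h>

static void my_set_features(struct net_device *netdev)
{
	/* superset the user may flip at runtime via ethtool -K */
	netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | NETIF_F_TSO;

	/* currently-enabled set; default everything on */
	netdev->features |= netdev->hw_features;

	/* still offered when a VLAN device is stacked on this one */
	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO;
}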
| 2992 | + |
---|
| 2993 | +/** |
---|
| 2994 | + * ice_cfg_netdev - Allocate, configure and register a netdev |
---|
| 2995 | + * @vsi: the VSI associated with the new netdev |
---|
| 2996 | + * |
---|
| 2997 | + * Returns 0 on success, negative value on failure |
---|
| 2998 | + */ |
---|
| 2999 | +static int ice_cfg_netdev(struct ice_vsi *vsi) |
---|
| 3000 | +{ |
---|
| 3001 | + struct ice_pf *pf = vsi->back; |
---|
| 3002 | + struct ice_netdev_priv *np; |
---|
| 3003 | + struct net_device *netdev; |
---|
| 3004 | + u8 mac_addr[ETH_ALEN]; |
---|
| 3005 | + int err; |
---|
| 3006 | + |
---|
| 3007 | + err = ice_devlink_create_port(vsi); |
---|
| 3008 | + if (err) |
---|
| 3009 | + return err; |
---|
| 3010 | + |
---|
| 3011 | + netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq, |
---|
| 3012 | + vsi->alloc_rxq); |
---|
| 3013 | + if (!netdev) { |
---|
| 3014 | + err = -ENOMEM; |
---|
| 3015 | + goto err_destroy_devlink_port; |
---|
| 3016 | + } |
---|
| 3017 | + |
---|
| 3018 | + vsi->netdev = netdev; |
---|
| 3019 | + np = netdev_priv(netdev); |
---|
| 3020 | + np->vsi = vsi; |
---|
| 3021 | + |
---|
| 3022 | + ice_set_netdev_features(netdev); |
---|
| 3023 | + |
---|
| 3024 | + ice_set_ops(netdev); |
---|
2365 | 3025 | |
---|
2366 | 3026 | if (vsi->type == ICE_VSI_PF) { |
---|
2367 | | - SET_NETDEV_DEV(netdev, &vsi->back->pdev->dev); |
---|
| 3027 | + SET_NETDEV_DEV(netdev, ice_pf_to_dev(pf)); |
---|
2368 | 3028 | ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr); |
---|
2369 | | - |
---|
2370 | 3029 | ether_addr_copy(netdev->dev_addr, mac_addr); |
---|
2371 | 3030 | ether_addr_copy(netdev->perm_addr, mac_addr); |
---|
2372 | 3031 | } |
---|
2373 | 3032 | |
---|
2374 | 3033 | netdev->priv_flags |= IFF_UNICAST_FLT; |
---|
2375 | 3034 | |
---|
2376 | | - /* assign netdev_ops */ |
---|
2377 | | - netdev->netdev_ops = &ice_netdev_ops; |
---|
| 3035 | + /* Setup netdev TC information */ |
---|
| 3036 | + ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc); |
---|
2378 | 3037 | |
---|
2379 | 3038 | /* setup watchdog timeout value to be 5 second */ |
---|
2380 | 3039 | netdev->watchdog_timeo = 5 * HZ; |
---|
2381 | 3040 | |
---|
2382 | | - ice_set_ethtool_ops(netdev); |
---|
2383 | | - |
---|
2384 | 3041 | netdev->min_mtu = ETH_MIN_MTU; |
---|
2385 | 3042 | netdev->max_mtu = ICE_MAX_MTU; |
---|
2386 | 3043 | |
---|
2387 | | - return 0; |
---|
2388 | | -} |
---|
| 3044 | + err = register_netdev(vsi->netdev); |
---|
| 3045 | + if (err) |
---|
| 3046 | + goto err_free_netdev; |
---|
2389 | 3047 | |
---|
2390 | | -/** |
---|
2391 | | - * ice_vsi_free_arrays - clean up vsi resources |
---|
2392 | | - * @vsi: pointer to VSI being cleared |
---|
2393 | | - * @free_qvectors: bool to specify if q_vectors should be deallocated |
---|
2394 | | - */ |
---|
2395 | | -static void ice_vsi_free_arrays(struct ice_vsi *vsi, bool free_qvectors) |
---|
2396 | | -{ |
---|
2397 | | - struct ice_pf *pf = vsi->back; |
---|
| 3048 | + devlink_port_type_eth_set(&vsi->devlink_port, vsi->netdev); |
---|
2398 | 3049 | |
---|
2399 | | - /* free the ring and vector containers */ |
---|
2400 | | - if (free_qvectors && vsi->q_vectors) { |
---|
2401 | | - devm_kfree(&pf->pdev->dev, vsi->q_vectors); |
---|
2402 | | - vsi->q_vectors = NULL; |
---|
2403 | | - } |
---|
2404 | | - if (vsi->tx_rings) { |
---|
2405 | | - devm_kfree(&pf->pdev->dev, vsi->tx_rings); |
---|
2406 | | - vsi->tx_rings = NULL; |
---|
2407 | | - } |
---|
2408 | | - if (vsi->rx_rings) { |
---|
2409 | | - devm_kfree(&pf->pdev->dev, vsi->rx_rings); |
---|
2410 | | - vsi->rx_rings = NULL; |
---|
2411 | | - } |
---|
2412 | | -} |
---|
| 3050 | + netif_carrier_off(vsi->netdev); |
---|
2413 | 3051 | |
---|
2414 | | -/** |
---|
2415 | | - * ice_vsi_clear - clean up and deallocate the provided vsi |
---|
2416 | | - * @vsi: pointer to VSI being cleared |
---|
2417 | | - * |
---|
2418 | | - * This deallocates the vsi's queue resources, removes it from the PF's |
---|
2419 | | - * VSI array if necessary, and deallocates the VSI |
---|
2420 | | - * |
---|
2421 | | - * Returns 0 on success, negative on failure |
---|
2422 | | - */ |
---|
2423 | | -static int ice_vsi_clear(struct ice_vsi *vsi) |
---|
2424 | | -{ |
---|
2425 | | - struct ice_pf *pf = NULL; |
---|
2426 | | - |
---|
2427 | | - if (!vsi) |
---|
2428 | | - return 0; |
---|
2429 | | - |
---|
2430 | | - if (!vsi->back) |
---|
2431 | | - return -EINVAL; |
---|
2432 | | - |
---|
2433 | | - pf = vsi->back; |
---|
2434 | | - |
---|
2435 | | - if (!pf->vsi[vsi->idx] || pf->vsi[vsi->idx] != vsi) { |
---|
2436 | | - dev_dbg(&pf->pdev->dev, "vsi does not exist at pf->vsi[%d]\n", |
---|
2437 | | - vsi->idx); |
---|
2438 | | - return -EINVAL; |
---|
2439 | | - } |
---|
2440 | | - |
---|
2441 | | - mutex_lock(&pf->sw_mutex); |
---|
2442 | | - /* updates the PF for this cleared vsi */ |
---|
2443 | | - |
---|
2444 | | - pf->vsi[vsi->idx] = NULL; |
---|
2445 | | - if (vsi->idx < pf->next_vsi) |
---|
2446 | | - pf->next_vsi = vsi->idx; |
---|
2447 | | - |
---|
2448 | | - ice_vsi_free_arrays(vsi, true); |
---|
2449 | | - mutex_unlock(&pf->sw_mutex); |
---|
2450 | | - devm_kfree(&pf->pdev->dev, vsi); |
---|
2451 | | - |
---|
2452 | | - return 0; |
---|
2453 | | -} |
---|
2454 | | - |
---|
2455 | | -/** |
---|
2456 | | - * ice_vsi_alloc_q_vector - Allocate memory for a single interrupt vector |
---|
2457 | | - * @vsi: the VSI being configured |
---|
2458 | | - * @v_idx: index of the vector in the vsi struct |
---|
2459 | | - * |
---|
2460 | | - * We allocate one q_vector. If allocation fails we return -ENOMEM. |
---|
2461 | | - */ |
---|
2462 | | -static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx) |
---|
2463 | | -{ |
---|
2464 | | - struct ice_pf *pf = vsi->back; |
---|
2465 | | - struct ice_q_vector *q_vector; |
---|
2466 | | - |
---|
2467 | | - /* allocate q_vector */ |
---|
2468 | | - q_vector = devm_kzalloc(&pf->pdev->dev, sizeof(*q_vector), GFP_KERNEL); |
---|
2469 | | - if (!q_vector) |
---|
2470 | | - return -ENOMEM; |
---|
2471 | | - |
---|
2472 | | - q_vector->vsi = vsi; |
---|
2473 | | - q_vector->v_idx = v_idx; |
---|
2474 | | - /* only set affinity_mask if the CPU is online */ |
---|
2475 | | - if (cpu_online(v_idx)) |
---|
2476 | | - cpumask_set_cpu(v_idx, &q_vector->affinity_mask); |
---|
2477 | | - |
---|
2478 | | - if (vsi->netdev) |
---|
2479 | | - netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll, |
---|
2480 | | - NAPI_POLL_WEIGHT); |
---|
2481 | | - /* tie q_vector and vsi together */ |
---|
2482 | | - vsi->q_vectors[v_idx] = q_vector; |
---|
2483 | | - |
---|
2484 | | - return 0; |
---|
2485 | | -} |
---|
2486 | | - |
---|
2487 | | -/** |
---|
2488 | | - * ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors |
---|
2489 | | - * @vsi: the VSI being configured |
---|
2490 | | - * |
---|
2491 | | - * We allocate one q_vector per queue interrupt. If allocation fails we |
---|
2492 | | - * return -ENOMEM. |
---|
2493 | | - */ |
---|
2494 | | -static int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi) |
---|
2495 | | -{ |
---|
2496 | | - struct ice_pf *pf = vsi->back; |
---|
2497 | | - int v_idx = 0, num_q_vectors; |
---|
2498 | | - int err; |
---|
2499 | | - |
---|
2500 | | - if (vsi->q_vectors[0]) { |
---|
2501 | | - dev_dbg(&pf->pdev->dev, "VSI %d has existing q_vectors\n", |
---|
2502 | | - vsi->vsi_num); |
---|
2503 | | - return -EEXIST; |
---|
2504 | | - } |
---|
2505 | | - |
---|
2506 | | - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) { |
---|
2507 | | - num_q_vectors = vsi->num_q_vectors; |
---|
2508 | | - } else { |
---|
2509 | | - err = -EINVAL; |
---|
2510 | | - goto err_out; |
---|
2511 | | - } |
---|
2512 | | - |
---|
2513 | | - for (v_idx = 0; v_idx < num_q_vectors; v_idx++) { |
---|
2514 | | - err = ice_vsi_alloc_q_vector(vsi, v_idx); |
---|
2515 | | - if (err) |
---|
2516 | | - goto err_out; |
---|
2517 | | - } |
---|
| 3052 | + /* make sure transmit queues start off as stopped */ |
---|
| 3053 | + netif_tx_stop_all_queues(vsi->netdev); |
---|
2518 | 3054 | |
---|
2519 | 3055 | return 0; |
---|
2520 | 3056 | |
---|
2521 | | -err_out: |
---|
2522 | | - while (v_idx--) |
---|
2523 | | - ice_free_q_vector(vsi, v_idx); |
---|
2524 | | - |
---|
2525 | | - dev_err(&pf->pdev->dev, |
---|
2526 | | - "Failed to allocate %d q_vector for VSI %d, ret=%d\n", |
---|
2527 | | - vsi->num_q_vectors, vsi->vsi_num, err); |
---|
2528 | | - vsi->num_q_vectors = 0; |
---|
| 3057 | +err_free_netdev: |
---|
| 3058 | + free_netdev(vsi->netdev); |
---|
| 3059 | + vsi->netdev = NULL; |
---|
| 3060 | +err_destroy_devlink_port: |
---|
| 3061 | + ice_devlink_destroy_port(vsi); |
---|
2529 | 3062 | return err; |
---|
2530 | | -} |
---|
2531 | | - |
---|
2532 | | -/** |
---|
2533 | | - * ice_vsi_setup_vector_base - Set up the base vector for the given VSI |
---|
2534 | | - * @vsi: ptr to the VSI |
---|
2535 | | - * |
---|
2536 | | - * This should only be called after ice_vsi_alloc() which allocates the |
---|
2537 | | - * corresponding SW VSI structure and initializes num_queue_pairs for the |
---|
2538 | | - * newly allocated VSI. |
---|
2539 | | - * |
---|
2540 | | - * Returns 0 on success or negative on failure |
---|
2541 | | - */ |
---|
2542 | | -static int ice_vsi_setup_vector_base(struct ice_vsi *vsi) |
---|
2543 | | -{ |
---|
2544 | | - struct ice_pf *pf = vsi->back; |
---|
2545 | | - int num_q_vectors = 0; |
---|
2546 | | - |
---|
2547 | | - if (vsi->base_vector) { |
---|
2548 | | - dev_dbg(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n", |
---|
2549 | | - vsi->vsi_num, vsi->base_vector); |
---|
2550 | | - return -EEXIST; |
---|
2551 | | - } |
---|
2552 | | - |
---|
2553 | | - if (!test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) |
---|
2554 | | - return -ENOENT; |
---|
2555 | | - |
---|
2556 | | - switch (vsi->type) { |
---|
2557 | | - case ICE_VSI_PF: |
---|
2558 | | - num_q_vectors = vsi->num_q_vectors; |
---|
2559 | | - break; |
---|
2560 | | - default: |
---|
2561 | | - dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n", |
---|
2562 | | - vsi->type); |
---|
2563 | | - break; |
---|
2564 | | - } |
---|
2565 | | - |
---|
2566 | | - if (num_q_vectors) |
---|
2567 | | - vsi->base_vector = ice_get_res(pf, pf->irq_tracker, |
---|
2568 | | - num_q_vectors, vsi->idx); |
---|
2569 | | - |
---|
2570 | | - if (vsi->base_vector < 0) { |
---|
2571 | | - dev_err(&pf->pdev->dev, |
---|
2572 | | - "Failed to get tracking for %d vectors for VSI %d, err=%d\n", |
---|
2573 | | - num_q_vectors, vsi->vsi_num, vsi->base_vector); |
---|
2574 | | - return -ENOENT; |
---|
2575 | | - } |
---|
2576 | | - |
---|
2577 | | - return 0; |
---|
2578 | 3063 | } |
---|
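The allocate/register/unwind shape of ice_cfg_netdev() reduces to the following sketch (my_* names hypothetical); the invariant is that each error label releases exactly what was acquired before the failing step.

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

static int my_cfg_netdev(const struct net_device_ops *ops,
			 unsigned int priv_size, unsigned int txqs,
			 unsigned int rxqs, struct net_device **out)
{
	struct net_device *netdev;
	int err;

	netdev = alloc_etherdev_mqs(priv_size, txqs, rxqs);
	if (!netdev)
		return -ENOMEM;

	netdev->netdev_ops = ops;	/* must be set before registering */

	err = register_netdev(netdev);
	if (err)
		goto err_free_netdev;

	netif_carrier_off(netdev);	/* report no link until it is proven up */
	*out = netdev;
	return 0;

err_free_netdev:
	free_netdev(netdev);
	return err;
}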
2579 | 3064 | |
---|
2580 | 3065 | /** |
---|
.. | .. |
---|
2592 | 3077 | } |
---|
2593 | 3078 | |
---|
2594 | 3079 | /** |
---|
2595 | | - * ice_vsi_cfg_rss - Configure RSS params for a VSI |
---|
2596 | | - * @vsi: VSI to be configured |
---|
2597 | | - */ |
---|
2598 | | -static int ice_vsi_cfg_rss(struct ice_vsi *vsi) |
---|
2599 | | -{ |
---|
2600 | | - u8 seed[ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE]; |
---|
2601 | | - struct ice_aqc_get_set_rss_keys *key; |
---|
2602 | | - struct ice_pf *pf = vsi->back; |
---|
2603 | | - enum ice_status status; |
---|
2604 | | - int err = 0; |
---|
2605 | | - u8 *lut; |
---|
2606 | | - |
---|
2607 | | - vsi->rss_size = min_t(int, vsi->rss_size, vsi->num_rxq); |
---|
2608 | | - |
---|
2609 | | - lut = devm_kzalloc(&pf->pdev->dev, vsi->rss_table_size, GFP_KERNEL); |
---|
2610 | | - if (!lut) |
---|
2611 | | - return -ENOMEM; |
---|
2612 | | - |
---|
2613 | | - if (vsi->rss_lut_user) |
---|
2614 | | - memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size); |
---|
2615 | | - else |
---|
2616 | | - ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size); |
---|
2617 | | - |
---|
2618 | | - status = ice_aq_set_rss_lut(&pf->hw, vsi->vsi_num, vsi->rss_lut_type, |
---|
2619 | | - lut, vsi->rss_table_size); |
---|
2620 | | - |
---|
2621 | | - if (status) { |
---|
2622 | | - dev_err(&vsi->back->pdev->dev, |
---|
2623 | | - "set_rss_lut failed, error %d\n", status); |
---|
2624 | | - err = -EIO; |
---|
2625 | | - goto ice_vsi_cfg_rss_exit; |
---|
2626 | | - } |
---|
2627 | | - |
---|
2628 | | - key = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*key), GFP_KERNEL); |
---|
2629 | | - if (!key) { |
---|
2630 | | - err = -ENOMEM; |
---|
2631 | | - goto ice_vsi_cfg_rss_exit; |
---|
2632 | | - } |
---|
2633 | | - |
---|
2634 | | - if (vsi->rss_hkey_user) |
---|
2635 | | - memcpy(seed, vsi->rss_hkey_user, |
---|
2636 | | - ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE); |
---|
2637 | | - else |
---|
2638 | | - netdev_rss_key_fill((void *)seed, |
---|
2639 | | - ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE); |
---|
2640 | | - memcpy(&key->standard_rss_key, seed, |
---|
2641 | | - ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE); |
---|
2642 | | - |
---|
2643 | | - status = ice_aq_set_rss_key(&pf->hw, vsi->vsi_num, key); |
---|
2644 | | - |
---|
2645 | | - if (status) { |
---|
2646 | | - dev_err(&vsi->back->pdev->dev, "set_rss_key failed, error %d\n", |
---|
2647 | | - status); |
---|
2648 | | - err = -EIO; |
---|
2649 | | - } |
---|
2650 | | - |
---|
2651 | | - devm_kfree(&pf->pdev->dev, key); |
---|
2652 | | -ice_vsi_cfg_rss_exit: |
---|
2653 | | - devm_kfree(&pf->pdev->dev, lut); |
---|
2654 | | - return err; |
---|
2655 | | -} |
---|
2656 | | - |
---|
2657 | | -/** |
---|
2658 | | - * ice_vsi_reinit_setup - return resource and reallocate resource for a VSI |
---|
2659 | | - * @vsi: pointer to the ice_vsi |
---|
2660 | | - * |
---|
2661 | | - * This reallocates the VSIs queue resources |
---|
2662 | | - * |
---|
2663 | | - * Returns 0 on success and negative value on failure |
---|
2664 | | - */ |
---|
2665 | | -static int ice_vsi_reinit_setup(struct ice_vsi *vsi) |
---|
2666 | | -{ |
---|
2667 | | - u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; |
---|
2668 | | - int ret, i; |
---|
2669 | | - |
---|
2670 | | - if (!vsi) |
---|
2671 | | - return -EINVAL; |
---|
2672 | | - |
---|
2673 | | - ice_vsi_free_q_vectors(vsi); |
---|
2674 | | - ice_free_res(vsi->back->irq_tracker, vsi->base_vector, vsi->idx); |
---|
2675 | | - vsi->base_vector = 0; |
---|
2676 | | - ice_vsi_clear_rings(vsi); |
---|
2677 | | - ice_vsi_free_arrays(vsi, false); |
---|
2678 | | - ice_vsi_set_num_qs(vsi); |
---|
2679 | | - |
---|
2680 | | - /* Initialize VSI struct elements and create VSI in FW */ |
---|
2681 | | - ret = ice_vsi_add(vsi); |
---|
2682 | | - if (ret < 0) |
---|
2683 | | - goto err_vsi; |
---|
2684 | | - |
---|
2685 | | - ret = ice_vsi_alloc_arrays(vsi, false); |
---|
2686 | | - if (ret < 0) |
---|
2687 | | - goto err_vsi; |
---|
2688 | | - |
---|
2689 | | - switch (vsi->type) { |
---|
2690 | | - case ICE_VSI_PF: |
---|
2691 | | - if (!vsi->netdev) { |
---|
2692 | | - ret = ice_cfg_netdev(vsi); |
---|
2693 | | - if (ret) |
---|
2694 | | - goto err_rings; |
---|
2695 | | - |
---|
2696 | | - ret = register_netdev(vsi->netdev); |
---|
2697 | | - if (ret) |
---|
2698 | | - goto err_rings; |
---|
2699 | | - |
---|
2700 | | - netif_carrier_off(vsi->netdev); |
---|
2701 | | - netif_tx_stop_all_queues(vsi->netdev); |
---|
2702 | | - } |
---|
2703 | | - |
---|
2704 | | - ret = ice_vsi_alloc_q_vectors(vsi); |
---|
2705 | | - if (ret) |
---|
2706 | | - goto err_rings; |
---|
2707 | | - |
---|
2708 | | - ret = ice_vsi_setup_vector_base(vsi); |
---|
2709 | | - if (ret) |
---|
2710 | | - goto err_vectors; |
---|
2711 | | - |
---|
2712 | | - ret = ice_vsi_alloc_rings(vsi); |
---|
2713 | | - if (ret) |
---|
2714 | | - goto err_vectors; |
---|
2715 | | - |
---|
2716 | | - ice_vsi_map_rings_to_vectors(vsi); |
---|
2717 | | - break; |
---|
2718 | | - default: |
---|
2719 | | - break; |
---|
2720 | | - } |
---|
2721 | | - |
---|
2722 | | - ice_vsi_set_tc_cfg(vsi); |
---|
2723 | | - |
---|
2724 | | - /* configure VSI nodes based on number of queues and TC's */ |
---|
2725 | | - for (i = 0; i < vsi->tc_cfg.numtc; i++) |
---|
2726 | | - max_txqs[i] = vsi->num_txq; |
---|
2727 | | - |
---|
2728 | | - ret = ice_cfg_vsi_lan(vsi->port_info, vsi->vsi_num, |
---|
2729 | | - vsi->tc_cfg.ena_tc, max_txqs); |
---|
2730 | | - if (ret) { |
---|
2731 | | - dev_info(&vsi->back->pdev->dev, |
---|
2732 | | - "Failed VSI lan queue config\n"); |
---|
2733 | | - goto err_vectors; |
---|
2734 | | - } |
---|
2735 | | - return 0; |
---|
2736 | | - |
---|
2737 | | -err_vectors: |
---|
2738 | | - ice_vsi_free_q_vectors(vsi); |
---|
2739 | | -err_rings: |
---|
2740 | | - if (vsi->netdev) { |
---|
2741 | | - vsi->current_netdev_flags = 0; |
---|
2742 | | - unregister_netdev(vsi->netdev); |
---|
2743 | | - free_netdev(vsi->netdev); |
---|
2744 | | - vsi->netdev = NULL; |
---|
2745 | | - } |
---|
2746 | | -err_vsi: |
---|
2747 | | - ice_vsi_clear(vsi); |
---|
2748 | | - set_bit(__ICE_RESET_FAILED, vsi->back->state); |
---|
2749 | | - return ret; |
---|
2750 | | -} |
---|
2751 | | - |
---|
2752 | | -/** |
---|
2753 | | - * ice_vsi_setup - Set up a VSI by a given type |
---|
| 3080 | + * ice_pf_vsi_setup - Set up a PF VSI |
---|
2754 | 3081 | * @pf: board private structure |
---|
2755 | | - * @type: VSI type |
---|
2756 | 3082 | * @pi: pointer to the port_info instance |
---|
2757 | 3083 | * |
---|
2758 | | - * This allocates the sw VSI structure and its queue resources. |
---|
2759 | | - * |
---|
2760 | | - * Returns pointer to the successfully allocated and configure VSI sw struct on |
---|
2761 | | - * success, otherwise returns NULL on failure. |
---|
| 3084 | + * Returns pointer to the successfully allocated VSI software struct |
---|
| 3085 | + * on success, otherwise returns NULL on failure. |
---|
2762 | 3086 | */ |
---|
2763 | 3087 | static struct ice_vsi * |
---|
2764 | | -ice_vsi_setup(struct ice_pf *pf, enum ice_vsi_type type, |
---|
2765 | | - struct ice_port_info *pi) |
---|
| 3088 | +ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) |
---|
2766 | 3089 | { |
---|
2767 | | - u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; |
---|
2768 | | - struct device *dev = &pf->pdev->dev; |
---|
2769 | | - struct ice_vsi_ctx ctxt = { 0 }; |
---|
2770 | | - struct ice_vsi *vsi; |
---|
2771 | | - int ret, i; |
---|
2772 | | - |
---|
2773 | | - vsi = ice_vsi_alloc(pf, type); |
---|
2774 | | - if (!vsi) { |
---|
2775 | | - dev_err(dev, "could not allocate VSI\n"); |
---|
2776 | | - return NULL; |
---|
2777 | | - } |
---|
2778 | | - |
---|
2779 | | - vsi->port_info = pi; |
---|
2780 | | - vsi->vsw = pf->first_sw; |
---|
2781 | | - |
---|
2782 | | - if (ice_vsi_get_qs(vsi)) { |
---|
2783 | | - dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n", |
---|
2784 | | - vsi->idx); |
---|
2785 | | - goto err_get_qs; |
---|
2786 | | - } |
---|
2787 | | - |
---|
2788 | | - /* set RSS capabilities */ |
---|
2789 | | - ice_vsi_set_rss_params(vsi); |
---|
2790 | | - |
---|
2791 | | - /* create the VSI */ |
---|
2792 | | - ret = ice_vsi_add(vsi); |
---|
2793 | | - if (ret) |
---|
2794 | | - goto err_vsi; |
---|
2795 | | - |
---|
2796 | | - ctxt.vsi_num = vsi->vsi_num; |
---|
2797 | | - |
---|
2798 | | - switch (vsi->type) { |
---|
2799 | | - case ICE_VSI_PF: |
---|
2800 | | - ret = ice_cfg_netdev(vsi); |
---|
2801 | | - if (ret) |
---|
2802 | | - goto err_cfg_netdev; |
---|
2803 | | - |
---|
2804 | | - ret = register_netdev(vsi->netdev); |
---|
2805 | | - if (ret) |
---|
2806 | | - goto err_register_netdev; |
---|
2807 | | - |
---|
2808 | | - netif_carrier_off(vsi->netdev); |
---|
2809 | | - |
---|
2810 | | - /* make sure transmit queues start off as stopped */ |
---|
2811 | | - netif_tx_stop_all_queues(vsi->netdev); |
---|
2812 | | - ret = ice_vsi_alloc_q_vectors(vsi); |
---|
2813 | | - if (ret) |
---|
2814 | | - goto err_msix; |
---|
2815 | | - |
---|
2816 | | - ret = ice_vsi_setup_vector_base(vsi); |
---|
2817 | | - if (ret) |
---|
2818 | | - goto err_rings; |
---|
2819 | | - |
---|
2820 | | - ret = ice_vsi_alloc_rings(vsi); |
---|
2821 | | - if (ret) |
---|
2822 | | - goto err_rings; |
---|
2823 | | - |
---|
2824 | | - ice_vsi_map_rings_to_vectors(vsi); |
---|
2825 | | - |
---|
2826 | | - /* Do not exit if configuring RSS had an issue, at least |
---|
2827 | | - * receive traffic on first queue. Hence no need to capture |
---|
2828 | | - * return value |
---|
2829 | | - */ |
---|
2830 | | - if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) |
---|
2831 | | - ice_vsi_cfg_rss(vsi); |
---|
2832 | | - break; |
---|
2833 | | - default: |
---|
2834 | | - /* if vsi type is not recognized, clean up the resources and |
---|
2835 | | - * exit |
---|
2836 | | - */ |
---|
2837 | | - goto err_rings; |
---|
2838 | | - } |
---|
2839 | | - |
---|
2840 | | - ice_vsi_set_tc_cfg(vsi); |
---|
2841 | | - |
---|
2842 | | - /* configure VSI nodes based on number of queues and TC's */ |
---|
2843 | | - for (i = 0; i < vsi->tc_cfg.numtc; i++) |
---|
2844 | | - max_txqs[i] = vsi->num_txq; |
---|
2845 | | - |
---|
2846 | | - ret = ice_cfg_vsi_lan(vsi->port_info, vsi->vsi_num, |
---|
2847 | | - vsi->tc_cfg.ena_tc, max_txqs); |
---|
2848 | | - if (ret) { |
---|
2849 | | - dev_info(&pf->pdev->dev, "Failed VSI lan queue config\n"); |
---|
2850 | | - goto err_rings; |
---|
2851 | | - } |
---|
2852 | | - |
---|
2853 | | - return vsi; |
---|
2854 | | - |
---|
2855 | | -err_rings: |
---|
2856 | | - ice_vsi_free_q_vectors(vsi); |
---|
2857 | | -err_msix: |
---|
2858 | | - if (vsi->netdev && vsi->netdev->reg_state == NETREG_REGISTERED) |
---|
2859 | | - unregister_netdev(vsi->netdev); |
---|
2860 | | -err_register_netdev: |
---|
2861 | | - if (vsi->netdev) { |
---|
2862 | | - free_netdev(vsi->netdev); |
---|
2863 | | - vsi->netdev = NULL; |
---|
2864 | | - } |
---|
2865 | | -err_cfg_netdev: |
---|
2866 | | - ret = ice_aq_free_vsi(&pf->hw, &ctxt, false, NULL); |
---|
2867 | | - if (ret) |
---|
2868 | | - dev_err(&vsi->back->pdev->dev, |
---|
2869 | | - "Free VSI AQ call failed, err %d\n", ret); |
---|
2870 | | -err_vsi: |
---|
2871 | | - ice_vsi_put_qs(vsi); |
---|
2872 | | -err_get_qs: |
---|
2873 | | - pf->q_left_tx += vsi->alloc_txq; |
---|
2874 | | - pf->q_left_rx += vsi->alloc_rxq; |
---|
2875 | | - ice_vsi_clear(vsi); |
---|
2876 | | - |
---|
2877 | | - return NULL; |
---|
| 3090 | + return ice_vsi_setup(pf, pi, ICE_VSI_PF, ICE_INVAL_VFID); |
---|
2878 | 3091 | } |
---|
2879 | 3092 | |
---|
2880 | 3093 | /** |
---|
2881 | | - * ice_vsi_add_vlan - Add vsi membership for given vlan |
---|
2882 | | - * @vsi: the vsi being configured |
---|
2883 | | - * @vid: vlan id to be added |
---|
| 3094 | + * ice_ctrl_vsi_setup - Set up a control VSI |
---|
| 3095 | + * @pf: board private structure |
---|
| 3096 | + * @pi: pointer to the port_info instance |
---|
| 3097 | + * |
---|
| 3098 | + * Returns pointer to the successfully allocated VSI software struct |
---|
| 3099 | + * on success, otherwise returns NULL on failure. |
---|
2884 | 3100 | */ |
---|
2885 | | -static int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid) |
---|
| 3101 | +static struct ice_vsi * |
---|
| 3102 | +ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) |
---|
2886 | 3103 | { |
---|
2887 | | - struct ice_fltr_list_entry *tmp; |
---|
2888 | | - struct ice_pf *pf = vsi->back; |
---|
2889 | | - LIST_HEAD(tmp_add_list); |
---|
2890 | | - enum ice_status status; |
---|
2891 | | - int err = 0; |
---|
2892 | | - |
---|
2893 | | - tmp = devm_kzalloc(&pf->pdev->dev, sizeof(*tmp), GFP_KERNEL); |
---|
2894 | | - if (!tmp) |
---|
2895 | | - return -ENOMEM; |
---|
2896 | | - |
---|
2897 | | - tmp->fltr_info.lkup_type = ICE_SW_LKUP_VLAN; |
---|
2898 | | - tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI; |
---|
2899 | | - tmp->fltr_info.flag = ICE_FLTR_TX; |
---|
2900 | | - tmp->fltr_info.src = vsi->vsi_num; |
---|
2901 | | - tmp->fltr_info.fwd_id.vsi_id = vsi->vsi_num; |
---|
2902 | | - tmp->fltr_info.l_data.vlan.vlan_id = vid; |
---|
2903 | | - |
---|
2904 | | - INIT_LIST_HEAD(&tmp->list_entry); |
---|
2905 | | - list_add(&tmp->list_entry, &tmp_add_list); |
---|
2906 | | - |
---|
2907 | | - status = ice_add_vlan(&pf->hw, &tmp_add_list); |
---|
2908 | | - if (status) { |
---|
2909 | | - err = -ENODEV; |
---|
2910 | | - dev_err(&pf->pdev->dev, "Failure Adding VLAN %d on VSI %i\n", |
---|
2911 | | - vid, vsi->vsi_num); |
---|
2912 | | - } |
---|
2913 | | - |
---|
2914 | | - ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list); |
---|
2915 | | - return err; |
---|
| 3104 | + return ice_vsi_setup(pf, pi, ICE_VSI_CTRL, ICE_INVAL_VFID); |
---|
2916 | 3105 | } |
---|
2917 | 3106 | |
---|
2918 | 3107 | /** |
---|
2919 | | - * ice_vlan_rx_add_vid - Add a vlan id filter to HW offload |
---|
| 3108 | + * ice_lb_vsi_setup - Set up a loopback VSI |
---|
| 3109 | + * @pf: board private structure |
---|
| 3110 | + * @pi: pointer to the port_info instance |
---|
| 3111 | + * |
---|
| 3112 | + * Returns pointer to the successfully allocated VSI software struct |
---|
| 3113 | + * on success, otherwise returns NULL on failure. |
---|
| 3114 | + */ |
---|
| 3115 | +struct ice_vsi * |
---|
| 3116 | +ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) |
---|
| 3117 | +{ |
---|
| 3118 | + return ice_vsi_setup(pf, pi, ICE_VSI_LB, ICE_INVAL_VFID); |
---|
| 3119 | +} |
---|
| 3120 | + |
---|
| 3121 | +/** |
---|
| 3122 | + * ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload |
---|
2920 | 3123 | * @netdev: network interface to be adjusted |
---|
2921 | 3124 | * @proto: unused protocol |
---|
2922 | | - * @vid: vlan id to be added |
---|
| 3125 | + * @vid: VLAN ID to be added |
---|
2923 | 3126 | * |
---|
2924 | | - * net_device_ops implementation for adding vlan ids |
---|
| 3127 | + * net_device_ops implementation for adding VLAN IDs |
---|
2925 | 3128 | */ |
---|
2926 | | -static int ice_vlan_rx_add_vid(struct net_device *netdev, |
---|
2927 | | - __always_unused __be16 proto, u16 vid) |
---|
| 3129 | +static int |
---|
| 3130 | +ice_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto, |
---|
| 3131 | + u16 vid) |
---|
2928 | 3132 | { |
---|
2929 | 3133 | struct ice_netdev_priv *np = netdev_priv(netdev); |
---|
2930 | 3134 | struct ice_vsi *vsi = np->vsi; |
---|
2931 | | - int ret = 0; |
---|
| 3135 | + int ret; |
---|
2932 | 3136 | |
---|
2933 | 3137 | if (vid >= VLAN_N_VID) { |
---|
2934 | 3138 | netdev_err(netdev, "VLAN id requested %d is out of range %d\n", |
---|
.. | .. |
---|
2939 | 3143 | if (vsi->info.pvid) |
---|
2940 | 3144 | return -EINVAL; |
---|
2941 | 3145 | |
---|
2942 | | - /* Add all VLAN ids including 0 to the switch filter. VLAN id 0 is |
---|
2943 | | - * needed to continue allowing all untagged packets since VLAN prune |
---|
2944 | | - * list is applied to all packets by the switch |
---|
2945 | | - */ |
---|
2946 | | - ret = ice_vsi_add_vlan(vsi, vid); |
---|
| 3146 | + /* VLAN 0 is added by default during load/reset */ |
---|
| 3147 | + if (!vid) |
---|
| 3148 | + return 0; |
---|
2947 | 3149 | |
---|
2948 | | - if (!ret) |
---|
2949 | | - set_bit(vid, vsi->active_vlans); |
---|
| 3150 | + /* Enable VLAN pruning when a VLAN other than 0 is added */ |
---|
| 3151 | + if (!ice_vsi_is_vlan_pruning_ena(vsi)) { |
---|
| 3152 | + ret = ice_cfg_vlan_pruning(vsi, true, false); |
---|
| 3153 | + if (ret) |
---|
| 3154 | + return ret; |
---|
| 3155 | + } |
---|
| 3156 | + |
---|
| 3157 | + /* Add a switch rule for this VLAN ID so its corresponding VLAN tagged |
---|
| 3158 | + * packets aren't pruned by the device's internal switch on Rx |
---|
| 3159 | + */ |
---|
| 3160 | + ret = ice_vsi_add_vlan(vsi, vid, ICE_FWD_TO_VSI); |
---|
| 3161 | + if (!ret) { |
---|
| 3162 | + vsi->vlan_ena = true; |
---|
| 3163 | + set_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags); |
---|
| 3164 | + } |
---|
2950 | 3165 | |
---|
2951 | 3166 | return ret; |
---|
2952 | 3167 | } |
---|
2953 | 3168 | |
---|
2954 | 3169 | /** |
---|
2955 | | - * ice_vsi_kill_vlan - Remove VSI membership for a given VLAN |
---|
2956 | | - * @vsi: the VSI being configured |
---|
2957 | | - * @vid: VLAN id to be removed |
---|
2958 | | - */ |
---|
2959 | | -static void ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid) |
---|
2960 | | -{ |
---|
2961 | | - struct ice_fltr_list_entry *list; |
---|
2962 | | - struct ice_pf *pf = vsi->back; |
---|
2963 | | - LIST_HEAD(tmp_add_list); |
---|
2964 | | - |
---|
2965 | | - list = devm_kzalloc(&pf->pdev->dev, sizeof(*list), GFP_KERNEL); |
---|
2966 | | - if (!list) |
---|
2967 | | - return; |
---|
2968 | | - |
---|
2969 | | - list->fltr_info.lkup_type = ICE_SW_LKUP_VLAN; |
---|
2970 | | - list->fltr_info.fwd_id.vsi_id = vsi->vsi_num; |
---|
2971 | | - list->fltr_info.fltr_act = ICE_FWD_TO_VSI; |
---|
2972 | | - list->fltr_info.l_data.vlan.vlan_id = vid; |
---|
2973 | | - list->fltr_info.flag = ICE_FLTR_TX; |
---|
2974 | | - list->fltr_info.src = vsi->vsi_num; |
---|
2975 | | - |
---|
2976 | | - INIT_LIST_HEAD(&list->list_entry); |
---|
2977 | | - list_add(&list->list_entry, &tmp_add_list); |
---|
2978 | | - |
---|
2979 | | - if (ice_remove_vlan(&pf->hw, &tmp_add_list)) |
---|
2980 | | - dev_err(&pf->pdev->dev, "Error removing VLAN %d on vsi %i\n", |
---|
2981 | | - vid, vsi->vsi_num); |
---|
2982 | | - |
---|
2983 | | - ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list); |
---|
2984 | | -} |
---|
2985 | | - |
---|
2986 | | -/** |
---|
2987 | | - * ice_vlan_rx_kill_vid - Remove a vlan id filter from HW offload |
---|
| 3170 | + * ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload |
---|
2988 | 3171 | * @netdev: network interface to be adjusted |
---|
2989 | 3172 | * @proto: unused protocol |
---|
2990 | | - * @vid: vlan id to be removed |
---|
| 3173 | + * @vid: VLAN ID to be removed |
---|
2991 | 3174 | * |
---|
2992 | | - * net_device_ops implementation for removing vlan ids |
---|
| 3175 | + * net_device_ops implementation for removing VLAN IDs |
---|
2993 | 3176 | */ |
---|
2994 | | -static int ice_vlan_rx_kill_vid(struct net_device *netdev, |
---|
2995 | | - __always_unused __be16 proto, u16 vid) |
---|
| 3177 | +static int |
---|
| 3178 | +ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto, |
---|
| 3179 | + u16 vid) |
---|
2996 | 3180 | { |
---|
2997 | 3181 | struct ice_netdev_priv *np = netdev_priv(netdev); |
---|
2998 | 3182 | struct ice_vsi *vsi = np->vsi; |
---|
| 3183 | + int ret; |
---|
2999 | 3184 | |
---|
3000 | 3185 | if (vsi->info.pvid) |
---|
3001 | 3186 | return -EINVAL; |
---|
3002 | 3187 | |
---|
3003 | | - /* return code is ignored as there is nothing a user |
---|
3004 | | - * can do about failure to remove and a log message was |
---|
3005 | | - * already printed from the other function |
---|
| 3188 | + /* don't allow removal of VLAN 0 */ |
---|
| 3189 | + if (!vid) |
---|
| 3190 | + return 0; |
---|
| 3191 | + |
---|
| 3192 | + /* Make sure ice_vsi_kill_vlan is successful before updating VLAN |
---|
| 3193 | + * information |
---|
3006 | 3194 | */ |
---|
3007 | | - ice_vsi_kill_vlan(vsi, vid); |
---|
| 3195 | + ret = ice_vsi_kill_vlan(vsi, vid); |
---|
| 3196 | + if (ret) |
---|
| 3197 | + return ret; |
---|
3008 | 3198 | |
---|
3009 | | - clear_bit(vid, vsi->active_vlans); |
---|
| 3199 | + /* Disable pruning when VLAN 0 is the only VLAN rule */ |
---|
| 3200 | + if (vsi->num_vlan == 1 && ice_vsi_is_vlan_pruning_ena(vsi)) |
---|
| 3201 | + ret = ice_cfg_vlan_pruning(vsi, false, false); |
---|
3010 | 3202 | |
---|
3011 | | - return 0; |
---|
| 3203 | + vsi->vlan_ena = false; |
---|
| 3204 | + set_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags); |
---|
| 3205 | + return ret; |
---|
3012 | 3206 | } |
---|
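For context, the ndo contract the two handlers above implement, sketched with hypothetical names: the stack invokes these callbacks, one VLAN ID at a time, on devices that advertise NETIF_F_HW_VLAN_CTAG_FILTER.

#include <linux/netdevice.h>

static int my_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	return 0;	/* program a HW filter for (proto, vid) */
}

static int my_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	return 0;	/* drop the HW filter; VID 0 stays, as above */
}

static const struct net_device_ops my_ops = {
	.ndo_vlan_rx_add_vid	= my_add_vid,
	.ndo_vlan_rx_kill_vid	= my_kill_vid,
};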
3013 | 3207 | |
---|
3014 | 3208 | /** |
---|
.. | .. |
---|
3019 | 3213 | */ |
---|
3020 | 3214 | static int ice_setup_pf_sw(struct ice_pf *pf) |
---|
3021 | 3215 | { |
---|
3022 | | - LIST_HEAD(tmp_add_list); |
---|
3023 | | - u8 broadcast[ETH_ALEN]; |
---|
3024 | 3216 | struct ice_vsi *vsi; |
---|
3025 | 3217 | int status = 0; |
---|
3026 | 3218 | |
---|
3027 | | - if (!ice_is_reset_recovery_pending(pf->state)) { |
---|
3028 | | - vsi = ice_vsi_setup(pf, ICE_VSI_PF, pf->hw.port_info); |
---|
3029 | | - if (!vsi) { |
---|
3030 | | - status = -ENOMEM; |
---|
3031 | | - goto error_exit; |
---|
3032 | | - } |
---|
3033 | | - } else { |
---|
3034 | | - vsi = pf->vsi[0]; |
---|
3035 | | - status = ice_vsi_reinit_setup(vsi); |
---|
3036 | | - if (status < 0) |
---|
3037 | | - return -EIO; |
---|
3038 | | - } |
---|
| 3219 | + if (ice_is_reset_in_progress(pf->state)) |
---|
| 3220 | + return -EBUSY; |
---|
3039 | 3221 | |
---|
3040 | | - /* tmp_add_list contains a list of MAC addresses for which MAC |
---|
3041 | | - * filters need to be programmed. Add the VSI's unicast MAC to |
---|
3042 | | - * this list |
---|
3043 | | - */ |
---|
3044 | | - status = ice_add_mac_to_list(vsi, &tmp_add_list, |
---|
3045 | | - vsi->port_info->mac.perm_addr); |
---|
3046 | | - if (status) |
---|
3047 | | - goto error_exit; |
---|
| 3222 | + vsi = ice_pf_vsi_setup(pf, pf->hw.port_info); |
---|
| 3223 | + if (!vsi) |
---|
| 3224 | + return -ENOMEM; |
---|
3048 | 3225 | |
---|
3049 | | - /* VSI needs to receive broadcast traffic, so add the broadcast |
---|
3050 | | - * MAC address to the list. |
---|
3051 | | - */ |
---|
3052 | | - eth_broadcast_addr(broadcast); |
---|
3053 | | - status = ice_add_mac_to_list(vsi, &tmp_add_list, broadcast); |
---|
3054 | | - if (status) |
---|
3055 | | - goto error_exit; |
---|
3056 | | - |
---|
3057 | | - /* program MAC filters for entries in tmp_add_list */ |
---|
3058 | | - status = ice_add_mac(&pf->hw, &tmp_add_list); |
---|
| 3226 | + status = ice_cfg_netdev(vsi); |
---|
3059 | 3227 | if (status) { |
---|
3060 | | - dev_err(&pf->pdev->dev, "Could not add MAC filters\n"); |
---|
3061 | | - status = -ENOMEM; |
---|
3062 | | - goto error_exit; |
---|
| 3228 | + status = -ENODEV; |
---|
| 3229 | + goto unroll_vsi_setup; |
---|
3063 | 3230 | } |
---|
| 3231 | + /* netdev has to be configured before setting frame size */ |
---|
| 3232 | + ice_vsi_cfg_frame_size(vsi); |
---|
3064 | 3233 | |
---|
3065 | | - ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list); |
---|
| 3234 | + /* Setup DCB netlink interface */ |
---|
| 3235 | + ice_dcbnl_setup(vsi); |
---|
| 3236 | + |
---|
| 3237 | + /* registering the NAPI handler requires both the queues and |
---|
| 3238 | + * netdev to be created, which are done in ice_pf_vsi_setup() |
---|
| 3239 | + * and ice_cfg_netdev() respectively |
---|
| 3240 | + */ |
---|
| 3241 | + ice_napi_add(vsi); |
---|
| 3242 | + |
---|
| 3243 | + status = ice_set_cpu_rx_rmap(vsi); |
---|
| 3244 | + if (status) { |
---|
| 3245 | + dev_err(ice_pf_to_dev(pf), "Failed to set CPU Rx map VSI %d error %d\n", |
---|
| 3246 | + vsi->vsi_num, status); |
---|
| 3247 | + status = -EINVAL; |
---|
| 3248 | + goto unroll_napi_add; |
---|
| 3249 | + } |
---|
| 3250 | + status = ice_init_mac_fltr(pf); |
---|
| 3251 | + if (status) |
---|
| 3252 | + goto free_cpu_rx_map; |
---|
| 3253 | + |
---|
3066 | 3254 | return status; |
---|
3067 | 3255 | |
---|
3068 | | -error_exit: |
---|
3069 | | - ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list); |
---|
| 3256 | +free_cpu_rx_map: |
---|
| 3257 | + ice_free_cpu_rx_rmap(vsi); |
---|
3070 | 3258 | |
---|
| 3259 | +unroll_napi_add: |
---|
3071 | 3260 | if (vsi) { |
---|
3072 | | - ice_vsi_free_q_vectors(vsi); |
---|
3073 | | - if (vsi->netdev && vsi->netdev->reg_state == NETREG_REGISTERED) |
---|
3074 | | - unregister_netdev(vsi->netdev); |
---|
| 3261 | + ice_napi_del(vsi); |
---|
3075 | 3262 | if (vsi->netdev) { |
---|
| 3263 | + if (vsi->netdev->reg_state == NETREG_REGISTERED) |
---|
| 3264 | + unregister_netdev(vsi->netdev); |
---|
3076 | 3265 | free_netdev(vsi->netdev); |
---|
3077 | 3266 | vsi->netdev = NULL; |
---|
3078 | 3267 | } |
---|
3079 | | - |
---|
3080 | | - ice_vsi_delete(vsi); |
---|
3081 | | - ice_vsi_put_qs(vsi); |
---|
3082 | | - pf->q_left_tx += vsi->alloc_txq; |
---|
3083 | | - pf->q_left_rx += vsi->alloc_rxq; |
---|
3084 | | - ice_vsi_clear(vsi); |
---|
3085 | 3268 | } |
---|
| 3269 | + |
---|
| 3270 | +unroll_vsi_setup: |
---|
| 3271 | + ice_vsi_release(vsi); |
---|
3086 | 3272 | return status; |
---|
3087 | 3273 | } |
---|
3088 | 3274 | |
---|
3089 | 3275 | /** |
---|
3090 | | - * ice_determine_q_usage - Calculate queue distribution |
---|
3091 | | - * @pf: board private structure |
---|
3092 | | - * |
---|
3093 | | - * Return -ENOMEM if we don't get enough queues for all ports |
---|
| 3276 | + * ice_get_avail_q_count - Get count of available queues |
---|
| 3277 | + * @pf_qmap: bitmap to get queue use count from |
---|
| 3278 | + * @lock: pointer to a mutex that protects access to pf_qmap |
---|
| 3279 | + * @size: size of the bitmap |
---|
3094 | 3280 | */ |
---|
3095 | | -static void ice_determine_q_usage(struct ice_pf *pf) |
---|
| 3281 | +static u16 |
---|
| 3282 | +ice_get_avail_q_count(unsigned long *pf_qmap, struct mutex *lock, u16 size) |
---|
3096 | 3283 | { |
---|
3097 | | - u16 q_left_tx, q_left_rx; |
---|
| 3284 | + unsigned long bit; |
---|
| 3285 | + u16 count = 0; |
---|
3098 | 3286 | |
---|
3099 | | - q_left_tx = pf->hw.func_caps.common_cap.num_txq; |
---|
3100 | | - q_left_rx = pf->hw.func_caps.common_cap.num_rxq; |
---|
| 3287 | + mutex_lock(lock); |
---|
| 3288 | + for_each_clear_bit(bit, pf_qmap, size) |
---|
| 3289 | + count++; |
---|
| 3290 | + mutex_unlock(lock); |
---|
3101 | 3291 | |
---|
3102 | | - pf->num_lan_tx = min_t(int, q_left_tx, num_online_cpus()); |
---|
| 3292 | + return count; |
---|
| 3293 | +} |
---|
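Locking aside, the loop above is equivalent to subtracting the bitmap's population count from its size; a sketch (not the driver's code), where set bits mark in-use queues:

#include <linux/bitmap.h>

static u16 my_count_free(const unsigned long *qmap, u16 size)
{
	return size - bitmap_weight(qmap, size);
}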
3103 | 3294 | |
---|
3104 | | - /* only 1 rx queue unless RSS is enabled */ |
---|
3105 | | - if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) |
---|
3106 | | - pf->num_lan_rx = 1; |
---|
3107 | | - else |
---|
3108 | | - pf->num_lan_rx = min_t(int, q_left_rx, num_online_cpus()); |
---|
| 3295 | +/** |
---|
| 3296 | + * ice_get_avail_txq_count - Get count of available Tx queues |
---|
| 3297 | + * @pf: pointer to an ice_pf instance |
---|
| 3298 | + */ |
---|
| 3299 | +u16 ice_get_avail_txq_count(struct ice_pf *pf) |
---|
| 3300 | +{ |
---|
| 3301 | + return ice_get_avail_q_count(pf->avail_txqs, &pf->avail_q_mutex, |
---|
| 3302 | + pf->max_pf_txqs); |
---|
| 3303 | +} |
---|
3109 | 3304 | |
---|
3110 | | - pf->q_left_tx = q_left_tx - pf->num_lan_tx; |
---|
3111 | | - pf->q_left_rx = q_left_rx - pf->num_lan_rx; |
---|
| 3305 | +/** |
---|
| 3306 | + * ice_get_avail_rxq_count - Get count of available Rx queues |
---|
| 3307 | + * @pf: pointer to an ice_pf instance |
---|
| 3308 | + */ |
---|
| 3309 | +u16 ice_get_avail_rxq_count(struct ice_pf *pf) |
---|
| 3310 | +{ |
---|
| 3311 | + return ice_get_avail_q_count(pf->avail_rxqs, &pf->avail_q_mutex, |
---|
| 3312 | + pf->max_pf_rxqs); |
---|
3112 | 3313 | } |
---|
3113 | 3314 | |
---|
3114 | 3315 | /** |
---|
.. | .. |
---|
3117 | 3318 | */ |
---|
3118 | 3319 | static void ice_deinit_pf(struct ice_pf *pf) |
---|
3119 | 3320 | { |
---|
3120 | | - if (pf->serv_tmr.function) |
---|
3121 | | - del_timer_sync(&pf->serv_tmr); |
---|
3122 | | - if (pf->serv_task.func) |
---|
3123 | | - cancel_work_sync(&pf->serv_task); |
---|
| 3321 | + ice_service_task_stop(pf); |
---|
3124 | 3322 | mutex_destroy(&pf->sw_mutex); |
---|
| 3323 | + mutex_destroy(&pf->tc_mutex); |
---|
3125 | 3324 | mutex_destroy(&pf->avail_q_mutex); |
---|
| 3325 | + |
---|
| 3326 | + if (pf->avail_txqs) { |
---|
| 3327 | + bitmap_free(pf->avail_txqs); |
---|
| 3328 | + pf->avail_txqs = NULL; |
---|
| 3329 | + } |
---|
| 3330 | + |
---|
| 3331 | + if (pf->avail_rxqs) { |
---|
| 3332 | + bitmap_free(pf->avail_rxqs); |
---|
| 3333 | + pf->avail_rxqs = NULL; |
---|
| 3334 | + } |
---|
| 3335 | +} |
---|
| 3336 | + |
---|
| 3337 | +/** |
---|
| 3338 | + * ice_set_pf_caps - set PF's capability flags |
---|
| 3339 | + * @pf: pointer to the PF instance |
---|
| 3340 | + */ |
---|
| 3341 | +static void ice_set_pf_caps(struct ice_pf *pf) |
---|
| 3342 | +{ |
---|
| 3343 | + struct ice_hw_func_caps *func_caps = &pf->hw.func_caps; |
---|
| 3344 | + |
---|
| 3345 | + clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); |
---|
| 3346 | + if (func_caps->common_cap.dcb) |
---|
| 3347 | + set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); |
---|
| 3348 | + clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags); |
---|
| 3349 | + if (func_caps->common_cap.sr_iov_1_1) { |
---|
| 3350 | + set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags); |
---|
| 3351 | + pf->num_vfs_supported = min_t(int, func_caps->num_allocd_vfs, |
---|
| 3352 | + ICE_MAX_VF_COUNT); |
---|
| 3353 | + } |
---|
| 3354 | + clear_bit(ICE_FLAG_RSS_ENA, pf->flags); |
---|
| 3355 | + if (func_caps->common_cap.rss_table_size) |
---|
| 3356 | + set_bit(ICE_FLAG_RSS_ENA, pf->flags); |
---|
| 3357 | + |
---|
| 3358 | + clear_bit(ICE_FLAG_FD_ENA, pf->flags); |
---|
| 3359 | + if (func_caps->fd_fltr_guar > 0 || func_caps->fd_fltr_best_effort > 0) { |
---|
| 3360 | + u16 unused; |
---|
| 3361 | + |
---|
| 3362 | + /* ctrl_vsi_idx will be set to a valid value when flow director |
---|
| 3363 | + * is set up by ice_init_fdir |
---|
| 3364 | + */ |
---|
| 3365 | + pf->ctrl_vsi_idx = ICE_NO_VSI; |
---|
| 3366 | + set_bit(ICE_FLAG_FD_ENA, pf->flags); |
---|
| 3367 | + /* force guaranteed filter pool for PF */ |
---|
| 3368 | + ice_alloc_fd_guar_item(&pf->hw, &unused, |
---|
| 3369 | + func_caps->fd_fltr_guar); |
---|
| 3370 | + /* force shared filter pool for PF */ |
---|
| 3371 | + ice_alloc_fd_shrd_item(&pf->hw, &unused, |
---|
| 3372 | + func_caps->fd_fltr_best_effort); |
---|
| 3373 | + } |
---|
| 3374 | + |
---|
| 3375 | + pf->max_pf_txqs = func_caps->common_cap.num_txq; |
---|
| 3376 | + pf->max_pf_rxqs = func_caps->common_cap.num_rxq; |
---|
3126 | 3377 | } |
---|
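The clear-then-set pattern above is deliberate; sketched below with a hypothetical flag bit, clearing before testing means a rerun after reset (when capabilities may have changed) cannot leave a stale bit behind.

#include <linux/bitops.h>

#define MY_FLAG_RSS_ENA	0	/* hypothetical flag bit */

static void my_set_caps(unsigned long *flags, bool has_rss)
{
	clear_bit(MY_FLAG_RSS_ENA, flags);	/* start from a clean slate */
	if (has_rss)
		set_bit(MY_FLAG_RSS_ENA, flags);
}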
3127 | 3378 | |
---|
3128 | 3379 | /** |
---|
3129 | 3380 | * ice_init_pf - Initialize general software structures (struct ice_pf) |
---|
3130 | 3381 | * @pf: board private structure to initialize |
---|
3131 | 3382 | */ |
---|
3132 | | -static void ice_init_pf(struct ice_pf *pf) |
---|
| 3383 | +static int ice_init_pf(struct ice_pf *pf) |
---|
3133 | 3384 | { |
---|
3134 | | - bitmap_zero(pf->flags, ICE_PF_FLAGS_NBITS); |
---|
3135 | | - set_bit(ICE_FLAG_MSIX_ENA, pf->flags); |
---|
| 3385 | + ice_set_pf_caps(pf); |
---|
3136 | 3386 | |
---|
3137 | 3387 | mutex_init(&pf->sw_mutex); |
---|
3138 | | - mutex_init(&pf->avail_q_mutex); |
---|
| 3388 | + mutex_init(&pf->tc_mutex); |
---|
3139 | 3389 | |
---|
3140 | | - /* Clear avail_[t|r]x_qs bitmaps (set all to avail) */ |
---|
3141 | | - mutex_lock(&pf->avail_q_mutex); |
---|
3142 | | - bitmap_zero(pf->avail_txqs, ICE_MAX_TXQS); |
---|
3143 | | - bitmap_zero(pf->avail_rxqs, ICE_MAX_RXQS); |
---|
3144 | | - mutex_unlock(&pf->avail_q_mutex); |
---|
3145 | | - |
---|
3146 | | - if (pf->hw.func_caps.common_cap.rss_table_size) |
---|
3147 | | - set_bit(ICE_FLAG_RSS_ENA, pf->flags); |
---|
| 3390 | + INIT_HLIST_HEAD(&pf->aq_wait_list); |
---|
| 3391 | + spin_lock_init(&pf->aq_wait_lock); |
---|
| 3392 | + init_waitqueue_head(&pf->aq_wait_queue); |
---|
3148 | 3393 | |
---|
3149 | 3394 | /* setup service timer and periodic service task */ |
---|
3150 | 3395 | timer_setup(&pf->serv_tmr, ice_service_timer, 0); |
---|
3151 | 3396 | pf->serv_tmr_period = HZ; |
---|
3152 | 3397 | INIT_WORK(&pf->serv_task, ice_service_task); |
---|
3153 | 3398 | clear_bit(__ICE_SERVICE_SCHED, pf->state); |
---|
| 3399 | + |
---|
| 3400 | + mutex_init(&pf->avail_q_mutex); |
---|
| 3401 | + pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL); |
---|
| 3402 | + if (!pf->avail_txqs) |
---|
| 3403 | + return -ENOMEM; |
---|
| 3404 | + |
---|
| 3405 | + pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL); |
---|
| 3406 | + if (!pf->avail_rxqs) { |
---|
| 3407 | + bitmap_free(pf->avail_txqs); |
---|
| 3408 | + pf->avail_txqs = NULL; |
---|
| 3409 | + return -ENOMEM; |
---|
| 3410 | + } |
---|
| 3411 | + |
---|
| 3412 | + return 0; |
---|
3154 | 3413 | } |
---|
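The queue-map allocations above pair off as in this sketch (hypothetical helpers): sizes are in bits, the storage arrives zeroed so every queue starts out available, and bitmap_free() tolerates NULL the same way kfree() does.

#include <linux/bitmap.h>
#include <linux/gfp.h>

static unsigned long *my_alloc_qmap(unsigned int nqueues)
{
	return bitmap_zalloc(nqueues, GFP_KERNEL);	/* NULL on failure */
}

static void my_free_qmap(unsigned long *qmap)
{
	bitmap_free(qmap);
}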
3155 | 3414 | |
---|
3156 | 3415 | /** |
---|
.. | .. |
---|
3162 | 3421 | */ |
---|
3163 | 3422 | static int ice_ena_msix_range(struct ice_pf *pf) |
---|
3164 | 3423 | { |
---|
| 3424 | + struct device *dev = ice_pf_to_dev(pf); |
---|
3165 | 3425 | int v_left, v_actual, v_budget = 0; |
---|
3166 | 3426 | int needed, err, i; |
---|
3167 | 3427 | |
---|
.. | .. |
---|
3169 | 3429 | |
---|
3170 | 3430 | /* reserve one vector for miscellaneous handler */ |
---|
3171 | 3431 | needed = 1; |
---|
| 3432 | + if (v_left < needed) |
---|
| 3433 | + goto no_hw_vecs_left_err; |
---|
3172 | 3434 | v_budget += needed; |
---|
3173 | 3435 | v_left -= needed; |
---|
3174 | 3436 | |
---|
3175 | 3437 | /* reserve vectors for LAN traffic */ |
---|
3176 | | - pf->num_lan_msix = min_t(int, num_online_cpus(), v_left); |
---|
3177 | | - v_budget += pf->num_lan_msix; |
---|
| 3438 | + needed = min_t(int, num_online_cpus(), v_left); |
---|
| 3439 | + if (v_left < needed) |
---|
| 3440 | + goto no_hw_vecs_left_err; |
---|
| 3441 | + pf->num_lan_msix = needed; |
---|
| 3442 | + v_budget += needed; |
---|
| 3443 | + v_left -= needed; |
---|
3178 | 3444 | |
---|
3179 | | - pf->msix_entries = devm_kcalloc(&pf->pdev->dev, v_budget, |
---|
3180 | | - sizeof(struct msix_entry), GFP_KERNEL); |
---|
| 3445 | + /* reserve one vector for flow director */ |
---|
| 3446 | + if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) { |
---|
| 3447 | + needed = ICE_FDIR_MSIX; |
---|
| 3448 | + if (v_left < needed) |
---|
| 3449 | + goto no_hw_vecs_left_err; |
---|
| 3450 | + v_budget += needed; |
---|
| 3451 | + v_left -= needed; |
---|
| 3452 | + } |
---|
| 3453 | + |
---|
| 3454 | + pf->msix_entries = devm_kcalloc(dev, v_budget, |
---|
| 3455 | + sizeof(*pf->msix_entries), GFP_KERNEL); |
---|
3181 | 3456 | |
---|
3182 | 3457 | if (!pf->msix_entries) { |
---|
3183 | 3458 | err = -ENOMEM; |
---|
.. | .. |
---|
3192 | 3467 | ICE_MIN_MSIX, v_budget); |
---|
3193 | 3468 | |
---|
3194 | 3469 | if (v_actual < 0) { |
---|
3195 | | - dev_err(&pf->pdev->dev, "unable to reserve MSI-X vectors\n"); |
---|
| 3470 | + dev_err(dev, "unable to reserve MSI-X vectors\n"); |
---|
3196 | 3471 | err = v_actual; |
---|
3197 | 3472 | goto msix_err; |
---|
3198 | 3473 | } |
---|
3199 | 3474 | |
---|
3200 | 3475 | if (v_actual < v_budget) { |
---|
3201 | | - dev_warn(&pf->pdev->dev, |
---|
3202 | | - "not enough vectors. requested = %d, obtained = %d\n", |
---|
| 3476 | + dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n", |
---|
3203 | 3477 | v_budget, v_actual); |
---|
3204 | | - if (v_actual >= (pf->num_lan_msix + 1)) { |
---|
3205 | | - pf->num_avail_msix = v_actual - (pf->num_lan_msix + 1); |
---|
3206 | | - } else if (v_actual >= 2) { |
---|
3207 | | - pf->num_lan_msix = 1; |
---|
3208 | | - pf->num_avail_msix = v_actual - 2; |
---|
3209 | | - } else { |
---|
| 3478 | + |
---|
| 3479 | + if (v_actual < ICE_MIN_MSIX) { |
---|
| 3480 | + /* error if we can't get minimum vectors */ |
---|
3210 | 3481 | pci_disable_msix(pf->pdev); |
---|
3211 | 3482 | err = -ERANGE; |
---|
3212 | 3483 | goto msix_err; |
---|
| 3484 | + } else { |
---|
| 3485 | + pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX; |
---|
3213 | 3486 | } |
---|
3214 | 3487 | } |
---|
3215 | 3488 | |
---|
3216 | 3489 | return v_actual; |
---|
3217 | 3490 | |
---|
3218 | 3491 | msix_err: |
---|
3219 | | - devm_kfree(&pf->pdev->dev, pf->msix_entries); |
---|
| 3492 | + devm_kfree(dev, pf->msix_entries); |
---|
3220 | 3493 | goto exit_err; |
---|
3221 | 3494 | |
---|
| 3495 | +no_hw_vecs_left_err: |
---|
| 3496 | + dev_err(dev, "not enough device MSI-X vectors. requested = %d, available = %d\n", |
---|
| 3497 | + needed, v_left); |
---|
| 3498 | + err = -ERANGE; |
---|
3222 | 3499 | exit_err: |
---|
3223 | 3500 | pf->num_lan_msix = 0; |
---|
3224 | | - clear_bit(ICE_FLAG_MSIX_ENA, pf->flags); |
---|
3225 | 3501 | return err; |
---|
3226 | 3502 | } |
---|
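The budgeting above claims vectors in a fixed order (miscellaneous handler, one LAN vector per online CPU, one flow director vector) and bails out with -ERANGE whenever the device runs out of headroom. A minimal standalone sketch of that policy follows; the function name and inputs are illustrative, not the driver's actual structures.

```c
/* Illustrative restatement of the MSI-X budgeting above: claim vectors
 * in order (misc, LAN, flow director) from a fixed pool of v_left.
 */
static int budget_msix(int v_left, int ncpus, bool fd_ena)
{
	int v_budget = 0, needed;

	needed = 1;				/* miscellaneous handler */
	if (v_left < needed)
		return -ERANGE;
	v_budget += needed;
	v_left -= needed;

	needed = ncpus < v_left ? ncpus : v_left; /* LAN traffic */
	v_budget += needed;
	v_left -= needed;

	if (fd_ena) {				/* flow director */
		needed = 1;
		if (v_left < needed)
			return -ERANGE;
		v_budget += needed;
		v_left -= needed;
	}

	return v_budget;
}
```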
3227 | 3503 | |
---|
.. | .. |
---|
3232 | 3508 | static void ice_dis_msix(struct ice_pf *pf) |
---|
3233 | 3509 | { |
---|
3234 | 3510 | pci_disable_msix(pf->pdev); |
---|
3235 | | - devm_kfree(&pf->pdev->dev, pf->msix_entries); |
---|
| 3511 | + devm_kfree(ice_pf_to_dev(pf), pf->msix_entries); |
---|
3236 | 3512 | pf->msix_entries = NULL; |
---|
3237 | | - clear_bit(ICE_FLAG_MSIX_ENA, pf->flags); |
---|
3238 | | -} |
---|
3239 | | - |
---|
3240 | | -/** |
---|
3241 | | - * ice_init_interrupt_scheme - Determine proper interrupt scheme |
---|
3242 | | - * @pf: board private structure to initialize |
---|
3243 | | - */ |
---|
3244 | | -static int ice_init_interrupt_scheme(struct ice_pf *pf) |
---|
3245 | | -{ |
---|
3246 | | - int vectors = 0; |
---|
3247 | | - ssize_t size; |
---|
3248 | | - |
---|
3249 | | - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) |
---|
3250 | | - vectors = ice_ena_msix_range(pf); |
---|
3251 | | - else |
---|
3252 | | - return -ENODEV; |
---|
3253 | | - |
---|
3254 | | - if (vectors < 0) |
---|
3255 | | - return vectors; |
---|
3256 | | - |
---|
3257 | | - /* set up vector assignment tracking */ |
---|
3258 | | - size = sizeof(struct ice_res_tracker) + (sizeof(u16) * vectors); |
---|
3259 | | - |
---|
3260 | | - pf->irq_tracker = devm_kzalloc(&pf->pdev->dev, size, GFP_KERNEL); |
---|
3261 | | - if (!pf->irq_tracker) { |
---|
3262 | | - ice_dis_msix(pf); |
---|
3263 | | - return -ENOMEM; |
---|
3264 | | - } |
---|
3265 | | - |
---|
3266 | | - pf->irq_tracker->num_entries = vectors; |
---|
3267 | | - |
---|
3268 | | - return 0; |
---|
3269 | 3513 | } |
---|
3270 | 3514 | |
---|
3271 | 3515 | /** |
---|
.. | .. |
---|
3274 | 3518 | */ |
---|
3275 | 3519 | static void ice_clear_interrupt_scheme(struct ice_pf *pf) |
---|
3276 | 3520 | { |
---|
3277 | | - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) |
---|
3278 | | - ice_dis_msix(pf); |
---|
| 3521 | + ice_dis_msix(pf); |
---|
3279 | 3522 | |
---|
3280 | 3523 | if (pf->irq_tracker) { |
---|
3281 | | - devm_kfree(&pf->pdev->dev, pf->irq_tracker); |
---|
| 3524 | + devm_kfree(ice_pf_to_dev(pf), pf->irq_tracker); |
---|
3282 | 3525 | pf->irq_tracker = NULL; |
---|
3283 | 3526 | } |
---|
| 3527 | +} |
---|
| 3528 | + |
---|
| 3529 | +/** |
---|
| 3530 | + * ice_init_interrupt_scheme - Determine proper interrupt scheme |
---|
| 3531 | + * @pf: board private structure to initialize |
---|
| 3532 | + */ |
---|
| 3533 | +static int ice_init_interrupt_scheme(struct ice_pf *pf) |
---|
| 3534 | +{ |
---|
| 3535 | + int vectors; |
---|
| 3536 | + |
---|
| 3537 | + vectors = ice_ena_msix_range(pf); |
---|
| 3538 | + |
---|
| 3539 | + if (vectors < 0) |
---|
| 3540 | + return vectors; |
---|
| 3541 | + |
---|
| 3542 | + /* set up vector assignment tracking */ |
---|
| 3543 | + pf->irq_tracker = |
---|
| 3544 | + devm_kzalloc(ice_pf_to_dev(pf), sizeof(*pf->irq_tracker) + |
---|
| 3545 | + (sizeof(u16) * vectors), GFP_KERNEL); |
---|
| 3546 | + if (!pf->irq_tracker) { |
---|
| 3547 | + ice_dis_msix(pf); |
---|
| 3548 | + return -ENOMEM; |
---|
| 3549 | + } |
---|
| 3550 | + |
---|
| 3551 | + /* populate SW interrupts pool with number of OS granted IRQs. */ |
---|
| 3552 | + pf->num_avail_sw_msix = (u16)vectors; |
---|
| 3553 | + pf->irq_tracker->num_entries = (u16)vectors; |
---|
| 3554 | + pf->irq_tracker->end = pf->irq_tracker->num_entries; |
---|
| 3555 | + |
---|
| 3556 | + return 0; |
---|
| 3557 | +} |
---|
| 3558 | + |
---|
| 3559 | +/** |
---|
| 3560 | + * ice_is_wol_supported - check if WoL is supported |
---|
| 3561 | + * @hw: pointer to hardware info |
---|
| 3562 | + * |
---|
| 3563 | + * Check if WoL is supported based on the HW configuration. |
---|
| 3564 | + * Returns true if NVM supports and enables WoL for this port, false otherwise |
---|
| 3565 | + */ |
---|
| 3566 | +bool ice_is_wol_supported(struct ice_hw *hw) |
---|
| 3567 | +{ |
---|
| 3568 | + u16 wol_ctrl; |
---|
| 3569 | + |
---|
| 3570 | + /* A bit set to 1 in the NVM Software Reserved Word 2 (WoL control |
---|
| 3571 | + * word) indicates WoL is not supported on the corresponding PF ID. |
---|
| 3572 | + */ |
---|
| 3573 | + if (ice_read_sr_word(hw, ICE_SR_NVM_WOL_CFG, &wol_ctrl)) |
---|
| 3574 | + return false; |
---|
| 3575 | + |
---|
| 3576 | + return !(BIT(hw->port_info->lport) & wol_ctrl); |
---|
| 3577 | +} |
---|
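A short worked example of the control-word check above, under the stated bit semantics (a set bit disables WoL for that port):

```c
#include <stdbool.h>
#include <stdint.h>

#define BIT(n) (1u << (n))

/* With wol_ctrl = 0x0005 (bits 0 and 2 set), ports 0 and 2 report
 * false (WoL not supported) while port 1 reports true.
 */
static bool port_wol_supported(uint16_t wol_ctrl, unsigned int lport)
{
	return !(BIT(lport) & wol_ctrl);
}
```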
| 3578 | + |
---|
| 3579 | +/** |
---|
| 3580 | + * ice_vsi_recfg_qs - Change the number of queues on a VSI |
---|
| 3581 | + * @vsi: VSI being changed |
---|
| 3582 | + * @new_rx: new number of Rx queues |
---|
| 3583 | + * @new_tx: new number of Tx queues |
---|
| 3584 | + * |
---|
| 3585 | + * Only change the number of queues if new_tx or new_rx is non-zero. |
---|
| 3586 | + * |
---|
| 3587 | + * Returns 0 on success. |
---|
| 3588 | + */ |
---|
| 3589 | +int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx) |
---|
| 3590 | +{ |
---|
| 3591 | + struct ice_pf *pf = vsi->back; |
---|
| 3592 | + int err = 0, timeout = 50; |
---|
| 3593 | + |
---|
| 3594 | + if (!new_rx && !new_tx) |
---|
| 3595 | + return -EINVAL; |
---|
| 3596 | + |
---|
| 3597 | + while (test_and_set_bit(__ICE_CFG_BUSY, pf->state)) { |
---|
| 3598 | + timeout--; |
---|
| 3599 | + if (!timeout) |
---|
| 3600 | + return -EBUSY; |
---|
| 3601 | + usleep_range(1000, 2000); |
---|
| 3602 | + } |
---|
| 3603 | + |
---|
| 3604 | + if (new_tx) |
---|
| 3605 | + vsi->req_txq = (u16)new_tx; |
---|
| 3606 | + if (new_rx) |
---|
| 3607 | + vsi->req_rxq = (u16)new_rx; |
---|
| 3608 | + |
---|
| 3609 | + /* set for the next time the netdev is started */ |
---|
| 3610 | + if (!netif_running(vsi->netdev)) { |
---|
| 3611 | + ice_vsi_rebuild(vsi, false); |
---|
| 3612 | + dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n"); |
---|
| 3613 | + goto done; |
---|
| 3614 | + } |
---|
| 3615 | + |
---|
| 3616 | + ice_vsi_close(vsi); |
---|
| 3617 | + ice_vsi_rebuild(vsi, false); |
---|
| 3618 | + ice_pf_dcb_recfg(pf); |
---|
| 3619 | + ice_vsi_open(vsi); |
---|
| 3620 | +done: |
---|
| 3621 | + clear_bit(__ICE_CFG_BUSY, pf->state); |
---|
| 3622 | + return err; |
---|
| 3623 | +} |
---|
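The serialization used above is a bounded spin on a state bit rather than a sleeping lock, since the same bit also guards against concurrent reset paths. A minimal sketch of just that pattern, with a hypothetical helper name:

```c
/* Sketch of the __ICE_CFG_BUSY claim above: retry for roughly 50-100 ms,
 * then give up with -EBUSY. The caller must clear_bit() when done.
 */
static int ice_claim_cfg_bit(unsigned long *state)
{
	int timeout = 50;

	while (test_and_set_bit(__ICE_CFG_BUSY, state)) {
		if (!--timeout)
			return -EBUSY;
		usleep_range(1000, 2000);
	}
	return 0;
}
```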
| 3624 | + |
---|
| 3625 | +/** |
---|
| 3626 | + * ice_set_safe_mode_vlan_cfg - configure PF VSI to allow all VLANs in safe mode |
---|
| 3627 | + * @pf: PF to configure |
---|
| 3628 | + * |
---|
| 3629 | + * No VLAN offloads/filtering are advertised in safe mode so make sure the PF |
---|
| 3630 | + * VSI can still Tx/Rx VLAN tagged packets. |
---|
| 3631 | + */ |
---|
| 3632 | +static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf) |
---|
| 3633 | +{ |
---|
| 3634 | + struct ice_vsi *vsi = ice_get_main_vsi(pf); |
---|
| 3635 | + struct ice_vsi_ctx *ctxt; |
---|
| 3636 | + enum ice_status status; |
---|
| 3637 | + struct ice_hw *hw; |
---|
| 3638 | + |
---|
| 3639 | + if (!vsi) |
---|
| 3640 | + return; |
---|
| 3641 | + |
---|
| 3642 | + ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); |
---|
| 3643 | + if (!ctxt) |
---|
| 3644 | + return; |
---|
| 3645 | + |
---|
| 3646 | + hw = &pf->hw; |
---|
| 3647 | + ctxt->info = vsi->info; |
---|
| 3648 | + |
---|
| 3649 | + ctxt->info.valid_sections = |
---|
| 3650 | + cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID | |
---|
| 3651 | + ICE_AQ_VSI_PROP_SECURITY_VALID | |
---|
| 3652 | + ICE_AQ_VSI_PROP_SW_VALID); |
---|
| 3653 | + |
---|
| 3654 | + /* disable VLAN anti-spoof */ |
---|
| 3655 | + ctxt->info.sec_flags &= ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA << |
---|
| 3656 | + ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S); |
---|
| 3657 | + |
---|
| 3658 | + /* disable VLAN pruning and keep all other settings */ |
---|
| 3659 | + ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; |
---|
| 3660 | + |
---|
| 3661 | + /* allow all VLANs on Tx and don't strip on Rx */ |
---|
| 3662 | + ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL | |
---|
| 3663 | + ICE_AQ_VSI_VLAN_EMOD_NOTHING; |
---|
| 3664 | + |
---|
| 3665 | + status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); |
---|
| 3666 | + if (status) { |
---|
| 3667 | + dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %s aq_err %s\n", |
---|
| 3668 | + ice_stat_str(status), |
---|
| 3669 | + ice_aq_str(hw->adminq.sq_last_status)); |
---|
| 3670 | + } else { |
---|
| 3671 | + vsi->info.sec_flags = ctxt->info.sec_flags; |
---|
| 3672 | + vsi->info.sw_flags2 = ctxt->info.sw_flags2; |
---|
| 3673 | + vsi->info.vlan_flags = ctxt->info.vlan_flags; |
---|
| 3674 | + } |
---|
| 3675 | + |
---|
| 3676 | + kfree(ctxt); |
---|
| 3677 | +} |
---|
| 3678 | + |
---|
| 3679 | +/** |
---|
| 3680 | + * ice_log_pkg_init - log result of DDP package load |
---|
| 3681 | + * @hw: pointer to hardware info |
---|
| 3682 | + * @status: status of package load |
---|
| 3683 | + */ |
---|
| 3684 | +static void |
---|
| 3685 | +ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status) |
---|
| 3686 | +{ |
---|
| 3687 | + struct ice_pf *pf = (struct ice_pf *)hw->back; |
---|
| 3688 | + struct device *dev = ice_pf_to_dev(pf); |
---|
| 3689 | + |
---|
| 3690 | + switch (*status) { |
---|
| 3691 | + case ICE_SUCCESS: |
---|
| 3692 | + /* The package download AdminQ command returned success, either |
---|
| 3693 | + * because this download succeeded or because a package is already |
---|
| 3694 | + * loaded on the device (ICE_ERR_AQ_NO_WORK). |
---|
| 3695 | + */ |
---|
| 3696 | + if (hw->pkg_ver.major == hw->active_pkg_ver.major && |
---|
| 3697 | + hw->pkg_ver.minor == hw->active_pkg_ver.minor && |
---|
| 3698 | + hw->pkg_ver.update == hw->active_pkg_ver.update && |
---|
| 3699 | + hw->pkg_ver.draft == hw->active_pkg_ver.draft && |
---|
| 3700 | + !memcmp(hw->pkg_name, hw->active_pkg_name, |
---|
| 3701 | + sizeof(hw->pkg_name))) { |
---|
| 3702 | + if (hw->pkg_dwnld_status == ICE_AQ_RC_EEXIST) |
---|
| 3703 | + dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n", |
---|
| 3704 | + hw->active_pkg_name, |
---|
| 3705 | + hw->active_pkg_ver.major, |
---|
| 3706 | + hw->active_pkg_ver.minor, |
---|
| 3707 | + hw->active_pkg_ver.update, |
---|
| 3708 | + hw->active_pkg_ver.draft); |
---|
| 3709 | + else |
---|
| 3710 | + dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n", |
---|
| 3711 | + hw->active_pkg_name, |
---|
| 3712 | + hw->active_pkg_ver.major, |
---|
| 3713 | + hw->active_pkg_ver.minor, |
---|
| 3714 | + hw->active_pkg_ver.update, |
---|
| 3715 | + hw->active_pkg_ver.draft); |
---|
| 3716 | + } else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ || |
---|
| 3717 | + hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) { |
---|
| 3718 | + dev_err(dev, "The device has a DDP package that is not supported by the driver. The device has package '%s' version %d.%d.x.x. The driver requires version %d.%d.x.x. Entering Safe Mode.\n", |
---|
| 3719 | + hw->active_pkg_name, |
---|
| 3720 | + hw->active_pkg_ver.major, |
---|
| 3721 | + hw->active_pkg_ver.minor, |
---|
| 3722 | + ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); |
---|
| 3723 | + *status = ICE_ERR_NOT_SUPPORTED; |
---|
| 3724 | + } else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ && |
---|
| 3725 | + hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) { |
---|
| 3726 | + dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package '%s' version %d.%d.%d.%d. The package file found by the driver: '%s' version %d.%d.%d.%d.\n", |
---|
| 3727 | + hw->active_pkg_name, |
---|
| 3728 | + hw->active_pkg_ver.major, |
---|
| 3729 | + hw->active_pkg_ver.minor, |
---|
| 3730 | + hw->active_pkg_ver.update, |
---|
| 3731 | + hw->active_pkg_ver.draft, |
---|
| 3732 | + hw->pkg_name, |
---|
| 3733 | + hw->pkg_ver.major, |
---|
| 3734 | + hw->pkg_ver.minor, |
---|
| 3735 | + hw->pkg_ver.update, |
---|
| 3736 | + hw->pkg_ver.draft); |
---|
| 3737 | + } else { |
---|
| 3738 | + dev_err(dev, "An unknown error occurred when loading the DDP package, please reboot the system. If the problem persists, update the NVM. Entering Safe Mode.\n"); |
---|
| 3739 | + *status = ICE_ERR_NOT_SUPPORTED; |
---|
| 3740 | + } |
---|
| 3741 | + break; |
---|
| 3742 | + case ICE_ERR_FW_DDP_MISMATCH: |
---|
| 3743 | + dev_err(dev, "The firmware loaded on the device is not compatible with the DDP package. Please update the device's NVM. Entering safe mode.\n"); |
---|
| 3744 | + break; |
---|
| 3745 | + case ICE_ERR_BUF_TOO_SHORT: |
---|
| 3746 | + case ICE_ERR_CFG: |
---|
| 3747 | + dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n"); |
---|
| 3748 | + break; |
---|
| 3749 | + case ICE_ERR_NOT_SUPPORTED: |
---|
| 3750 | + /* Package File version not supported */ |
---|
| 3751 | + if (hw->pkg_ver.major > ICE_PKG_SUPP_VER_MAJ || |
---|
| 3752 | + (hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ && |
---|
| 3753 | + hw->pkg_ver.minor > ICE_PKG_SUPP_VER_MNR)) |
---|
| 3754 | + dev_err(dev, "The DDP package file version is higher than the driver supports. Please use an updated driver. Entering Safe Mode.\n"); |
---|
| 3755 | + else if (hw->pkg_ver.major < ICE_PKG_SUPP_VER_MAJ || |
---|
| 3756 | + (hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ && |
---|
| 3757 | + hw->pkg_ver.minor < ICE_PKG_SUPP_VER_MNR)) |
---|
| 3758 | + dev_err(dev, "The DDP package file version is lower than the driver supports. The driver requires version %d.%d.x.x. Please use an updated DDP Package file. Entering Safe Mode.\n", |
---|
| 3759 | + ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); |
---|
| 3760 | + break; |
---|
| 3761 | + case ICE_ERR_AQ_ERROR: |
---|
| 3762 | + switch (hw->pkg_dwnld_status) { |
---|
| 3763 | + case ICE_AQ_RC_ENOSEC: |
---|
| 3764 | + case ICE_AQ_RC_EBADSIG: |
---|
| 3765 | + dev_err(dev, "The DDP package could not be loaded because its signature is not valid. Please use a valid DDP Package. Entering Safe Mode.\n"); |
---|
| 3766 | + return; |
---|
| 3767 | + case ICE_AQ_RC_ESVN: |
---|
| 3768 | + dev_err(dev, "The DDP Package could not be loaded because its security revision is too low. Please use an updated DDP Package. Entering Safe Mode.\n"); |
---|
| 3769 | + return; |
---|
| 3770 | + case ICE_AQ_RC_EBADMAN: |
---|
| 3771 | + case ICE_AQ_RC_EBADBUF: |
---|
| 3772 | + dev_err(dev, "An error occurred on the device while loading the DDP package. The device will be reset.\n"); |
---|
| 3773 | + /* poll for reset to complete */ |
---|
| 3774 | + if (ice_check_reset(hw)) |
---|
| 3775 | + dev_err(dev, "Error resetting device. Please reload the driver\n"); |
---|
| 3776 | + return; |
---|
| 3777 | + default: |
---|
| 3778 | + break; |
---|
| 3779 | + } |
---|
| 3780 | + fallthrough; |
---|
| 3781 | + default: |
---|
| 3782 | + dev_err(dev, "An unknown error (%d) occurred when loading the DDP package. Entering Safe Mode.\n", |
---|
| 3783 | + *status); |
---|
| 3784 | + break; |
---|
| 3785 | + } |
---|
| 3786 | +} |
---|
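The compatibility gate encoded above reduces to an exact match on the (major, minor) pair: the update and draft digits may differ without forcing Safe Mode. A minimal restatement, assuming the two supported-version constants:

```c
/* Only the major.minor pair is significant for DDP compatibility;
 * the update and draft fields are informational.
 */
static bool ice_pkg_ver_compatible(u8 major, u8 minor)
{
	return major == ICE_PKG_SUPP_VER_MAJ &&
	       minor == ICE_PKG_SUPP_VER_MNR;
}
```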
| 3787 | + |
---|
| 3788 | +/** |
---|
| 3789 | + * ice_load_pkg - load/reload the DDP Package file |
---|
| 3790 | + * @firmware: firmware structure when firmware requested or NULL for reload |
---|
| 3791 | + * @pf: pointer to the PF instance |
---|
| 3792 | + * |
---|
| 3793 | + * Called on probe and post CORER/GLOBR rebuild to load DDP Package and |
---|
| 3794 | + * initialize HW tables. |
---|
| 3795 | + */ |
---|
| 3796 | +static void |
---|
| 3797 | +ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf) |
---|
| 3798 | +{ |
---|
| 3799 | + enum ice_status status = ICE_ERR_PARAM; |
---|
| 3800 | + struct device *dev = ice_pf_to_dev(pf); |
---|
| 3801 | + struct ice_hw *hw = &pf->hw; |
---|
| 3802 | + |
---|
| 3803 | + /* Load DDP Package */ |
---|
| 3804 | + if (firmware && !hw->pkg_copy) { |
---|
| 3805 | + status = ice_copy_and_init_pkg(hw, firmware->data, |
---|
| 3806 | + firmware->size); |
---|
| 3807 | + ice_log_pkg_init(hw, &status); |
---|
| 3808 | + } else if (!firmware && hw->pkg_copy) { |
---|
| 3809 | + /* Reload package during rebuild after CORER/GLOBR reset */ |
---|
| 3810 | + status = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size); |
---|
| 3811 | + ice_log_pkg_init(hw, &status); |
---|
| 3812 | + } else { |
---|
| 3813 | + dev_err(dev, "The DDP package file failed to load. Entering Safe Mode.\n"); |
---|
| 3814 | + } |
---|
| 3815 | + |
---|
| 3816 | + if (status) { |
---|
| 3817 | + /* Safe Mode */ |
---|
| 3818 | + clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags); |
---|
| 3819 | + return; |
---|
| 3820 | + } |
---|
| 3821 | + |
---|
| 3822 | + /* Successful download package is the precondition for advanced |
---|
| 3823 | + * features, hence setting the ICE_FLAG_ADV_FEATURES flag |
---|
| 3824 | + */ |
---|
| 3825 | + set_bit(ICE_FLAG_ADV_FEATURES, pf->flags); |
---|
| 3826 | +} |
---|
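Per the kernel-doc, the helper is reached with two different first arguments depending on the flow; a hedged sketch of the two call shapes (the surrounding probe/rebuild logic is assumed, not shown in this hunk):

```c
/* Hypothetical wrapper showing the two call shapes of ice_load_pkg() */
static void example_pkg_flow(const struct firmware *fw, struct ice_pf *pf,
			     bool rebuilding)
{
	if (rebuilding)
		ice_load_pkg(NULL, pf);	/* reuse the retained hw->pkg_copy */
	else
		ice_load_pkg(fw, pf);	/* fresh copy from request_firmware() */
}
```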
| 3827 | + |
---|
| 3828 | +/** |
---|
| 3829 | + * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines |
---|
| 3830 | + * @pf: pointer to the PF structure |
---|
| 3831 | + * |
---|
| 3832 | + * There is no error returned here because the driver should be able to handle |
---|
| 3833 | + * 128 Byte cache lines, so we only print a warning in case issues are seen, |
---|
| 3834 | + * specifically with Tx. |
---|
| 3835 | + */ |
---|
| 3836 | +static void ice_verify_cacheline_size(struct ice_pf *pf) |
---|
| 3837 | +{ |
---|
| 3838 | + if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M) |
---|
| 3839 | + dev_warn(ice_pf_to_dev(pf), "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n", |
---|
| 3840 | + ICE_CACHE_LINE_BYTES); |
---|
| 3841 | +} |
---|
| 3842 | + |
---|
| 3843 | +/** |
---|
| 3844 | + * ice_send_version - update firmware with driver version |
---|
| 3845 | + * @pf: PF struct |
---|
| 3846 | + * |
---|
| 3847 | + * Returns ICE_SUCCESS on success, else error code |
---|
| 3848 | + */ |
---|
| 3849 | +static enum ice_status ice_send_version(struct ice_pf *pf) |
---|
| 3850 | +{ |
---|
| 3851 | + struct ice_driver_ver dv; |
---|
| 3852 | + |
---|
| 3853 | + dv.major_ver = 0xff; |
---|
| 3854 | + dv.minor_ver = 0xff; |
---|
| 3855 | + dv.build_ver = 0xff; |
---|
| 3856 | + dv.subbuild_ver = 0; |
---|
| 3857 | + strscpy((char *)dv.driver_string, UTS_RELEASE, |
---|
| 3858 | + sizeof(dv.driver_string)); |
---|
| 3859 | + return ice_aq_send_driver_ver(&pf->hw, &dv, NULL); |
---|
| 3860 | +} |
---|
| 3861 | + |
---|
| 3862 | +/** |
---|
| 3863 | + * ice_init_fdir - Initialize flow director VSI and configuration |
---|
| 3864 | + * @pf: pointer to the PF instance |
---|
| 3865 | + * |
---|
| 3866 | + * returns 0 on success, negative on error |
---|
| 3867 | + */ |
---|
| 3868 | +static int ice_init_fdir(struct ice_pf *pf) |
---|
| 3869 | +{ |
---|
| 3870 | + struct device *dev = ice_pf_to_dev(pf); |
---|
| 3871 | + struct ice_vsi *ctrl_vsi; |
---|
| 3872 | + int err; |
---|
| 3873 | + |
---|
| 3874 | + /* Side Band Flow Director needs to have a control VSI. |
---|
| 3875 | + * Allocate it and store it in the PF. |
---|
| 3876 | + */ |
---|
| 3877 | + ctrl_vsi = ice_ctrl_vsi_setup(pf, pf->hw.port_info); |
---|
| 3878 | + if (!ctrl_vsi) { |
---|
| 3879 | + dev_dbg(dev, "could not create control VSI\n"); |
---|
| 3880 | + return -ENOMEM; |
---|
| 3881 | + } |
---|
| 3882 | + |
---|
| 3883 | + err = ice_vsi_open_ctrl(ctrl_vsi); |
---|
| 3884 | + if (err) { |
---|
| 3885 | + dev_dbg(dev, "could not open control VSI\n"); |
---|
| 3886 | + goto err_vsi_open; |
---|
| 3887 | + } |
---|
| 3888 | + |
---|
| 3889 | + mutex_init(&pf->hw.fdir_fltr_lock); |
---|
| 3890 | + |
---|
| 3891 | + err = ice_fdir_create_dflt_rules(pf); |
---|
| 3892 | + if (err) |
---|
| 3893 | + goto err_fdir_rule; |
---|
| 3894 | + |
---|
| 3895 | + return 0; |
---|
| 3896 | + |
---|
| 3897 | +err_fdir_rule: |
---|
| 3898 | + ice_fdir_release_flows(&pf->hw); |
---|
| 3899 | + ice_vsi_close(ctrl_vsi); |
---|
| 3900 | +err_vsi_open: |
---|
| 3901 | + ice_vsi_release(ctrl_vsi); |
---|
| 3902 | + if (pf->ctrl_vsi_idx != ICE_NO_VSI) { |
---|
| 3903 | + pf->vsi[pf->ctrl_vsi_idx] = NULL; |
---|
| 3904 | + pf->ctrl_vsi_idx = ICE_NO_VSI; |
---|
| 3905 | + } |
---|
| 3906 | + return err; |
---|
| 3907 | +} |
---|
| 3908 | + |
---|
| 3909 | +/** |
---|
| 3910 | + * ice_get_opt_fw_name - return optional firmware file name or NULL |
---|
| 3911 | + * @pf: pointer to the PF instance |
---|
| 3912 | + */ |
---|
| 3913 | +static char *ice_get_opt_fw_name(struct ice_pf *pf) |
---|
| 3914 | +{ |
---|
| 3915 | + /* Optional firmware name is the same as the default, with a dash |
---|
| 3916 | + * followed by an EUI-64 identifier (PCIe Device Serial Number) |
---|
| 3917 | + */ |
---|
| 3918 | + struct pci_dev *pdev = pf->pdev; |
---|
| 3919 | + char *opt_fw_filename; |
---|
| 3920 | + u64 dsn; |
---|
| 3921 | + |
---|
| 3922 | + /* Determine the name of the optional file using the DSN (two |
---|
| 3923 | + * dwords following the start of the DSN Capability). |
---|
| 3924 | + */ |
---|
| 3925 | + dsn = pci_get_dsn(pdev); |
---|
| 3926 | + if (!dsn) |
---|
| 3927 | + return NULL; |
---|
| 3928 | + |
---|
| 3929 | + opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL); |
---|
| 3930 | + if (!opt_fw_filename) |
---|
| 3931 | + return NULL; |
---|
| 3932 | + |
---|
| 3933 | + snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llx.pkg", |
---|
| 3934 | + ICE_DDP_PKG_PATH, dsn); |
---|
| 3935 | + |
---|
| 3936 | + return opt_fw_filename; |
---|
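As a concrete (hypothetical) example, a device whose DSN reads 0x0123456789abcdef makes the driver try "intel/ice/ddp/ice-0123456789abcdef.pkg" before falling back to the default ice.pkg. A standalone sketch of the formatting step:

```c
#include <stdio.h>
#include <stdint.h>

/* Mirror of the snprintf above: a DSN of 0x0123456789abcdef yields
 * "intel/ice/ddp/ice-0123456789abcdef.pkg".
 */
static void format_opt_fw_name(char *buf, size_t len, uint64_t dsn)
{
	snprintf(buf, len, "intel/ice/ddp/ice-%016llx.pkg",
		 (unsigned long long)dsn);
}
```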
| 3937 | +} |
---|
| 3938 | + |
---|
| 3939 | +/** |
---|
| 3940 | + * ice_request_fw - Device initialization routine |
---|
| 3941 | + * @pf: pointer to the PF instance |
---|
| 3942 | + */ |
---|
| 3943 | +static void ice_request_fw(struct ice_pf *pf) |
---|
| 3944 | +{ |
---|
| 3945 | + char *opt_fw_filename = ice_get_opt_fw_name(pf); |
---|
| 3946 | + const struct firmware *firmware = NULL; |
---|
| 3947 | + struct device *dev = ice_pf_to_dev(pf); |
---|
| 3948 | + int err = 0; |
---|
| 3949 | + |
---|
| 3950 | + /* optional device-specific DDP (if present) overrides the default DDP |
---|
| 3951 | + * package file. The kernel logs a debug message if the file doesn't |
---|
| 3952 | + * exist, and warning messages for other errors. |
---|
| 3953 | + */ |
---|
| 3954 | + if (opt_fw_filename) { |
---|
| 3955 | + err = firmware_request_nowarn(&firmware, opt_fw_filename, dev); |
---|
| 3956 | + if (err) { |
---|
| 3957 | + kfree(opt_fw_filename); |
---|
| 3958 | + goto dflt_pkg_load; |
---|
| 3959 | + } |
---|
| 3960 | + |
---|
| 3961 | + /* request for firmware was successful. Download to device */ |
---|
| 3962 | + ice_load_pkg(firmware, pf); |
---|
| 3963 | + kfree(opt_fw_filename); |
---|
| 3964 | + release_firmware(firmware); |
---|
| 3965 | + return; |
---|
| 3966 | + } |
---|
| 3967 | + |
---|
| 3968 | +dflt_pkg_load: |
---|
| 3969 | + err = request_firmware(&firmware, ICE_DDP_PKG_FILE, dev); |
---|
| 3970 | + if (err) { |
---|
| 3971 | + dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n"); |
---|
| 3972 | + return; |
---|
| 3973 | + } |
---|
| 3974 | + |
---|
| 3975 | + /* request for firmware was successful. Download to device */ |
---|
| 3976 | + ice_load_pkg(firmware, pf); |
---|
| 3977 | + release_firmware(firmware); |
---|
| 3978 | +} |
---|
| 3979 | + |
---|
| 3980 | +/** |
---|
| 3981 | + * ice_print_wake_reason - show the wake up cause in the log |
---|
| 3982 | + * @pf: pointer to the PF struct |
---|
| 3983 | + */ |
---|
| 3984 | +static void ice_print_wake_reason(struct ice_pf *pf) |
---|
| 3985 | +{ |
---|
| 3986 | + u32 wus = pf->wakeup_reason; |
---|
| 3987 | + const char *wake_str; |
---|
| 3988 | + |
---|
| 3989 | + /* if no wake event, nothing to print */ |
---|
| 3990 | + if (!wus) |
---|
| 3991 | + return; |
---|
| 3992 | + |
---|
| 3993 | + if (wus & PFPM_WUS_LNKC_M) |
---|
| 3994 | + wake_str = "Link\n"; |
---|
| 3995 | + else if (wus & PFPM_WUS_MAG_M) |
---|
| 3996 | + wake_str = "Magic Packet\n"; |
---|
| 3997 | + else if (wus & PFPM_WUS_MNG_M) |
---|
| 3998 | + wake_str = "Management\n"; |
---|
| 3999 | + else if (wus & PFPM_WUS_FW_RST_WK_M) |
---|
| 4000 | + wake_str = "Firmware Reset\n"; |
---|
| 4001 | + else |
---|
| 4002 | + wake_str = "Unknown\n"; |
---|
| 4003 | + |
---|
| 4004 | + dev_info(ice_pf_to_dev(pf), "Wake reason: %s", wake_str); |
---|
3284 | 4005 | } |
---|
3285 | 4006 | |
---|
3286 | 4007 | /** |
---|
.. | .. |
---|
3290 | 4011 | * |
---|
3291 | 4012 | * Returns 0 on success, negative on failure |
---|
3292 | 4013 | */ |
---|
3293 | | -static int ice_probe(struct pci_dev *pdev, |
---|
3294 | | - const struct pci_device_id __always_unused *ent) |
---|
| 4014 | +static int |
---|
| 4015 | +ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) |
---|
3295 | 4016 | { |
---|
| 4017 | + struct device *dev = &pdev->dev; |
---|
3296 | 4018 | struct ice_pf *pf; |
---|
3297 | 4019 | struct ice_hw *hw; |
---|
3298 | | - int err; |
---|
| 4020 | + int i, err; |
---|
3299 | 4021 | |
---|
3300 | | - /* this driver uses devres, see Documentation/driver-model/devres.txt */ |
---|
| 4022 | + if (pdev->is_virtfn) { |
---|
| 4023 | + dev_err(dev, "can't probe a virtual function\n"); |
---|
| 4024 | + return -EINVAL; |
---|
| 4025 | + } |
---|
| 4026 | + |
---|
| 4027 | + /* this driver uses devres, see |
---|
| 4028 | + * Documentation/driver-api/driver-model/devres.rst |
---|
| 4029 | + */ |
---|
3301 | 4030 | err = pcim_enable_device(pdev); |
---|
3302 | 4031 | if (err) |
---|
3303 | 4032 | return err; |
---|
3304 | 4033 | |
---|
3305 | 4034 | err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), pci_name(pdev)); |
---|
3306 | 4035 | if (err) { |
---|
3307 | | - dev_err(&pdev->dev, "BAR0 I/O map error %d\n", err); |
---|
| 4036 | + dev_err(dev, "BAR0 I/O map error %d\n", err); |
---|
3308 | 4037 | return err; |
---|
3309 | 4038 | } |
---|
3310 | 4039 | |
---|
3311 | | - pf = devm_kzalloc(&pdev->dev, sizeof(*pf), GFP_KERNEL); |
---|
| 4040 | + pf = ice_allocate_pf(dev); |
---|
3312 | 4041 | if (!pf) |
---|
3313 | 4042 | return -ENOMEM; |
---|
3314 | 4043 | |
---|
3315 | | - /* set up for high or low dma */ |
---|
3316 | | - err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); |
---|
| 4044 | + /* set up for high or low DMA */ |
---|
| 4045 | + err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); |
---|
3317 | 4046 | if (err) |
---|
3318 | | - err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); |
---|
| 4047 | + err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); |
---|
3319 | 4048 | if (err) { |
---|
3320 | | - dev_err(&pdev->dev, "DMA configuration failed: 0x%x\n", err); |
---|
| 4049 | + dev_err(dev, "DMA configuration failed: 0x%x\n", err); |
---|
3321 | 4050 | return err; |
---|
3322 | 4051 | } |
---|
3323 | 4052 | |
---|
.. | .. |
---|
3327 | 4056 | pf->pdev = pdev; |
---|
3328 | 4057 | pci_set_drvdata(pdev, pf); |
---|
3329 | 4058 | set_bit(__ICE_DOWN, pf->state); |
---|
| 4059 | + /* Disable service task until DOWN bit is cleared */ |
---|
| 4060 | + set_bit(__ICE_SERVICE_DIS, pf->state); |
---|
3330 | 4061 | |
---|
3331 | 4062 | hw = &pf->hw; |
---|
3332 | 4063 | hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0]; |
---|
| 4064 | + pci_save_state(pdev); |
---|
| 4065 | + |
---|
3333 | 4066 | hw->back = pf; |
---|
3334 | 4067 | hw->vendor_id = pdev->vendor; |
---|
3335 | 4068 | hw->device_id = pdev->device; |
---|
.. | .. |
---|
3342 | 4075 | |
---|
3343 | 4076 | pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M); |
---|
3344 | 4077 | |
---|
| 4078 | + err = ice_devlink_register(pf); |
---|
| 4079 | + if (err) { |
---|
| 4080 | + dev_err(dev, "ice_devlink_register failed: %d\n", err); |
---|
| 4081 | + goto err_exit_unroll; |
---|
| 4082 | + } |
---|
| 4083 | + |
---|
3345 | 4084 | #ifndef CONFIG_DYNAMIC_DEBUG |
---|
3346 | 4085 | if (debug < -1) |
---|
3347 | 4086 | hw->debug_mask = debug; |
---|
.. | .. |
---|
3349 | 4088 | |
---|
3350 | 4089 | err = ice_init_hw(hw); |
---|
3351 | 4090 | if (err) { |
---|
3352 | | - dev_err(&pdev->dev, "ice_init_hw failed: %d\n", err); |
---|
| 4091 | + dev_err(dev, "ice_init_hw failed: %d\n", err); |
---|
3353 | 4092 | err = -EIO; |
---|
3354 | 4093 | goto err_exit_unroll; |
---|
3355 | 4094 | } |
---|
3356 | 4095 | |
---|
3357 | | - dev_info(&pdev->dev, "firmware %d.%d.%05d api %d.%d\n", |
---|
3358 | | - hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build, |
---|
3359 | | - hw->api_maj_ver, hw->api_min_ver); |
---|
| 4096 | + ice_request_fw(pf); |
---|
3360 | 4097 | |
---|
3361 | | - ice_init_pf(pf); |
---|
| 4098 | + /* if ice_request_fw fails, the ICE_FLAG_ADV_FEATURES bit won't be |
---|
| 4099 | + * set in pf->flags, which will cause ice_is_safe_mode to return |
---|
| 4100 | + * true |
---|
| 4101 | + */ |
---|
| 4102 | + if (ice_is_safe_mode(pf)) { |
---|
| 4103 | + dev_err(dev, "Package download failed. Advanced features disabled - Device now in Safe Mode\n"); |
---|
| 4104 | + /* we already got function/device capabilities but these don't |
---|
| 4105 | + * reflect what the driver needs to do in safe mode. Instead of |
---|
| 4106 | + * adding conditional logic everywhere to ignore these |
---|
| 4107 | + * device/function capabilities, override them. |
---|
| 4108 | + */ |
---|
| 4109 | + ice_set_safe_mode_caps(hw); |
---|
| 4110 | + } |
---|
3362 | 4111 | |
---|
3363 | | - ice_determine_q_usage(pf); |
---|
| 4112 | + err = ice_init_pf(pf); |
---|
| 4113 | + if (err) { |
---|
| 4114 | + dev_err(dev, "ice_init_pf failed: %d\n", err); |
---|
| 4115 | + goto err_init_pf_unroll; |
---|
| 4116 | + } |
---|
3364 | 4117 | |
---|
3365 | | - pf->num_alloc_vsi = min_t(u16, ICE_MAX_VSI_ALLOC, |
---|
3366 | | - hw->func_caps.guaranteed_num_vsi); |
---|
| 4118 | + ice_devlink_init_regions(pf); |
---|
| 4119 | + |
---|
| 4120 | + pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port; |
---|
| 4121 | + pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port; |
---|
| 4122 | + pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP; |
---|
| 4123 | + pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared; |
---|
| 4124 | + i = 0; |
---|
| 4125 | + if (pf->hw.tnl.valid_count[TNL_VXLAN]) { |
---|
| 4126 | + pf->hw.udp_tunnel_nic.tables[i].n_entries = |
---|
| 4127 | + pf->hw.tnl.valid_count[TNL_VXLAN]; |
---|
| 4128 | + pf->hw.udp_tunnel_nic.tables[i].tunnel_types = |
---|
| 4129 | + UDP_TUNNEL_TYPE_VXLAN; |
---|
| 4130 | + i++; |
---|
| 4131 | + } |
---|
| 4132 | + if (pf->hw.tnl.valid_count[TNL_GENEVE]) { |
---|
| 4133 | + pf->hw.udp_tunnel_nic.tables[i].n_entries = |
---|
| 4134 | + pf->hw.tnl.valid_count[TNL_GENEVE]; |
---|
| 4135 | + pf->hw.udp_tunnel_nic.tables[i].tunnel_types = |
---|
| 4136 | + UDP_TUNNEL_TYPE_GENEVE; |
---|
| 4137 | + i++; |
---|
| 4138 | + } |
---|
| 4139 | + |
---|
| 4140 | + pf->num_alloc_vsi = hw->func_caps.guar_num_vsi; |
---|
3367 | 4141 | if (!pf->num_alloc_vsi) { |
---|
3368 | 4142 | err = -EIO; |
---|
3369 | 4143 | goto err_init_pf_unroll; |
---|
3370 | 4144 | } |
---|
| 4145 | + if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) { |
---|
| 4146 | + dev_warn(&pf->pdev->dev, |
---|
| 4147 | + "limiting the VSI count due to UDP tunnel limitation %d > %d\n", |
---|
| 4148 | + pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES); |
---|
| 4149 | + pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES; |
---|
| 4150 | + } |
---|
3371 | 4151 | |
---|
3372 | | - pf->vsi = devm_kcalloc(&pdev->dev, pf->num_alloc_vsi, |
---|
3373 | | - sizeof(struct ice_vsi *), GFP_KERNEL); |
---|
| 4152 | + pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi), |
---|
| 4153 | + GFP_KERNEL); |
---|
3374 | 4154 | if (!pf->vsi) { |
---|
3375 | 4155 | err = -ENOMEM; |
---|
3376 | 4156 | goto err_init_pf_unroll; |
---|
.. | .. |
---|
3378 | 4158 | |
---|
3379 | 4159 | err = ice_init_interrupt_scheme(pf); |
---|
3380 | 4160 | if (err) { |
---|
3381 | | - dev_err(&pdev->dev, |
---|
3382 | | - "ice_init_interrupt_scheme failed: %d\n", err); |
---|
| 4161 | + dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err); |
---|
3383 | 4162 | err = -EIO; |
---|
3384 | | - goto err_init_interrupt_unroll; |
---|
| 4163 | + goto err_init_vsi_unroll; |
---|
3385 | 4164 | } |
---|
3386 | 4165 | |
---|
3387 | 4166 | /* In case of MSIX we are going to setup the misc vector right here |
---|
.. | .. |
---|
3389 | 4168 | * the misc functionality and queue processing is combined in |
---|
3390 | 4169 | * the same vector and that gets setup at open. |
---|
3391 | 4170 | */ |
---|
3392 | | - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) { |
---|
3393 | | - err = ice_req_irq_msix_misc(pf); |
---|
3394 | | - if (err) { |
---|
3395 | | - dev_err(&pdev->dev, |
---|
3396 | | - "setup of misc vector failed: %d\n", err); |
---|
3397 | | - goto err_init_interrupt_unroll; |
---|
3398 | | - } |
---|
| 4171 | + err = ice_req_irq_msix_misc(pf); |
---|
| 4172 | + if (err) { |
---|
| 4173 | + dev_err(dev, "setup of misc vector failed: %d\n", err); |
---|
| 4174 | + goto err_init_interrupt_unroll; |
---|
3399 | 4175 | } |
---|
3400 | 4176 | |
---|
3401 | 4177 | /* create switch struct for the switch element created by FW on boot */ |
---|
3402 | | - pf->first_sw = devm_kzalloc(&pdev->dev, sizeof(struct ice_sw), |
---|
3403 | | - GFP_KERNEL); |
---|
| 4178 | + pf->first_sw = devm_kzalloc(dev, sizeof(*pf->first_sw), GFP_KERNEL); |
---|
3404 | 4179 | if (!pf->first_sw) { |
---|
3405 | 4180 | err = -ENOMEM; |
---|
3406 | 4181 | goto err_msix_misc_unroll; |
---|
3407 | 4182 | } |
---|
3408 | 4183 | |
---|
3409 | | - pf->first_sw->bridge_mode = BRIDGE_MODE_VEB; |
---|
| 4184 | + if (hw->evb_veb) |
---|
| 4185 | + pf->first_sw->bridge_mode = BRIDGE_MODE_VEB; |
---|
| 4186 | + else |
---|
| 4187 | + pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA; |
---|
| 4188 | + |
---|
3410 | 4189 | pf->first_sw->pf = pf; |
---|
3411 | 4190 | |
---|
3412 | 4191 | /* record the sw_id available for later use */ |
---|
.. | .. |
---|
3414 | 4193 | |
---|
3415 | 4194 | err = ice_setup_pf_sw(pf); |
---|
3416 | 4195 | if (err) { |
---|
3417 | | - dev_err(&pdev->dev, |
---|
3418 | | - "probe failed due to setup pf switch:%d\n", err); |
---|
| 4196 | + dev_err(dev, "probe failed due to setup PF switch: %d\n", err); |
---|
3419 | 4197 | goto err_alloc_sw_unroll; |
---|
3420 | 4198 | } |
---|
3421 | 4199 | |
---|
3422 | | - /* Driver is mostly up */ |
---|
3423 | | - clear_bit(__ICE_DOWN, pf->state); |
---|
| 4200 | + clear_bit(__ICE_SERVICE_DIS, pf->state); |
---|
| 4201 | + |
---|
| 4202 | + /* tell the firmware we are up */ |
---|
| 4203 | + err = ice_send_version(pf); |
---|
| 4204 | + if (err) { |
---|
| 4205 | + dev_err(dev, "probe failed sending driver version %s. error: %d\n", |
---|
| 4206 | + UTS_RELEASE, err); |
---|
| 4207 | + goto err_send_version_unroll; |
---|
| 4208 | + } |
---|
3424 | 4209 | |
---|
3425 | 4210 | /* since everything is good, start the service timer */ |
---|
3426 | 4211 | mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); |
---|
3427 | 4212 | |
---|
3428 | 4213 | err = ice_init_link_events(pf->hw.port_info); |
---|
3429 | 4214 | if (err) { |
---|
3430 | | - dev_err(&pdev->dev, "ice_init_link_events failed: %d\n", err); |
---|
3431 | | - goto err_alloc_sw_unroll; |
---|
| 4215 | + dev_err(dev, "ice_init_link_events failed: %d\n", err); |
---|
| 4216 | + goto err_send_version_unroll; |
---|
3432 | 4217 | } |
---|
3433 | 4218 | |
---|
| 4219 | + /* not a fatal error if this fails */ |
---|
| 4220 | + err = ice_init_nvm_phy_type(pf->hw.port_info); |
---|
| 4221 | + if (err) |
---|
| 4222 | + dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err); |
---|
| 4223 | + |
---|
| 4224 | + /* not a fatal error if this fails */ |
---|
| 4225 | + err = ice_update_link_info(pf->hw.port_info); |
---|
| 4226 | + if (err) |
---|
| 4227 | + dev_err(dev, "ice_update_link_info failed: %d\n", err); |
---|
| 4228 | + |
---|
| 4229 | + ice_init_link_dflt_override(pf->hw.port_info); |
---|
| 4230 | + |
---|
| 4231 | + /* if media available, initialize PHY settings */ |
---|
| 4232 | + if (pf->hw.port_info->phy.link_info.link_info & |
---|
| 4233 | + ICE_AQ_MEDIA_AVAILABLE) { |
---|
| 4234 | + /* not a fatal error if this fails */ |
---|
| 4235 | + err = ice_init_phy_user_cfg(pf->hw.port_info); |
---|
| 4236 | + if (err) |
---|
| 4237 | + dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err); |
---|
| 4238 | + |
---|
| 4239 | + if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) { |
---|
| 4240 | + struct ice_vsi *vsi = ice_get_main_vsi(pf); |
---|
| 4241 | + |
---|
| 4242 | + if (vsi) |
---|
| 4243 | + ice_configure_phy(vsi); |
---|
| 4244 | + } |
---|
| 4245 | + } else { |
---|
| 4246 | + set_bit(ICE_FLAG_NO_MEDIA, pf->flags); |
---|
| 4247 | + } |
---|
| 4248 | + |
---|
| 4249 | + ice_verify_cacheline_size(pf); |
---|
| 4250 | + |
---|
| 4251 | + /* Save wakeup reason register for later use */ |
---|
| 4252 | + pf->wakeup_reason = rd32(hw, PFPM_WUS); |
---|
| 4253 | + |
---|
| 4254 | + /* check for a power management event */ |
---|
| 4255 | + ice_print_wake_reason(pf); |
---|
| 4256 | + |
---|
| 4257 | + /* clear wake status, all bits */ |
---|
| 4258 | + wr32(hw, PFPM_WUS, U32_MAX); |
---|
| 4259 | + |
---|
| 4260 | + /* Disable WoL at init, wait for user to enable */ |
---|
| 4261 | + device_set_wakeup_enable(dev, false); |
---|
| 4262 | + |
---|
| 4263 | + if (ice_is_safe_mode(pf)) { |
---|
| 4264 | + ice_set_safe_mode_vlan_cfg(pf); |
---|
| 4265 | + goto probe_done; |
---|
| 4266 | + } |
---|
| 4267 | + |
---|
| 4268 | + /* initialize DDP driven features */ |
---|
| 4269 | + |
---|
| 4270 | + /* Note: Flow director init failure is non-fatal to load */ |
---|
| 4271 | + if (ice_init_fdir(pf)) |
---|
| 4272 | + dev_err(dev, "could not initialize flow director\n"); |
---|
| 4273 | + |
---|
| 4274 | + /* Note: DCB init failure is non-fatal to load */ |
---|
| 4275 | + if (ice_init_pf_dcb(pf, false)) { |
---|
| 4276 | + clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); |
---|
| 4277 | + clear_bit(ICE_FLAG_DCB_ENA, pf->flags); |
---|
| 4278 | + } else { |
---|
| 4279 | + ice_cfg_lldp_mib_change(&pf->hw, true); |
---|
| 4280 | + } |
---|
| 4281 | + |
---|
| 4282 | + /* print PCI link speed and width */ |
---|
| 4283 | + pcie_print_link_status(pf->pdev); |
---|
| 4284 | + |
---|
| 4285 | +probe_done: |
---|
| 4286 | + /* ready to go, so clear down state bit */ |
---|
| 4287 | + clear_bit(__ICE_DOWN, pf->state); |
---|
3434 | 4288 | return 0; |
---|
3435 | 4289 | |
---|
| 4290 | +err_send_version_unroll: |
---|
| 4291 | + ice_vsi_release_all(pf); |
---|
3436 | 4292 | err_alloc_sw_unroll: |
---|
| 4293 | + set_bit(__ICE_SERVICE_DIS, pf->state); |
---|
3437 | 4294 | set_bit(__ICE_DOWN, pf->state); |
---|
3438 | | - devm_kfree(&pf->pdev->dev, pf->first_sw); |
---|
| 4295 | + devm_kfree(dev, pf->first_sw); |
---|
3439 | 4296 | err_msix_misc_unroll: |
---|
3440 | 4297 | ice_free_irq_msix_misc(pf); |
---|
3441 | 4298 | err_init_interrupt_unroll: |
---|
3442 | 4299 | ice_clear_interrupt_scheme(pf); |
---|
3443 | | - devm_kfree(&pdev->dev, pf->vsi); |
---|
| 4300 | +err_init_vsi_unroll: |
---|
| 4301 | + devm_kfree(dev, pf->vsi); |
---|
3444 | 4302 | err_init_pf_unroll: |
---|
3445 | 4303 | ice_deinit_pf(pf); |
---|
| 4304 | + ice_devlink_destroy_regions(pf); |
---|
3446 | 4305 | ice_deinit_hw(hw); |
---|
3447 | 4306 | err_exit_unroll: |
---|
| 4307 | + ice_devlink_unregister(pf); |
---|
3448 | 4308 | pci_disable_pcie_error_reporting(pdev); |
---|
| 4309 | + pci_disable_device(pdev); |
---|
3449 | 4310 | return err; |
---|
| 4311 | +} |
---|
| 4312 | + |
---|
| 4313 | +/** |
---|
| 4314 | + * ice_set_wake - enable or disable Wake on LAN |
---|
| 4315 | + * @pf: pointer to the PF struct |
---|
| 4316 | + * |
---|
| 4317 | + * Simple helper for WoL control |
---|
| 4318 | + */ |
---|
| 4319 | +static void ice_set_wake(struct ice_pf *pf) |
---|
| 4320 | +{ |
---|
| 4321 | + struct ice_hw *hw = &pf->hw; |
---|
| 4322 | + bool wol = pf->wol_ena; |
---|
| 4323 | + |
---|
| 4324 | + /* clear wake state, otherwise new wake events won't fire */ |
---|
| 4325 | + wr32(hw, PFPM_WUS, U32_MAX); |
---|
| 4326 | + |
---|
| 4327 | + /* enable / disable APM wake up, no RMW needed */ |
---|
| 4328 | + wr32(hw, PFPM_APM, wol ? PFPM_APM_APME_M : 0); |
---|
| 4329 | + |
---|
| 4330 | + /* set magic packet filter enabled */ |
---|
| 4331 | + wr32(hw, PFPM_WUFC, wol ? PFPM_WUFC_MAG_M : 0); |
---|
| 4332 | +} |
---|
| 4333 | + |
---|
| 4334 | +/** |
---|
| 4335 | + * ice_setup_mc_magic_wake - setup device to wake on multicast magic packet |
---|
| 4336 | + * @pf: pointer to the PF struct |
---|
| 4337 | + * |
---|
| 4338 | + * Issue firmware command to enable multicast magic wake, making |
---|
| 4339 | + * sure that any locally administered address (LAA) is used for |
---|
| 4340 | + * wake, and that PF reset doesn't undo the LAA. |
---|
| 4341 | + */ |
---|
| 4342 | +static void ice_setup_mc_magic_wake(struct ice_pf *pf) |
---|
| 4343 | +{ |
---|
| 4344 | + struct device *dev = ice_pf_to_dev(pf); |
---|
| 4345 | + struct ice_hw *hw = &pf->hw; |
---|
| 4346 | + enum ice_status status; |
---|
| 4347 | + u8 mac_addr[ETH_ALEN]; |
---|
| 4348 | + struct ice_vsi *vsi; |
---|
| 4349 | + u8 flags; |
---|
| 4350 | + |
---|
| 4351 | + if (!pf->wol_ena) |
---|
| 4352 | + return; |
---|
| 4353 | + |
---|
| 4354 | + vsi = ice_get_main_vsi(pf); |
---|
| 4355 | + if (!vsi) |
---|
| 4356 | + return; |
---|
| 4357 | + |
---|
| 4358 | + /* Get current MAC address in case it's an LAA */ |
---|
| 4359 | + if (vsi->netdev) |
---|
| 4360 | + ether_addr_copy(mac_addr, vsi->netdev->dev_addr); |
---|
| 4361 | + else |
---|
| 4362 | + ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr); |
---|
| 4363 | + |
---|
| 4364 | + flags = ICE_AQC_MAN_MAC_WR_MC_MAG_EN | |
---|
| 4365 | + ICE_AQC_MAN_MAC_UPDATE_LAA_WOL | |
---|
| 4366 | + ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP; |
---|
| 4367 | + |
---|
| 4368 | + status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL); |
---|
| 4369 | + if (status) |
---|
| 4370 | + dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %s aq_err %s\n", |
---|
| 4371 | + ice_stat_str(status), |
---|
| 4372 | + ice_aq_str(hw->adminq.sq_last_status)); |
---|
3450 | 4373 | } |
---|
3451 | 4374 | |
---|
3452 | 4375 | /** |
---|
.. | .. |
---|
3456 | 4379 | static void ice_remove(struct pci_dev *pdev) |
---|
3457 | 4380 | { |
---|
3458 | 4381 | struct ice_pf *pf = pci_get_drvdata(pdev); |
---|
3459 | | - int i = 0; |
---|
3460 | | - int err; |
---|
| 4382 | + int i; |
---|
3461 | 4383 | |
---|
3462 | | - if (!pf) |
---|
3463 | | - return; |
---|
3464 | | - |
---|
3465 | | - set_bit(__ICE_DOWN, pf->state); |
---|
3466 | | - |
---|
3467 | | - for (i = 0; i < pf->num_alloc_vsi; i++) { |
---|
3468 | | - if (!pf->vsi[i]) |
---|
3469 | | - continue; |
---|
3470 | | - |
---|
3471 | | - err = ice_vsi_release(pf->vsi[i]); |
---|
3472 | | - if (err) |
---|
3473 | | - dev_dbg(&pf->pdev->dev, "Failed to release VSI index %d (err %d)\n", |
---|
3474 | | - i, err); |
---|
| 4384 | + for (i = 0; i < ICE_MAX_RESET_WAIT; i++) { |
---|
| 4385 | + if (!ice_is_reset_in_progress(pf->state)) |
---|
| 4386 | + break; |
---|
| 4387 | + msleep(100); |
---|
3475 | 4388 | } |
---|
3476 | 4389 | |
---|
| 4390 | + if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) { |
---|
| 4391 | + set_bit(__ICE_VF_RESETS_DISABLED, pf->state); |
---|
| 4392 | + ice_free_vfs(pf); |
---|
| 4393 | + } |
---|
| 4394 | + |
---|
| 4395 | + set_bit(__ICE_DOWN, pf->state); |
---|
| 4396 | + ice_service_task_stop(pf); |
---|
| 4397 | + |
---|
| 4398 | + ice_aq_cancel_waiting_tasks(pf); |
---|
| 4399 | + |
---|
| 4400 | + mutex_destroy(&(&pf->hw)->fdir_fltr_lock); |
---|
| 4401 | + if (!ice_is_safe_mode(pf)) |
---|
| 4402 | + ice_remove_arfs(pf); |
---|
| 4403 | + ice_setup_mc_magic_wake(pf); |
---|
| 4404 | + ice_vsi_release_all(pf); |
---|
| 4405 | + ice_set_wake(pf); |
---|
3477 | 4406 | ice_free_irq_msix_misc(pf); |
---|
3478 | | - ice_clear_interrupt_scheme(pf); |
---|
| 4407 | + ice_for_each_vsi(pf, i) { |
---|
| 4408 | + if (!pf->vsi[i]) |
---|
| 4409 | + continue; |
---|
| 4410 | + ice_vsi_free_q_vectors(pf->vsi[i]); |
---|
| 4411 | + } |
---|
3479 | 4412 | ice_deinit_pf(pf); |
---|
| 4413 | + ice_devlink_destroy_regions(pf); |
---|
3480 | 4414 | ice_deinit_hw(&pf->hw); |
---|
| 4415 | + ice_devlink_unregister(pf); |
---|
| 4416 | + |
---|
| 4417 | + /* Issue a PFR as part of the prescribed driver unload flow. Do not |
---|
| 4418 | + * do it via ice_schedule_reset() since there is no need to rebuild |
---|
| 4419 | + * and the service task is already stopped. |
---|
| 4420 | + */ |
---|
| 4421 | + ice_reset(&pf->hw, ICE_RESET_PFR); |
---|
| 4422 | + pci_wait_for_pending_transaction(pdev); |
---|
| 4423 | + ice_clear_interrupt_scheme(pf); |
---|
3481 | 4424 | pci_disable_pcie_error_reporting(pdev); |
---|
| 4425 | + pci_disable_device(pdev); |
---|
| 4426 | +} |
---|
| 4427 | + |
---|
| 4428 | +/** |
---|
| 4429 | + * ice_shutdown - PCI callback for shutting down device |
---|
| 4430 | + * @pdev: PCI device information struct |
---|
| 4431 | + */ |
---|
| 4432 | +static void ice_shutdown(struct pci_dev *pdev) |
---|
| 4433 | +{ |
---|
| 4434 | + struct ice_pf *pf = pci_get_drvdata(pdev); |
---|
| 4435 | + |
---|
| 4436 | + ice_remove(pdev); |
---|
| 4437 | + |
---|
| 4438 | + if (system_state == SYSTEM_POWER_OFF) { |
---|
| 4439 | + pci_wake_from_d3(pdev, pf->wol_ena); |
---|
| 4440 | + pci_set_power_state(pdev, PCI_D3hot); |
---|
| 4441 | + } |
---|
| 4442 | +} |
---|
| 4443 | + |
---|
| 4444 | +#ifdef CONFIG_PM |
---|
| 4445 | +/** |
---|
| 4446 | + * ice_prepare_for_shutdown - prep for PCI shutdown |
---|
| 4447 | + * @pf: board private structure |
---|
| 4448 | + * |
---|
| 4449 | + * Inform or close all dependent features in prep for PCI device shutdown |
---|
| 4450 | + */ |
---|
| 4451 | +static void ice_prepare_for_shutdown(struct ice_pf *pf) |
---|
| 4452 | +{ |
---|
| 4453 | + struct ice_hw *hw = &pf->hw; |
---|
| 4454 | + u32 v; |
---|
| 4455 | + |
---|
| 4456 | + /* Notify VFs of impending reset */ |
---|
| 4457 | + if (ice_check_sq_alive(hw, &hw->mailboxq)) |
---|
| 4458 | + ice_vc_notify_reset(pf); |
---|
| 4459 | + |
---|
| 4460 | + dev_dbg(ice_pf_to_dev(pf), "Tearing down internal switch for shutdown\n"); |
---|
| 4461 | + |
---|
| 4462 | + /* disable the VSIs and their queues that are not already DOWN */ |
---|
| 4463 | + ice_pf_dis_all_vsi(pf, false); |
---|
| 4464 | + |
---|
| 4465 | + ice_for_each_vsi(pf, v) |
---|
| 4466 | + if (pf->vsi[v]) |
---|
| 4467 | + pf->vsi[v]->vsi_num = 0; |
---|
| 4468 | + |
---|
| 4469 | + ice_shutdown_all_ctrlq(hw); |
---|
| 4470 | +} |
---|
| 4471 | + |
---|
| 4472 | +/** |
---|
| 4473 | + * ice_reinit_interrupt_scheme - Reinitialize interrupt scheme |
---|
| 4474 | + * @pf: board private structure to reinitialize |
---|
| 4475 | + * |
---|
| 4476 | + * This routine reinitializes the interrupt scheme that was cleared |
---|
| 4477 | + * during the power management suspend callback. |
---|
| 4478 | + * |
---|
| 4479 | + * This should be called during the resume routine to re-allocate the |
---|
| 4480 | + * q_vectors and reacquire interrupts. |
---|
| 4481 | + */ |
---|
| 4482 | +static int ice_reinit_interrupt_scheme(struct ice_pf *pf) |
---|
| 4483 | +{ |
---|
| 4484 | + struct device *dev = ice_pf_to_dev(pf); |
---|
| 4485 | + int ret, v; |
---|
| 4486 | + |
---|
| 4487 | + /* The interrupt scheme was cleared during suspend, so it needs |
---|
| 4488 | + * to be rebuilt here during resume... |
---|
| 4489 | + */ |
---|
| 4490 | + |
---|
| 4491 | + ret = ice_init_interrupt_scheme(pf); |
---|
| 4492 | + if (ret) { |
---|
| 4493 | + dev_err(dev, "Failed to re-initialize interrupt %d\n", ret); |
---|
| 4494 | + return ret; |
---|
| 4495 | + } |
---|
| 4496 | + |
---|
| 4497 | + /* Remap vectors and rings, after successful re-init interrupts */ |
---|
| 4498 | + ice_for_each_vsi(pf, v) { |
---|
| 4499 | + if (!pf->vsi[v]) |
---|
| 4500 | + continue; |
---|
| 4501 | + |
---|
| 4502 | + ret = ice_vsi_alloc_q_vectors(pf->vsi[v]); |
---|
| 4503 | + if (ret) |
---|
| 4504 | + goto err_reinit; |
---|
| 4505 | + ice_vsi_map_rings_to_vectors(pf->vsi[v]); |
---|
| 4506 | + } |
---|
| 4507 | + |
---|
| 4508 | + ret = ice_req_irq_msix_misc(pf); |
---|
| 4509 | + if (ret) { |
---|
| 4510 | + dev_err(dev, "Setting up misc vector failed after device suspend %d\n", |
---|
| 4511 | + ret); |
---|
| 4512 | + goto err_reinit; |
---|
| 4513 | + } |
---|
| 4514 | + |
---|
| 4515 | + return 0; |
---|
| 4516 | + |
---|
| 4517 | +err_reinit: |
---|
| 4518 | + while (v--) |
---|
| 4519 | + if (pf->vsi[v]) |
---|
| 4520 | + ice_vsi_free_q_vectors(pf->vsi[v]); |
---|
| 4521 | + |
---|
| 4522 | + return ret; |
---|
| 4523 | +} |
---|
| 4524 | + |
---|
| 4525 | +/** |
---|
| 4526 | + * ice_suspend |
---|
| 4527 | + * @dev: generic device information structure |
---|
| 4528 | + * |
---|
| 4529 | + * Power Management callback to quiesce the device and prepare |
---|
| 4530 | + * for D3 transition. |
---|
| 4531 | + */ |
---|
| 4532 | +static int __maybe_unused ice_suspend(struct device *dev) |
---|
| 4533 | +{ |
---|
| 4534 | + struct pci_dev *pdev = to_pci_dev(dev); |
---|
| 4535 | + struct ice_pf *pf; |
---|
| 4536 | + int disabled, v; |
---|
| 4537 | + |
---|
| 4538 | + pf = pci_get_drvdata(pdev); |
---|
| 4539 | + |
---|
| 4540 | + if (!ice_pf_state_is_nominal(pf)) { |
---|
| 4541 | + dev_err(dev, "Device is not ready, no need to suspend it\n"); |
---|
| 4542 | + return -EBUSY; |
---|
| 4543 | + } |
---|
| 4544 | + |
---|
| 4545 | + /* Stop watchdog tasks until resume completion. |
---|
| 4546 | + * Even though it is most likely that the service task is |
---|
| 4547 | + * disabled if the device is suspended or down, the service task's |
---|
| 4548 | + * state is controlled by a different state bit, and we should |
---|
| 4549 | + * store and honor whatever state that bit is in at this point. |
---|
| 4550 | + */ |
---|
| 4551 | + disabled = ice_service_task_stop(pf); |
---|
| 4552 | + |
---|
| 4553 | + /* Already suspended? Then there is nothing to do */ |
---|
| 4554 | + if (test_and_set_bit(__ICE_SUSPENDED, pf->state)) { |
---|
| 4555 | + if (!disabled) |
---|
| 4556 | + ice_service_task_restart(pf); |
---|
| 4557 | + return 0; |
---|
| 4558 | + } |
---|
| 4559 | + |
---|
| 4560 | + if (test_bit(__ICE_DOWN, pf->state) || |
---|
| 4561 | + ice_is_reset_in_progress(pf->state)) { |
---|
| 4562 | + dev_err(dev, "can't suspend device in reset or already down\n"); |
---|
| 4563 | + if (!disabled) |
---|
| 4564 | + ice_service_task_restart(pf); |
---|
| 4565 | + return 0; |
---|
| 4566 | + } |
---|
| 4567 | + |
---|
| 4568 | + ice_setup_mc_magic_wake(pf); |
---|
| 4569 | + |
---|
| 4570 | + ice_prepare_for_shutdown(pf); |
---|
| 4571 | + |
---|
| 4572 | + ice_set_wake(pf); |
---|
| 4573 | + |
---|
| 4574 | + /* Free vectors, clear the interrupt scheme and release IRQs |
---|
| 4575 | + * for proper hibernation, especially with large number of CPUs. |
---|
| 4576 | + * Otherwise hibernation might fail when mapping all the vectors back |
---|
| 4577 | + * to CPU0. |
---|
| 4578 | + */ |
---|
| 4579 | + ice_free_irq_msix_misc(pf); |
---|
| 4580 | + ice_for_each_vsi(pf, v) { |
---|
| 4581 | + if (!pf->vsi[v]) |
---|
| 4582 | + continue; |
---|
| 4583 | + ice_vsi_free_q_vectors(pf->vsi[v]); |
---|
| 4584 | + } |
---|
| 4585 | + ice_free_cpu_rx_rmap(ice_get_main_vsi(pf)); |
---|
| 4586 | + ice_clear_interrupt_scheme(pf); |
---|
| 4587 | + |
---|
| 4588 | + pci_save_state(pdev); |
---|
| 4589 | + pci_wake_from_d3(pdev, pf->wol_ena); |
---|
| 4590 | + pci_set_power_state(pdev, PCI_D3hot); |
---|
| 4591 | + return 0; |
---|
| 4592 | +} |
---|
| 4593 | + |
---|
| 4594 | +/** |
---|
| 4595 | + * ice_resume - PM callback for waking up from D3 |
---|
| 4596 | + * @dev: generic device information structure |
---|
| 4597 | + */ |
---|
| 4598 | +static int __maybe_unused ice_resume(struct device *dev) |
---|
| 4599 | +{ |
---|
| 4600 | + struct pci_dev *pdev = to_pci_dev(dev); |
---|
| 4601 | + enum ice_reset_req reset_type; |
---|
| 4602 | + struct ice_pf *pf; |
---|
| 4603 | + struct ice_hw *hw; |
---|
| 4604 | + int ret; |
---|
| 4605 | + |
---|
| 4606 | + pci_set_power_state(pdev, PCI_D0); |
---|
| 4607 | + pci_restore_state(pdev); |
---|
| 4608 | + pci_save_state(pdev); |
---|
| 4609 | + |
---|
| 4610 | + if (!pci_device_is_present(pdev)) |
---|
| 4611 | + return -ENODEV; |
---|
| 4612 | + |
---|
| 4613 | + ret = pci_enable_device_mem(pdev); |
---|
| 4614 | + if (ret) { |
---|
| 4615 | + dev_err(dev, "Cannot enable device after suspend\n"); |
---|
| 4616 | + return ret; |
---|
| 4617 | + } |
---|
| 4618 | + |
---|
| 4619 | + pf = pci_get_drvdata(pdev); |
---|
| 4620 | + hw = &pf->hw; |
---|
| 4621 | + |
---|
| 4622 | + pf->wakeup_reason = rd32(hw, PFPM_WUS); |
---|
| 4623 | + ice_print_wake_reason(pf); |
---|
| 4624 | + |
---|
| 4625 | + /* We cleared the interrupt scheme when we suspended, so we need to |
---|
| 4626 | + * restore it now to resume device functionality. |
---|
| 4627 | + */ |
---|
| 4628 | + ret = ice_reinit_interrupt_scheme(pf); |
---|
| 4629 | + if (ret) |
---|
| 4630 | + dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret); |
---|
| 4631 | + |
---|
| 4632 | + clear_bit(__ICE_DOWN, pf->state); |
---|
| 4633 | + /* Now perform PF reset and rebuild */ |
---|
| 4634 | + reset_type = ICE_RESET_PFR; |
---|
| 4635 | + /* re-enable service task for reset, but allow reset to schedule it */ |
---|
| 4636 | + clear_bit(__ICE_SERVICE_DIS, pf->state); |
---|
| 4637 | + |
---|
| 4638 | + if (ice_schedule_reset(pf, reset_type)) |
---|
| 4639 | + dev_err(dev, "Reset during resume failed.\n"); |
---|
| 4640 | + |
---|
| 4641 | + clear_bit(__ICE_SUSPENDED, pf->state); |
---|
| 4642 | + ice_service_task_restart(pf); |
---|
| 4643 | + |
---|
| 4644 | + /* Restart the service task */ |
---|
| 4645 | + mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); |
---|
| 4646 | + |
---|
| 4647 | + return 0; |
---|
| 4648 | +} |
---|
| 4649 | +#endif /* CONFIG_PM */ |
---|
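These suspend/resume callbacks are typically registered through a dev_pm_ops table and the pci_driver's .driver.pm field elsewhere in this file (not shown in this hunk); a sketch using the standard kernel macro, assuming that is how this driver wires them up:

```c
/* Assumed wiring (not shown in this hunk) for the __maybe_unused
 * ice_suspend/ice_resume callbacks above.
 */
static SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume);

static struct pci_driver ice_driver = {
	.name = KBUILD_MODNAME,
	/* ... .id_table, .probe, .remove, .shutdown ... */
	.driver.pm = &ice_pm_ops,
};
```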
| 4650 | + |
---|
| 4651 | +/** |
---|
| 4652 | + * ice_pci_err_detected - warning that PCI error has been detected |
---|
| 4653 | + * @pdev: PCI device information struct |
---|
| 4654 | + * @err: the type of PCI error |
---|
| 4655 | + * |
---|
| 4656 | + * Called to warn that something happened on the PCI bus and the error handling |
---|
| 4657 | + * is in progress. Allows the driver to gracefully prepare/handle PCI errors. |
---|
| 4658 | + */ |
---|
| 4659 | +static pci_ers_result_t |
---|
| 4660 | +ice_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t err) |
---|
| 4661 | +{ |
---|
| 4662 | + struct ice_pf *pf = pci_get_drvdata(pdev); |
---|
| 4663 | + |
---|
| 4664 | + if (!pf) { |
---|
| 4665 | + dev_err(&pdev->dev, "%s: unrecoverable device error %d\n", |
---|
| 4666 | + __func__, err); |
---|
| 4667 | + return PCI_ERS_RESULT_DISCONNECT; |
---|
| 4668 | + } |
---|
| 4669 | + |
---|
| 4670 | + if (!test_bit(__ICE_SUSPENDED, pf->state)) { |
---|
| 4671 | + ice_service_task_stop(pf); |
---|
| 4672 | + |
---|
| 4673 | + if (!test_bit(__ICE_PREPARED_FOR_RESET, pf->state)) { |
---|
| 4674 | + set_bit(__ICE_PFR_REQ, pf->state); |
---|
| 4675 | + ice_prepare_for_reset(pf); |
---|
| 4676 | + } |
---|
| 4677 | + } |
---|
| 4678 | + |
---|
| 4679 | + return PCI_ERS_RESULT_NEED_RESET; |
---|
| 4680 | +} |
---|
| 4681 | + |
---|
| 4682 | +/** |
---|
| 4683 | + * ice_pci_err_slot_reset - a PCI slot reset has just happened |
---|
| 4684 | + * @pdev: PCI device information struct |
---|
| 4685 | + * |
---|
| 4686 | + * Called after a PCI slot reset; reads a device register to check whether
---|
| 4687 | + * the device is alive and the driver can recover.
---|
| 4688 | + */ |
---|
| 4689 | +static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev) |
---|
| 4690 | +{ |
---|
| 4691 | + struct ice_pf *pf = pci_get_drvdata(pdev); |
---|
| 4692 | + pci_ers_result_t result; |
---|
| 4693 | + int err; |
---|
| 4694 | + u32 reg; |
---|
| 4695 | + |
---|
| 4696 | + err = pci_enable_device_mem(pdev); |
---|
| 4697 | + if (err) { |
---|
| 4698 | + dev_err(&pdev->dev, "Cannot re-enable PCI device after reset, error %d\n", |
---|
| 4699 | + err); |
---|
| 4700 | + result = PCI_ERS_RESULT_DISCONNECT; |
---|
| 4701 | + } else { |
---|
| 4702 | + pci_set_master(pdev); |
---|
| 4703 | + pci_restore_state(pdev); |
---|
| 4704 | + pci_save_state(pdev); |
---|
| 4705 | + pci_wake_from_d3(pdev, false); |
---|
| 4706 | + |
---|
| 4707 | + /* Check for life */ |
---|
| 4708 | + reg = rd32(&pf->hw, GLGEN_RTRIG); |
---|
| 4709 | + if (!reg) |
---|
| 4710 | + result = PCI_ERS_RESULT_RECOVERED; |
---|
| 4711 | + else |
---|
| 4712 | + result = PCI_ERS_RESULT_DISCONNECT; |
---|
| 4713 | + } |
---|
| 4714 | + |
---|
| 4715 | + err = pci_aer_clear_nonfatal_status(pdev); |
---|
| 4716 | + if (err) |
---|
| 4717 | + dev_dbg(&pdev->dev, "pci_aer_clear_nonfatal_status() failed, error %d\n", |
---|
| 4718 | + err); |
---|
| 4719 | + /* non-fatal, continue */ |
---|
| 4720 | + |
---|
| 4721 | + return result; |
---|
| 4722 | +} |
---|
| 4723 | + |
---|
| 4724 | +/** |
---|
| 4725 | + * ice_pci_err_resume - restart operations after PCI error recovery |
---|
| 4726 | + * @pdev: PCI device information struct |
---|
| 4727 | + * |
---|
| 4728 | + * Called to allow the driver to bring things back up after PCI error and/or |
---|
| 4729 | + * reset recovery have finished |
---|
| 4730 | + */ |
---|
| 4731 | +static void ice_pci_err_resume(struct pci_dev *pdev) |
---|
| 4732 | +{ |
---|
| 4733 | + struct ice_pf *pf = pci_get_drvdata(pdev); |
---|
| 4734 | + |
---|
| 4735 | + if (!pf) { |
---|
| 4736 | + dev_err(&pdev->dev, "%s failed, device is unrecoverable\n", |
---|
| 4737 | + __func__); |
---|
| 4738 | + return; |
---|
| 4739 | + } |
---|
| 4740 | + |
---|
| 4741 | + if (test_bit(__ICE_SUSPENDED, pf->state)) { |
---|
| 4742 | + dev_dbg(&pdev->dev, "%s failed to resume normal operations!\n", |
---|
| 4743 | + __func__); |
---|
| 4744 | + return; |
---|
| 4745 | + } |
---|
| 4746 | + |
---|
| 4747 | + ice_restore_all_vfs_msi_state(pdev); |
---|
| 4748 | + |
---|
| 4749 | + ice_do_reset(pf, ICE_RESET_PFR); |
---|
| 4750 | + ice_service_task_restart(pf); |
---|
| 4751 | + mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); |
---|
| 4752 | +} |
---|
| 4753 | + |
---|
| 4754 | +/** |
---|
| 4755 | + * ice_pci_err_reset_prepare - prepare device driver for PCI reset |
---|
| 4756 | + * @pdev: PCI device information struct |
---|
| 4757 | + */ |
---|
| 4758 | +static void ice_pci_err_reset_prepare(struct pci_dev *pdev) |
---|
| 4759 | +{ |
---|
| 4760 | + struct ice_pf *pf = pci_get_drvdata(pdev); |
---|
| 4761 | + |
---|
| 4762 | + if (!test_bit(__ICE_SUSPENDED, pf->state)) { |
---|
| 4763 | + ice_service_task_stop(pf); |
---|
| 4764 | + |
---|
| 4765 | + if (!test_bit(__ICE_PREPARED_FOR_RESET, pf->state)) { |
---|
| 4766 | + set_bit(__ICE_PFR_REQ, pf->state); |
---|
| 4767 | + ice_prepare_for_reset(pf); |
---|
| 4768 | + } |
---|
| 4769 | + } |
---|
| 4770 | +} |
---|
| 4771 | + |
---|
| 4772 | +/** |
---|
| 4773 | + * ice_pci_err_reset_done - PCI reset done, device driver reset can begin |
---|
| 4774 | + * @pdev: PCI device information struct |
---|
| 4775 | + */ |
---|
| 4776 | +static void ice_pci_err_reset_done(struct pci_dev *pdev) |
---|
| 4777 | +{ |
---|
| 4778 | + ice_pci_err_resume(pdev); |
---|
3482 | 4779 | } |
---|
3483 | 4780 | |
---|
3484 | 4781 | /* ice_pci_tbl - PCI Device ID Table |
---|
.. | .. |
---|
3490 | 4787 | * Class, Class Mask, private data (not used) } |
---|
3491 | 4788 | */ |
---|
3492 | 4789 | static const struct pci_device_id ice_pci_tbl[] = { |
---|
3493 | | - { PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_BACKPLANE), 0 }, |
---|
3494 | | - { PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_QSFP), 0 }, |
---|
3495 | | - { PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_SFP), 0 }, |
---|
3496 | | - { PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_10G_BASE_T), 0 }, |
---|
3497 | | - { PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_SGMII), 0 }, |
---|
| 4790 | + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 }, |
---|
| 4791 | + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 }, |
---|
| 4792 | + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 }, |
---|
| 4793 | + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_BACKPLANE), 0 }, |
---|
| 4794 | + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_QSFP), 0 }, |
---|
| 4795 | + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP), 0 }, |
---|
| 4796 | + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE), 0 }, |
---|
| 4797 | + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP), 0 }, |
---|
| 4798 | + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SFP), 0 }, |
---|
| 4799 | + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_10G_BASE_T), 0 }, |
---|
| 4800 | + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SGMII), 0 }, |
---|
| 4801 | + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE), 0 }, |
---|
| 4802 | + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP), 0 }, |
---|
| 4803 | + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP), 0 }, |
---|
| 4804 | + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T), 0 }, |
---|
| 4805 | + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII), 0 }, |
---|
| 4806 | + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_BACKPLANE), 0 }, |
---|
| 4807 | + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP), 0 }, |
---|
| 4808 | + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T), 0 }, |
---|
| 4809 | + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII), 0 }, |
---|
| 4810 | + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_BACKPLANE), 0 }, |
---|
| 4811 | + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_SFP), 0 }, |
---|
| 4812 | + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T), 0 }, |
---|
| 4813 | + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE), 0 }, |
---|
| 4814 | + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP), 0 }, |
---|
3498 | 4815 | /* required last entry */ |
---|
3499 | 4816 | { 0, } |
---|
3500 | 4817 | }; |
---|
3501 | 4818 | MODULE_DEVICE_TABLE(pci, ice_pci_tbl); |
---|
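Each PCI_VDEVICE() entry above expands to a full struct pci_device_id initializer: it matches on the Intel vendor ID plus the given device ID and wildcards the subsystem IDs, and MODULE_DEVICE_TABLE() exports the table so udev can autoload the module from a device's modalias. A sketch of the expansion for one of the new entries:

	static const struct pci_device_id example = {
		.vendor    = PCI_VENDOR_ID_INTEL,	/* 0x8086 */
		.device    = ICE_DEV_ID_E810C_SFP,
		.subvendor = PCI_ANY_ID,	/* match any subsystem vendor */
		.subdevice = PCI_ANY_ID,	/* match any subsystem device */
	};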
| 4819 | + |
---|
| 4820 | +static __maybe_unused SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume); |
---|
| 4821 | + |
---|
| 4822 | +static const struct pci_error_handlers ice_pci_err_handler = { |
---|
| 4823 | + .error_detected = ice_pci_err_detected, |
---|
| 4824 | + .slot_reset = ice_pci_err_slot_reset, |
---|
| 4825 | + .reset_prepare = ice_pci_err_reset_prepare, |
---|
| 4826 | + .reset_done = ice_pci_err_reset_done, |
---|
| 4827 | + .resume = ice_pci_err_resume |
---|
| 4828 | +}; |
---|
3502 | 4829 | |
---|
3503 | 4830 | static struct pci_driver ice_driver = { |
---|
3504 | 4831 | .name = KBUILD_MODNAME, |
---|
3505 | 4832 | .id_table = ice_pci_tbl, |
---|
3506 | 4833 | .probe = ice_probe, |
---|
3507 | 4834 | .remove = ice_remove, |
---|
| 4835 | +#ifdef CONFIG_PM |
---|
| 4836 | + .driver.pm = &ice_pm_ops, |
---|
| 4837 | +#endif /* CONFIG_PM */ |
---|
| 4838 | + .shutdown = ice_shutdown, |
---|
| 4839 | + .sriov_configure = ice_sriov_configure, |
---|
| 4840 | + .err_handler = &ice_pci_err_handler |
---|
3508 | 4841 | }; |
---|
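SIMPLE_DEV_PM_OPS() above builds a struct dev_pm_ops whose system-sleep callbacks all funnel into the same suspend/resume pair; the __maybe_unused annotations on ice_suspend/ice_resume keep !CONFIG_PM builds warning-free, since the ops struct is then unreferenced. An open-coded equivalent would look roughly like this (a sketch of what SET_SYSTEM_SLEEP_PM_OPS fills in):

	static const struct dev_pm_ops ice_pm_ops = {
		.suspend  = ice_suspend,	/* system suspend entry */
		.resume   = ice_resume,
		.freeze   = ice_suspend,	/* hibernation image creation */
		.thaw     = ice_resume,
		.poweroff = ice_suspend,	/* hibernation power-off */
		.restore  = ice_resume,		/* resume from hibernation */
	};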
3509 | 4842 | |
---|
3510 | 4843 | /** |
---|
.. | .. |
---|
3517 | 4850 | { |
---|
3518 | 4851 | int status; |
---|
3519 | 4852 | |
---|
3520 | | - pr_info("%s - version %s\n", ice_driver_string, ice_drv_ver); |
---|
| 4853 | + pr_info("%s\n", ice_driver_string); |
---|
3521 | 4854 | pr_info("%s\n", ice_copyright); |
---|
3522 | 4855 | |
---|
3523 | | - ice_wq = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, KBUILD_MODNAME); |
---|
| 4856 | + ice_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, KBUILD_MODNAME); |
---|
3524 | 4857 | if (!ice_wq) { |
---|
3525 | 4858 | pr_err("Failed to create workqueue\n"); |
---|
3526 | 4859 | return -ENOMEM; |
---|
.. | .. |
---|
3528 | 4861 | |
---|
3529 | 4862 | status = pci_register_driver(&ice_driver); |
---|
3530 | 4863 | if (status) { |
---|
3531 | | - pr_err("failed to register pci driver, err %d\n", status); |
---|
| 4864 | + pr_err("failed to register PCI driver, err %d\n", status); |
---|
3532 | 4865 | destroy_workqueue(ice_wq); |
---|
3533 | 4866 | } |
---|
3534 | 4867 | |
---|
.. | .. |
---|
3551 | 4884 | module_exit(ice_module_exit); |
---|
3552 | 4885 | |
---|
3553 | 4886 | /** |
---|
3554 | | - * ice_set_mac_address - NDO callback to set mac address |
---|
| 4887 | + * ice_set_mac_address - NDO callback to set MAC address |
---|
3555 | 4888 | * @netdev: network interface device structure |
---|
3556 | 4889 | * @pi: pointer to an address structure |
---|
3557 | 4890 | * |
---|
.. | .. |
---|
3565 | 4898 | struct ice_hw *hw = &pf->hw; |
---|
3566 | 4899 | struct sockaddr *addr = pi; |
---|
3567 | 4900 | enum ice_status status; |
---|
3568 | | - LIST_HEAD(a_mac_list); |
---|
3569 | | - LIST_HEAD(r_mac_list); |
---|
| 4901 | + u8 old_mac[ETH_ALEN]; |
---|
3570 | 4902 | u8 flags = 0; |
---|
3571 | | - int err; |
---|
| 4903 | + int err = 0; |
---|
3572 | 4904 | u8 *mac; |
---|
3573 | 4905 | |
---|
3574 | 4906 | mac = (u8 *)addr->sa_data; |
---|
.. | .. |
---|
3577 | 4909 | return -EADDRNOTAVAIL; |
---|
3578 | 4910 | |
---|
3579 | 4911 | if (ether_addr_equal(netdev->dev_addr, mac)) { |
---|
3580 | | - netdev_warn(netdev, "already using mac %pM\n", mac); |
---|
| 4912 | + netdev_dbg(netdev, "already using mac %pM\n", mac); |
---|
3581 | 4913 | return 0; |
---|
3582 | 4914 | } |
---|
3583 | 4915 | |
---|
3584 | 4916 | if (test_bit(__ICE_DOWN, pf->state) || |
---|
3585 | | - ice_is_reset_recovery_pending(pf->state)) { |
---|
| 4917 | + ice_is_reset_in_progress(pf->state)) { |
---|
3586 | 4918 | netdev_err(netdev, "can't set mac %pM. device not ready\n", |
---|
3587 | 4919 | mac); |
---|
3588 | 4920 | return -EBUSY; |
---|
3589 | 4921 | } |
---|
3590 | 4922 | |
---|
3591 | | - /* When we change the mac address we also have to change the mac address |
---|
3592 | | - * based filter rules that were created previously for the old mac |
---|
3593 | | - * address. So first, we remove the old filter rule using ice_remove_mac |
---|
3594 | | - * and then create a new filter rule using ice_add_mac. Note that for |
---|
3595 | | - * both these operations, we first need to form a "list" of mac |
---|
3596 | | - * addresses (even though in this case, we have only 1 mac address to be |
---|
3597 | | - * added/removed) and this done using ice_add_mac_to_list. Depending on |
---|
3598 | | - * the ensuing operation this "list" of mac addresses is either to be |
---|
3599 | | - * added or removed from the filter. |
---|
3600 | | - */ |
---|
3601 | | - err = ice_add_mac_to_list(vsi, &r_mac_list, netdev->dev_addr); |
---|
| 4923 | + netif_addr_lock_bh(netdev); |
---|
| 4924 | + ether_addr_copy(old_mac, netdev->dev_addr); |
---|
| 4925 | + /* change the netdev's MAC address */ |
---|
| 4926 | + memcpy(netdev->dev_addr, mac, netdev->addr_len); |
---|
| 4927 | + netif_addr_unlock_bh(netdev); |
---|
| 4928 | + |
---|
| 4929 | + /* Clean up old MAC filter. Not an error if old filter doesn't exist */ |
---|
| 4930 | + status = ice_fltr_remove_mac(vsi, old_mac, ICE_FWD_TO_VSI); |
---|
| 4931 | + if (status && status != ICE_ERR_DOES_NOT_EXIST) { |
---|
| 4932 | + err = -EADDRNOTAVAIL; |
---|
| 4933 | + goto err_update_filters; |
---|
| 4934 | + } |
---|
| 4935 | + |
---|
| 4936 | + /* Add filter for new MAC. If filter exists, return success */ |
---|
| 4937 | + status = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI); |
---|
| 4938 | + if (status == ICE_ERR_ALREADY_EXISTS) |
---|
| 4939 | + /* Although this MAC filter is already present in hardware it's |
---|
| 4940 | + * possible in some cases (e.g. bonding) that dev_addr was |
---|
| 4941 | + * modified outside of the driver and needs to be restored back |
---|
| 4942 | + * to this value. |
---|
| 4943 | + */ |
---|
| 4944 | + netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac); |
---|
| 4945 | + else if (status) |
---|
| 4946 | + /* error if the new filter addition failed */ |
---|
| 4947 | + err = -EADDRNOTAVAIL; |
---|
| 4948 | + |
---|
| 4949 | +err_update_filters: |
---|
3602 | 4950 | if (err) { |
---|
3603 | | - err = -EADDRNOTAVAIL; |
---|
3604 | | - goto free_lists; |
---|
3605 | | - } |
---|
3606 | | - |
---|
3607 | | - status = ice_remove_mac(hw, &r_mac_list); |
---|
3608 | | - if (status) { |
---|
3609 | | - err = -EADDRNOTAVAIL; |
---|
3610 | | - goto free_lists; |
---|
3611 | | - } |
---|
3612 | | - |
---|
3613 | | - err = ice_add_mac_to_list(vsi, &a_mac_list, mac); |
---|
3614 | | - if (err) { |
---|
3615 | | - err = -EADDRNOTAVAIL; |
---|
3616 | | - goto free_lists; |
---|
3617 | | - } |
---|
3618 | | - |
---|
3619 | | - status = ice_add_mac(hw, &a_mac_list); |
---|
3620 | | - if (status) { |
---|
3621 | | - err = -EADDRNOTAVAIL; |
---|
3622 | | - goto free_lists; |
---|
3623 | | - } |
---|
3624 | | - |
---|
3625 | | -free_lists: |
---|
3626 | | - /* free list entries */ |
---|
3627 | | - ice_free_fltr_list(&pf->pdev->dev, &r_mac_list); |
---|
3628 | | - ice_free_fltr_list(&pf->pdev->dev, &a_mac_list); |
---|
3629 | | - |
---|
3630 | | - if (err) { |
---|
3631 | | - netdev_err(netdev, "can't set mac %pM. filter update failed\n", |
---|
| 4951 | + netdev_err(netdev, "can't set MAC %pM. filter update failed\n", |
---|
3632 | 4952 | mac); |
---|
| 4953 | + netif_addr_lock_bh(netdev); |
---|
| 4954 | + ether_addr_copy(netdev->dev_addr, old_mac); |
---|
| 4955 | + netif_addr_unlock_bh(netdev); |
---|
3633 | 4956 | return err; |
---|
3634 | 4957 | } |
---|
3635 | 4958 | |
---|
3636 | | - /* change the netdev's mac address */ |
---|
3637 | | - memcpy(netdev->dev_addr, mac, netdev->addr_len); |
---|
3638 | | - netdev_dbg(vsi->netdev, "updated mac address to %pM\n", |
---|
| 4959 | + netdev_dbg(vsi->netdev, "updated MAC address to %pM\n", |
---|
3639 | 4960 | netdev->dev_addr); |
---|
3640 | 4961 | |
---|
3641 | | - /* write new mac address to the firmware */ |
---|
| 4962 | + /* write new MAC address to the firmware */ |
---|
3642 | 4963 | flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL; |
---|
3643 | 4964 | status = ice_aq_manage_mac_write(hw, mac, flags, NULL); |
---|
3644 | 4965 | if (status) { |
---|
3645 | | - netdev_err(netdev, "can't set mac %pM. write to firmware failed.\n", |
---|
3646 | | - mac); |
---|
| 4966 | +		netdev_err(netdev, "can't set MAC %pM. write to firmware failed, error %s\n",
---|
| 4967 | + mac, ice_stat_str(status)); |
---|
3647 | 4968 | } |
---|
3648 | 4969 | return 0; |
---|
3649 | 4970 | } |
---|
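ice_set_mac_address() is reached from dev_set_mac_address() under RTNL, typically via `ip link set dev <iface> address <mac>` or the legacy SIOCSIFHWADDR ioctl. A minimal userspace sketch of the ioctl path (interface name is illustrative; requires CAP_NET_ADMIN):

	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <net/if_arp.h>
	#include <unistd.h>

	static int set_mac(const char *ifname, const unsigned char mac[6])
	{
		struct ifreq ifr;
		int fd = socket(AF_INET, SOCK_DGRAM, 0);
		int ret;

		if (fd < 0)
			return -1;
		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
		ifr.ifr_hwaddr.sa_family = ARPHRD_ETHER;	/* Ethernet MAC */
		memcpy(ifr.ifr_hwaddr.sa_data, mac, 6);
		ret = ioctl(fd, SIOCSIFHWADDR, &ifr);	/* -> ndo_set_mac_address */
		close(fd);
		return ret;
	}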
.. | .. |
---|
3675 | 4996 | } |
---|
3676 | 4997 | |
---|
3677 | 4998 | /** |
---|
| 4999 | + * ice_set_tx_maxrate - NDO callback to set the maximum per-queue bitrate |
---|
| 5000 | + * @netdev: network interface device structure |
---|
| 5001 | + * @queue_index: Queue ID |
---|
| 5002 | + * @maxrate: maximum bandwidth in Mbps |
---|
| 5003 | + */ |
---|
| 5004 | +static int |
---|
| 5005 | +ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate) |
---|
| 5006 | +{ |
---|
| 5007 | + struct ice_netdev_priv *np = netdev_priv(netdev); |
---|
| 5008 | + struct ice_vsi *vsi = np->vsi; |
---|
| 5009 | + enum ice_status status; |
---|
| 5010 | + u16 q_handle; |
---|
| 5011 | + u8 tc; |
---|
| 5012 | + |
---|
| 5013 | + /* Validate maxrate requested is within permitted range */ |
---|
| 5014 | + if (maxrate && (maxrate > (ICE_SCHED_MAX_BW / 1000))) { |
---|
| 5015 | +		netdev_err(netdev, "Invalid max rate %d specified for queue %d\n",
---|
| 5016 | + maxrate, queue_index); |
---|
| 5017 | + return -EINVAL; |
---|
| 5018 | + } |
---|
| 5019 | + |
---|
| 5020 | + q_handle = vsi->tx_rings[queue_index]->q_handle; |
---|
| 5021 | + tc = ice_dcb_get_tc(vsi, queue_index); |
---|
| 5022 | + |
---|
| 5023 | + /* Set BW back to default, when user set maxrate to 0 */ |
---|
| 5024 | + if (!maxrate) |
---|
| 5025 | + status = ice_cfg_q_bw_dflt_lmt(vsi->port_info, vsi->idx, tc, |
---|
| 5026 | + q_handle, ICE_MAX_BW); |
---|
| 5027 | + else |
---|
| 5028 | + status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc, |
---|
| 5029 | + q_handle, ICE_MAX_BW, maxrate * 1000); |
---|
| 5030 | + if (status) { |
---|
| 5031 | + netdev_err(netdev, "Unable to set Tx max rate, error %s\n", |
---|
| 5032 | + ice_stat_str(status)); |
---|
| 5033 | + return -EIO; |
---|
| 5034 | + } |
---|
| 5035 | + |
---|
| 5036 | + return 0; |
---|
| 5037 | +} |
---|
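.ndo_set_tx_maxrate is driven by the per-queue tx_maxrate sysfs attribute, with the rate given in Mbps; writing 0 takes the branch above that restores the default (unlimited) bandwidth. A userspace sketch (device and queue names are illustrative):

	#include <stdio.h>

	int main(void)
	{
		/* Cap Tx queue 0 of eth0 at 500 Mbps */
		FILE *f = fopen("/sys/class/net/eth0/queues/tx-0/tx_maxrate", "w");

		if (!f)
			return 1;
		fprintf(f, "500\n");	/* write 0 instead to remove the cap */
		return fclose(f) ? 1 : 0;
	}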
| 5038 | + |
---|
| 5039 | +/** |
---|
3678 | 5040 | * ice_fdb_add - add an entry to the hardware database |
---|
3679 | 5041 | * @ndm: the input from the stack |
---|
3680 | 5042 | * @tb: pointer to array of nladdr (unused) |
---|
3681 | 5043 | * @dev: the net device pointer |
---|
3682 | 5044 | * @addr: the MAC address entry being added |
---|
3683 | | - * @vid: VLAN id |
---|
| 5045 | + * @vid: VLAN ID |
---|
3684 | 5046 | * @flags: instructions from stack about fdb operation |
---|
| 5047 | + * @extack: netlink extended ack |
---|
3685 | 5048 | */ |
---|
3686 | | -static int ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[], |
---|
3687 | | - struct net_device *dev, const unsigned char *addr, |
---|
3688 | | - u16 vid, u16 flags) |
---|
| 5049 | +static int |
---|
| 5050 | +ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[], |
---|
| 5051 | + struct net_device *dev, const unsigned char *addr, u16 vid, |
---|
| 5052 | + u16 flags, struct netlink_ext_ack __always_unused *extack) |
---|
3689 | 5053 | { |
---|
3690 | 5054 | int err; |
---|
3691 | 5055 | |
---|
.. | .. |
---|
3718 | 5082 | * @tb: pointer to array of nladdr (unused) |
---|
3719 | 5083 | * @dev: the net device pointer |
---|
3720 | 5084 | * @addr: the MAC address entry being added |
---|
3721 | | - * @vid: VLAN id |
---|
| 5085 | + * @vid: VLAN ID |
---|
3722 | 5086 | */ |
---|
3723 | | -static int ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[], |
---|
3724 | | - struct net_device *dev, const unsigned char *addr, |
---|
3725 | | - __always_unused u16 vid) |
---|
| 5087 | +static int |
---|
| 5088 | +ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[], |
---|
| 5089 | + struct net_device *dev, const unsigned char *addr, |
---|
| 5090 | + __always_unused u16 vid) |
---|
3726 | 5091 | { |
---|
3727 | 5092 | int err; |
---|
3728 | 5093 | |
---|
.. | .. |
---|
3742 | 5107 | } |
---|
3743 | 5108 | |
---|
3744 | 5109 | /** |
---|
3745 | | - * ice_vsi_manage_vlan_insertion - Manage VLAN insertion for the VSI for Tx |
---|
3746 | | - * @vsi: the vsi being changed |
---|
3747 | | - */ |
---|
3748 | | -static int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi) |
---|
3749 | | -{ |
---|
3750 | | - struct device *dev = &vsi->back->pdev->dev; |
---|
3751 | | - struct ice_hw *hw = &vsi->back->hw; |
---|
3752 | | - struct ice_vsi_ctx ctxt = { 0 }; |
---|
3753 | | - enum ice_status status; |
---|
3754 | | - |
---|
3755 | | - /* Here we are configuring the VSI to let the driver add VLAN tags by |
---|
3756 | | - * setting vlan_flags to ICE_AQ_VSI_VLAN_MODE_ALL. The actual VLAN tag |
---|
3757 | | - * insertion happens in the Tx hot path, in ice_tx_map. |
---|
3758 | | - */ |
---|
3759 | | - ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL; |
---|
3760 | | - |
---|
3761 | | - ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID); |
---|
3762 | | - ctxt.vsi_num = vsi->vsi_num; |
---|
3763 | | - |
---|
3764 | | - status = ice_aq_update_vsi(hw, &ctxt, NULL); |
---|
3765 | | - if (status) { |
---|
3766 | | - dev_err(dev, "update VSI for VLAN insert failed, err %d aq_err %d\n", |
---|
3767 | | - status, hw->adminq.sq_last_status); |
---|
3768 | | - return -EIO; |
---|
3769 | | - } |
---|
3770 | | - |
---|
3771 | | - vsi->info.vlan_flags = ctxt.info.vlan_flags; |
---|
3772 | | - return 0; |
---|
3773 | | -} |
---|
3774 | | - |
---|
3775 | | -/** |
---|
3776 | | - * ice_vsi_manage_vlan_stripping - Manage VLAN stripping for the VSI for Rx |
---|
3777 | | - * @vsi: the vsi being changed |
---|
3778 | | - * @ena: boolean value indicating if this is a enable or disable request |
---|
3779 | | - */ |
---|
3780 | | -static int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena) |
---|
3781 | | -{ |
---|
3782 | | - struct device *dev = &vsi->back->pdev->dev; |
---|
3783 | | - struct ice_hw *hw = &vsi->back->hw; |
---|
3784 | | - struct ice_vsi_ctx ctxt = { 0 }; |
---|
3785 | | - enum ice_status status; |
---|
3786 | | - |
---|
3787 | | - /* Here we are configuring what the VSI should do with the VLAN tag in |
---|
3788 | | - * the Rx packet. We can either leave the tag in the packet or put it in |
---|
3789 | | - * the Rx descriptor. |
---|
3790 | | - */ |
---|
3791 | | - if (ena) { |
---|
3792 | | - /* Strip VLAN tag from Rx packet and put it in the desc */ |
---|
3793 | | - ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH; |
---|
3794 | | - } else { |
---|
3795 | | - /* Disable stripping. Leave tag in packet */ |
---|
3796 | | - ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING; |
---|
3797 | | - } |
---|
3798 | | - |
---|
3799 | | - /* Allow all packets untagged/tagged */ |
---|
3800 | | - ctxt.info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL; |
---|
3801 | | - |
---|
3802 | | - ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID); |
---|
3803 | | - ctxt.vsi_num = vsi->vsi_num; |
---|
3804 | | - |
---|
3805 | | - status = ice_aq_update_vsi(hw, &ctxt, NULL); |
---|
3806 | | - if (status) { |
---|
3807 | | - dev_err(dev, "update VSI for VALN strip failed, ena = %d err %d aq_err %d\n", |
---|
3808 | | - ena, status, hw->adminq.sq_last_status); |
---|
3809 | | - return -EIO; |
---|
3810 | | - } |
---|
3811 | | - |
---|
3812 | | - vsi->info.vlan_flags = ctxt.info.vlan_flags; |
---|
3813 | | - return 0; |
---|
3814 | | -} |
---|
3815 | | - |
---|
3816 | | -/** |
---|
3817 | 5110 | * ice_set_features - set the netdev feature flags |
---|
3818 | 5111 | * @netdev: ptr to the netdev being adjusted |
---|
3819 | 5112 | * @features: the feature set that the stack is suggesting |
---|
3820 | 5113 | */ |
---|
3821 | | -static int ice_set_features(struct net_device *netdev, |
---|
3822 | | - netdev_features_t features) |
---|
| 5114 | +static int |
---|
| 5115 | +ice_set_features(struct net_device *netdev, netdev_features_t features) |
---|
3823 | 5116 | { |
---|
3824 | 5117 | struct ice_netdev_priv *np = netdev_priv(netdev); |
---|
3825 | 5118 | struct ice_vsi *vsi = np->vsi; |
---|
| 5119 | + struct ice_pf *pf = vsi->back; |
---|
3826 | 5120 | int ret = 0; |
---|
| 5121 | + |
---|
| 5122 | + /* Don't set any netdev advanced features with device in Safe Mode */ |
---|
| 5123 | + if (ice_is_safe_mode(vsi->back)) { |
---|
| 5124 | + dev_err(ice_pf_to_dev(vsi->back), "Device is in Safe Mode - not enabling advanced netdev features\n"); |
---|
| 5125 | + return ret; |
---|
| 5126 | + } |
---|
| 5127 | + |
---|
| 5128 | + /* Do not change setting during reset */ |
---|
| 5129 | + if (ice_is_reset_in_progress(pf->state)) { |
---|
| 5130 | + dev_err(ice_pf_to_dev(vsi->back), "Device is resetting, changing advanced netdev features temporarily unavailable.\n"); |
---|
| 5131 | + return -EBUSY; |
---|
| 5132 | + } |
---|
| 5133 | + |
---|
| 5134 | + /* Multiple features can be changed in one call so keep features in |
---|
| 5135 | + * separate if/else statements to guarantee each feature is checked |
---|
| 5136 | + */ |
---|
| 5137 | + if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH)) |
---|
| 5138 | + ret = ice_vsi_manage_rss_lut(vsi, true); |
---|
| 5139 | + else if (!(features & NETIF_F_RXHASH) && |
---|
| 5140 | + netdev->features & NETIF_F_RXHASH) |
---|
| 5141 | + ret = ice_vsi_manage_rss_lut(vsi, false); |
---|
3827 | 5142 | |
---|
3828 | 5143 | if ((features & NETIF_F_HW_VLAN_CTAG_RX) && |
---|
3829 | 5144 | !(netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) |
---|
.. | .. |
---|
3831 | 5146 | else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && |
---|
3832 | 5147 | (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) |
---|
3833 | 5148 | ret = ice_vsi_manage_vlan_stripping(vsi, false); |
---|
3834 | | - else if ((features & NETIF_F_HW_VLAN_CTAG_TX) && |
---|
3835 | | - !(netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) |
---|
| 5149 | + |
---|
| 5150 | + if ((features & NETIF_F_HW_VLAN_CTAG_TX) && |
---|
| 5151 | + !(netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) |
---|
3836 | 5152 | ret = ice_vsi_manage_vlan_insertion(vsi); |
---|
3837 | 5153 | else if (!(features & NETIF_F_HW_VLAN_CTAG_TX) && |
---|
3838 | 5154 | (netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) |
---|
3839 | 5155 | ret = ice_vsi_manage_vlan_insertion(vsi); |
---|
3840 | 5156 | |
---|
| 5157 | + if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && |
---|
| 5158 | + !(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) |
---|
| 5159 | + ret = ice_cfg_vlan_pruning(vsi, true, false); |
---|
| 5160 | + else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && |
---|
| 5161 | + (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) |
---|
| 5162 | + ret = ice_cfg_vlan_pruning(vsi, false, false); |
---|
| 5163 | + |
---|
| 5164 | + if ((features & NETIF_F_NTUPLE) && |
---|
| 5165 | + !(netdev->features & NETIF_F_NTUPLE)) { |
---|
| 5166 | + ice_vsi_manage_fdir(vsi, true); |
---|
| 5167 | + ice_init_arfs(vsi); |
---|
| 5168 | + } else if (!(features & NETIF_F_NTUPLE) && |
---|
| 5169 | + (netdev->features & NETIF_F_NTUPLE)) { |
---|
| 5170 | + ice_vsi_manage_fdir(vsi, false); |
---|
| 5171 | + ice_clear_arfs(vsi); |
---|
| 5172 | + } |
---|
| 5173 | + |
---|
3841 | 5174 | return ret; |
---|
3842 | 5175 | } |
---|
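The if/else ladder above is equivalent to the XOR-based "changed bits" idiom several other drivers use; a sketch of that alternative form (illustrative only, ice keeps the explicit per-flag checks so each feature is handled exactly once per call):

	netdev_features_t changed = netdev->features ^ features;

	if (changed & NETIF_F_RXHASH)
		ret = ice_vsi_manage_rss_lut(vsi,
					     !!(features & NETIF_F_RXHASH));
	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
		ret = ice_vsi_manage_vlan_stripping(vsi,
						    !!(features & NETIF_F_HW_VLAN_CTAG_RX));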
3843 | 5176 | |
---|
3844 | 5177 | /** |
---|
3845 | | - * ice_vsi_vlan_setup - Setup vlan offload properties on a VSI |
---|
3846 | | - * @vsi: VSI to setup vlan properties for |
---|
| 5178 | + * ice_vsi_vlan_setup - Setup VLAN offload properties on a VSI |
---|
| 5179 | + * @vsi: VSI to setup VLAN properties for |
---|
3847 | 5180 | */ |
---|
3848 | 5181 | static int ice_vsi_vlan_setup(struct ice_vsi *vsi) |
---|
3849 | 5182 | { |
---|
.. | .. |
---|
3858 | 5191 | } |
---|
3859 | 5192 | |
---|
3860 | 5193 | /** |
---|
3861 | | - * ice_restore_vlan - Reinstate VLANs when vsi/netdev comes back up |
---|
3862 | | - * @vsi: the VSI being brought back up |
---|
3863 | | - */ |
---|
3864 | | -static int ice_restore_vlan(struct ice_vsi *vsi) |
---|
3865 | | -{ |
---|
3866 | | - int err; |
---|
3867 | | - u16 vid; |
---|
3868 | | - |
---|
3869 | | - if (!vsi->netdev) |
---|
3870 | | - return -EINVAL; |
---|
3871 | | - |
---|
3872 | | - err = ice_vsi_vlan_setup(vsi); |
---|
3873 | | - if (err) |
---|
3874 | | - return err; |
---|
3875 | | - |
---|
3876 | | - for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID) { |
---|
3877 | | - err = ice_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q), vid); |
---|
3878 | | - if (err) |
---|
3879 | | - break; |
---|
3880 | | - } |
---|
3881 | | - |
---|
3882 | | - return err; |
---|
3883 | | -} |
---|
3884 | | - |
---|
3885 | | -/** |
---|
3886 | | - * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance |
---|
3887 | | - * @ring: The Tx ring to configure |
---|
3888 | | - * @tlan_ctx: Pointer to the Tx LAN queue context structure to be initialized |
---|
3889 | | - * @pf_q: queue index in the PF space |
---|
3890 | | - * |
---|
3891 | | - * Configure the Tx descriptor ring in TLAN context. |
---|
3892 | | - */ |
---|
3893 | | -static void |
---|
3894 | | -ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q) |
---|
3895 | | -{ |
---|
3896 | | - struct ice_vsi *vsi = ring->vsi; |
---|
3897 | | - struct ice_hw *hw = &vsi->back->hw; |
---|
3898 | | - |
---|
3899 | | - tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S; |
---|
3900 | | - |
---|
3901 | | - tlan_ctx->port_num = vsi->port_info->lport; |
---|
3902 | | - |
---|
3903 | | - /* Transmit Queue Length */ |
---|
3904 | | - tlan_ctx->qlen = ring->count; |
---|
3905 | | - |
---|
3906 | | - /* PF number */ |
---|
3907 | | - tlan_ctx->pf_num = hw->pf_id; |
---|
3908 | | - |
---|
3909 | | - /* queue belongs to a specific VSI type |
---|
3910 | | - * VF / VM index should be programmed per vmvf_type setting: |
---|
3911 | | - * for vmvf_type = VF, it is VF number between 0-256 |
---|
3912 | | - * for vmvf_type = VM, it is VM number between 0-767 |
---|
3913 | | - * for PF or EMP this field should be set to zero |
---|
3914 | | - */ |
---|
3915 | | - switch (vsi->type) { |
---|
3916 | | - case ICE_VSI_PF: |
---|
3917 | | - tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF; |
---|
3918 | | - break; |
---|
3919 | | - default: |
---|
3920 | | - return; |
---|
3921 | | - } |
---|
3922 | | - |
---|
3923 | | - /* make sure the context is associated with the right VSI */ |
---|
3924 | | - tlan_ctx->src_vsi = vsi->vsi_num; |
---|
3925 | | - |
---|
3926 | | - tlan_ctx->tso_ena = ICE_TX_LEGACY; |
---|
3927 | | - tlan_ctx->tso_qnum = pf_q; |
---|
3928 | | - |
---|
3929 | | - /* Legacy or Advanced Host Interface: |
---|
3930 | | - * 0: Advanced Host Interface |
---|
3931 | | - * 1: Legacy Host Interface |
---|
3932 | | - */ |
---|
3933 | | - tlan_ctx->legacy_int = ICE_TX_LEGACY; |
---|
3934 | | -} |
---|
3935 | | - |
---|
3936 | | -/** |
---|
3937 | | - * ice_vsi_cfg_txqs - Configure the VSI for Tx |
---|
3938 | | - * @vsi: the VSI being configured |
---|
3939 | | - * |
---|
3940 | | - * Return 0 on success and a negative value on error |
---|
3941 | | - * Configure the Tx VSI for operation. |
---|
3942 | | - */ |
---|
3943 | | -static int ice_vsi_cfg_txqs(struct ice_vsi *vsi) |
---|
3944 | | -{ |
---|
3945 | | - struct ice_aqc_add_tx_qgrp *qg_buf; |
---|
3946 | | - struct ice_aqc_add_txqs_perq *txq; |
---|
3947 | | - struct ice_pf *pf = vsi->back; |
---|
3948 | | - enum ice_status status; |
---|
3949 | | - u16 buf_len, i, pf_q; |
---|
3950 | | - int err = 0, tc = 0; |
---|
3951 | | - u8 num_q_grps; |
---|
3952 | | - |
---|
3953 | | - buf_len = sizeof(struct ice_aqc_add_tx_qgrp); |
---|
3954 | | - qg_buf = devm_kzalloc(&pf->pdev->dev, buf_len, GFP_KERNEL); |
---|
3955 | | - if (!qg_buf) |
---|
3956 | | - return -ENOMEM; |
---|
3957 | | - |
---|
3958 | | - if (vsi->num_txq > ICE_MAX_TXQ_PER_TXQG) { |
---|
3959 | | - err = -EINVAL; |
---|
3960 | | - goto err_cfg_txqs; |
---|
3961 | | - } |
---|
3962 | | - qg_buf->num_txqs = 1; |
---|
3963 | | - num_q_grps = 1; |
---|
3964 | | - |
---|
3965 | | - /* set up and configure the tx queues */ |
---|
3966 | | - ice_for_each_txq(vsi, i) { |
---|
3967 | | - struct ice_tlan_ctx tlan_ctx = { 0 }; |
---|
3968 | | - |
---|
3969 | | - pf_q = vsi->txq_map[i]; |
---|
3970 | | - ice_setup_tx_ctx(vsi->tx_rings[i], &tlan_ctx, pf_q); |
---|
3971 | | - /* copy context contents into the qg_buf */ |
---|
3972 | | - qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q); |
---|
3973 | | - ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx, |
---|
3974 | | - ice_tlan_ctx_info); |
---|
3975 | | - |
---|
3976 | | - /* init queue specific tail reg. It is referred as transmit |
---|
3977 | | - * comm scheduler queue doorbell. |
---|
3978 | | - */ |
---|
3979 | | - vsi->tx_rings[i]->tail = pf->hw.hw_addr + QTX_COMM_DBELL(pf_q); |
---|
3980 | | - status = ice_ena_vsi_txq(vsi->port_info, vsi->vsi_num, tc, |
---|
3981 | | - num_q_grps, qg_buf, buf_len, NULL); |
---|
3982 | | - if (status) { |
---|
3983 | | - dev_err(&vsi->back->pdev->dev, |
---|
3984 | | - "Failed to set LAN Tx queue context, error: %d\n", |
---|
3985 | | - status); |
---|
3986 | | - err = -ENODEV; |
---|
3987 | | - goto err_cfg_txqs; |
---|
3988 | | - } |
---|
3989 | | - |
---|
3990 | | - /* Add Tx Queue TEID into the VSI tx ring from the response |
---|
3991 | | - * This will complete configuring and enabling the queue. |
---|
3992 | | - */ |
---|
3993 | | - txq = &qg_buf->txqs[0]; |
---|
3994 | | - if (pf_q == le16_to_cpu(txq->txq_id)) |
---|
3995 | | - vsi->tx_rings[i]->txq_teid = |
---|
3996 | | - le32_to_cpu(txq->q_teid); |
---|
3997 | | - } |
---|
3998 | | -err_cfg_txqs: |
---|
3999 | | - devm_kfree(&pf->pdev->dev, qg_buf); |
---|
4000 | | - return err; |
---|
4001 | | -} |
---|
4002 | | - |
---|
4003 | | -/** |
---|
4004 | | - * ice_setup_rx_ctx - Configure a receive ring context |
---|
4005 | | - * @ring: The Rx ring to configure |
---|
4006 | | - * |
---|
4007 | | - * Configure the Rx descriptor ring in RLAN context. |
---|
4008 | | - */ |
---|
4009 | | -static int ice_setup_rx_ctx(struct ice_ring *ring) |
---|
4010 | | -{ |
---|
4011 | | - struct ice_vsi *vsi = ring->vsi; |
---|
4012 | | - struct ice_hw *hw = &vsi->back->hw; |
---|
4013 | | - u32 rxdid = ICE_RXDID_FLEX_NIC; |
---|
4014 | | - struct ice_rlan_ctx rlan_ctx; |
---|
4015 | | - u32 regval; |
---|
4016 | | - u16 pf_q; |
---|
4017 | | - int err; |
---|
4018 | | - |
---|
4019 | | - /* what is RX queue number in global space of 2K rx queues */ |
---|
4020 | | - pf_q = vsi->rxq_map[ring->q_index]; |
---|
4021 | | - |
---|
4022 | | - /* clear the context structure first */ |
---|
4023 | | - memset(&rlan_ctx, 0, sizeof(rlan_ctx)); |
---|
4024 | | - |
---|
4025 | | - rlan_ctx.base = ring->dma >> ICE_RLAN_BASE_S; |
---|
4026 | | - |
---|
4027 | | - rlan_ctx.qlen = ring->count; |
---|
4028 | | - |
---|
4029 | | - /* Receive Packet Data Buffer Size. |
---|
4030 | | - * The Packet Data Buffer Size is defined in 128 byte units. |
---|
4031 | | - */ |
---|
4032 | | - rlan_ctx.dbuf = vsi->rx_buf_len >> ICE_RLAN_CTX_DBUF_S; |
---|
4033 | | - |
---|
4034 | | - /* use 32 byte descriptors */ |
---|
4035 | | - rlan_ctx.dsize = 1; |
---|
4036 | | - |
---|
4037 | | - /* Strip the Ethernet CRC bytes before the packet is posted to host |
---|
4038 | | - * memory. |
---|
4039 | | - */ |
---|
4040 | | - rlan_ctx.crcstrip = 1; |
---|
4041 | | - |
---|
4042 | | - /* L2TSEL flag defines the reported L2 Tags in the receive descriptor */ |
---|
4043 | | - rlan_ctx.l2tsel = 1; |
---|
4044 | | - |
---|
4045 | | - rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT; |
---|
4046 | | - rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT; |
---|
4047 | | - rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT; |
---|
4048 | | - |
---|
4049 | | - /* This controls whether VLAN is stripped from inner headers |
---|
4050 | | - * The VLAN in the inner L2 header is stripped to the receive |
---|
4051 | | - * descriptor if enabled by this flag. |
---|
4052 | | - */ |
---|
4053 | | - rlan_ctx.showiv = 0; |
---|
4054 | | - |
---|
4055 | | - /* Max packet size for this queue - must not be set to a larger value |
---|
4056 | | - * than 5 x DBUF |
---|
4057 | | - */ |
---|
4058 | | - rlan_ctx.rxmax = min_t(u16, vsi->max_frame, |
---|
4059 | | - ICE_MAX_CHAINED_RX_BUFS * vsi->rx_buf_len); |
---|
4060 | | - |
---|
4061 | | - /* Rx queue threshold in units of 64 */ |
---|
4062 | | - rlan_ctx.lrxqthresh = 1; |
---|
4063 | | - |
---|
4064 | | - /* Enable Flexible Descriptors in the queue context which |
---|
4065 | | - * allows this driver to select a specific receive descriptor format |
---|
4066 | | - */ |
---|
4067 | | - regval = rd32(hw, QRXFLXP_CNTXT(pf_q)); |
---|
4068 | | - regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) & |
---|
4069 | | - QRXFLXP_CNTXT_RXDID_IDX_M; |
---|
4070 | | - |
---|
4071 | | - /* increasing context priority to pick up profile id; |
---|
4072 | | - * default is 0x01; setting to 0x03 to ensure profile |
---|
4073 | | - * is programming if prev context is of same priority |
---|
4074 | | - */ |
---|
4075 | | - regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) & |
---|
4076 | | - QRXFLXP_CNTXT_RXDID_PRIO_M; |
---|
4077 | | - |
---|
4078 | | - wr32(hw, QRXFLXP_CNTXT(pf_q), regval); |
---|
4079 | | - |
---|
4080 | | - /* Absolute queue number out of 2K needs to be passed */ |
---|
4081 | | - err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q); |
---|
4082 | | - if (err) { |
---|
4083 | | - dev_err(&vsi->back->pdev->dev, |
---|
4084 | | - "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n", |
---|
4085 | | - pf_q, err); |
---|
4086 | | - return -EIO; |
---|
4087 | | - } |
---|
4088 | | - |
---|
4089 | | - /* init queue specific tail register */ |
---|
4090 | | - ring->tail = hw->hw_addr + QRX_TAIL(pf_q); |
---|
4091 | | - writel(0, ring->tail); |
---|
4092 | | - ice_alloc_rx_bufs(ring, ICE_DESC_UNUSED(ring)); |
---|
4093 | | - |
---|
4094 | | - return 0; |
---|
4095 | | -} |
---|
4096 | | - |
---|
4097 | | -/** |
---|
4098 | | - * ice_vsi_cfg_rxqs - Configure the VSI for Rx |
---|
4099 | | - * @vsi: the VSI being configured |
---|
4100 | | - * |
---|
4101 | | - * Return 0 on success and a negative value on error |
---|
4102 | | - * Configure the Rx VSI for operation. |
---|
4103 | | - */ |
---|
4104 | | -static int ice_vsi_cfg_rxqs(struct ice_vsi *vsi) |
---|
4105 | | -{ |
---|
4106 | | - int err = 0; |
---|
4107 | | - u16 i; |
---|
4108 | | - |
---|
4109 | | - if (vsi->netdev && vsi->netdev->mtu > ETH_DATA_LEN) |
---|
4110 | | - vsi->max_frame = vsi->netdev->mtu + |
---|
4111 | | - ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; |
---|
4112 | | - else |
---|
4113 | | - vsi->max_frame = ICE_RXBUF_2048; |
---|
4114 | | - |
---|
4115 | | - vsi->rx_buf_len = ICE_RXBUF_2048; |
---|
4116 | | - /* set up individual rings */ |
---|
4117 | | - for (i = 0; i < vsi->num_rxq && !err; i++) |
---|
4118 | | - err = ice_setup_rx_ctx(vsi->rx_rings[i]); |
---|
4119 | | - |
---|
4120 | | - if (err) { |
---|
4121 | | - dev_err(&vsi->back->pdev->dev, "ice_setup_rx_ctx failed\n"); |
---|
4122 | | - return -EIO; |
---|
4123 | | - } |
---|
4124 | | - return err; |
---|
4125 | | -} |
---|
4126 | | - |
---|
4127 | | -/** |
---|
4128 | 5194 | * ice_vsi_cfg - Setup the VSI |
---|
4129 | 5195 | * @vsi: the VSI being configured |
---|
4130 | 5196 | * |
---|
4131 | 5197 | * Return 0 on success and negative value on error |
---|
4132 | 5198 | */ |
---|
4133 | | -static int ice_vsi_cfg(struct ice_vsi *vsi) |
---|
| 5199 | +int ice_vsi_cfg(struct ice_vsi *vsi) |
---|
4134 | 5200 | { |
---|
4135 | 5201 | int err; |
---|
4136 | 5202 | |
---|
4137 | 5203 | if (vsi->netdev) { |
---|
4138 | 5204 | ice_set_rx_mode(vsi->netdev); |
---|
4139 | | - err = ice_restore_vlan(vsi); |
---|
4140 | | - if (err) |
---|
4141 | | - return err; |
---|
4142 | | - } |
---|
4143 | 5205 | |
---|
4144 | | - err = ice_vsi_cfg_txqs(vsi); |
---|
| 5206 | + if (vsi->type != ICE_VSI_LB) { |
---|
| 5207 | + err = ice_vsi_vlan_setup(vsi); |
---|
| 5208 | + |
---|
| 5209 | + if (err) |
---|
| 5210 | + return err; |
---|
| 5211 | + } |
---|
| 5212 | + } |
---|
| 5213 | + ice_vsi_cfg_dcb_rings(vsi); |
---|
| 5214 | + |
---|
| 5215 | + err = ice_vsi_cfg_lan_txqs(vsi); |
---|
| 5216 | + if (!err && ice_is_xdp_ena_vsi(vsi)) |
---|
| 5217 | + err = ice_vsi_cfg_xdp_txqs(vsi); |
---|
4145 | 5218 | if (!err) |
---|
4146 | 5219 | err = ice_vsi_cfg_rxqs(vsi); |
---|
4147 | 5220 | |
---|
4148 | 5221 | return err; |
---|
4149 | | -} |
---|
4150 | | - |
---|
4151 | | -/** |
---|
4152 | | - * ice_vsi_stop_tx_rings - Disable Tx rings |
---|
4153 | | - * @vsi: the VSI being configured |
---|
4154 | | - */ |
---|
4155 | | -static int ice_vsi_stop_tx_rings(struct ice_vsi *vsi) |
---|
4156 | | -{ |
---|
4157 | | - struct ice_pf *pf = vsi->back; |
---|
4158 | | - struct ice_hw *hw = &pf->hw; |
---|
4159 | | - enum ice_status status; |
---|
4160 | | - u32 *q_teids, val; |
---|
4161 | | - u16 *q_ids, i; |
---|
4162 | | - int err = 0; |
---|
4163 | | - |
---|
4164 | | - if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS) |
---|
4165 | | - return -EINVAL; |
---|
4166 | | - |
---|
4167 | | - q_teids = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*q_teids), |
---|
4168 | | - GFP_KERNEL); |
---|
4169 | | - if (!q_teids) |
---|
4170 | | - return -ENOMEM; |
---|
4171 | | - |
---|
4172 | | - q_ids = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*q_ids), |
---|
4173 | | - GFP_KERNEL); |
---|
4174 | | - if (!q_ids) { |
---|
4175 | | - err = -ENOMEM; |
---|
4176 | | - goto err_alloc_q_ids; |
---|
4177 | | - } |
---|
4178 | | - |
---|
4179 | | - /* set up the tx queue list to be disabled */ |
---|
4180 | | - ice_for_each_txq(vsi, i) { |
---|
4181 | | - u16 v_idx; |
---|
4182 | | - |
---|
4183 | | - if (!vsi->tx_rings || !vsi->tx_rings[i]) { |
---|
4184 | | - err = -EINVAL; |
---|
4185 | | - goto err_out; |
---|
4186 | | - } |
---|
4187 | | - |
---|
4188 | | - q_ids[i] = vsi->txq_map[i]; |
---|
4189 | | - q_teids[i] = vsi->tx_rings[i]->txq_teid; |
---|
4190 | | - |
---|
4191 | | - /* clear cause_ena bit for disabled queues */ |
---|
4192 | | - val = rd32(hw, QINT_TQCTL(vsi->tx_rings[i]->reg_idx)); |
---|
4193 | | - val &= ~QINT_TQCTL_CAUSE_ENA_M; |
---|
4194 | | - wr32(hw, QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val); |
---|
4195 | | - |
---|
4196 | | - /* software is expected to wait for 100 ns */ |
---|
4197 | | - ndelay(100); |
---|
4198 | | - |
---|
4199 | | - /* trigger a software interrupt for the vector associated to |
---|
4200 | | - * the queue to schedule napi handler |
---|
4201 | | - */ |
---|
4202 | | - v_idx = vsi->tx_rings[i]->q_vector->v_idx; |
---|
4203 | | - wr32(hw, GLINT_DYN_CTL(vsi->base_vector + v_idx), |
---|
4204 | | - GLINT_DYN_CTL_SWINT_TRIG_M | GLINT_DYN_CTL_INTENA_MSK_M); |
---|
4205 | | - } |
---|
4206 | | - status = ice_dis_vsi_txq(vsi->port_info, vsi->num_txq, q_ids, q_teids, |
---|
4207 | | - NULL); |
---|
4208 | | - /* if the disable queue command was exercised during an active reset |
---|
4209 | | - * flow, ICE_ERR_RESET_ONGOING is returned. This is not an error as |
---|
4210 | | - * the reset operation disables queues at the hardware level anyway. |
---|
4211 | | - */ |
---|
4212 | | - if (status == ICE_ERR_RESET_ONGOING) { |
---|
4213 | | - dev_dbg(&pf->pdev->dev, |
---|
4214 | | - "Reset in progress. LAN Tx queues already disabled\n"); |
---|
4215 | | - } else if (status) { |
---|
4216 | | - dev_err(&pf->pdev->dev, |
---|
4217 | | - "Failed to disable LAN Tx queues, error: %d\n", |
---|
4218 | | - status); |
---|
4219 | | - err = -ENODEV; |
---|
4220 | | - } |
---|
4221 | | - |
---|
4222 | | -err_out: |
---|
4223 | | - devm_kfree(&pf->pdev->dev, q_ids); |
---|
4224 | | - |
---|
4225 | | -err_alloc_q_ids: |
---|
4226 | | - devm_kfree(&pf->pdev->dev, q_teids); |
---|
4227 | | - |
---|
4228 | | - return err; |
---|
4229 | | -} |
---|
4230 | | - |
---|
4231 | | -/** |
---|
4232 | | - * ice_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled |
---|
4233 | | - * @pf: the PF being configured |
---|
4234 | | - * @pf_q: the PF queue |
---|
4235 | | - * @ena: enable or disable state of the queue |
---|
4236 | | - * |
---|
4237 | | - * This routine will wait for the given Rx queue of the PF to reach the |
---|
4238 | | - * enabled or disabled state. |
---|
4239 | | - * Returns -ETIMEDOUT in case of failing to reach the requested state after |
---|
4240 | | - * multiple retries; else will return 0 in case of success. |
---|
4241 | | - */ |
---|
4242 | | -static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena) |
---|
4243 | | -{ |
---|
4244 | | - int i; |
---|
4245 | | - |
---|
4246 | | - for (i = 0; i < ICE_Q_WAIT_RETRY_LIMIT; i++) { |
---|
4247 | | - u32 rx_reg = rd32(&pf->hw, QRX_CTRL(pf_q)); |
---|
4248 | | - |
---|
4249 | | - if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M)) |
---|
4250 | | - break; |
---|
4251 | | - |
---|
4252 | | - usleep_range(10, 20); |
---|
4253 | | - } |
---|
4254 | | - if (i >= ICE_Q_WAIT_RETRY_LIMIT) |
---|
4255 | | - return -ETIMEDOUT; |
---|
4256 | | - |
---|
4257 | | - return 0; |
---|
4258 | | -} |
---|
4259 | | - |
---|
4260 | | -/** |
---|
4261 | | - * ice_vsi_ctrl_rx_rings - Start or stop a VSI's rx rings |
---|
4262 | | - * @vsi: the VSI being configured |
---|
4263 | | - * @ena: start or stop the rx rings |
---|
4264 | | - */ |
---|
4265 | | -static int ice_vsi_ctrl_rx_rings(struct ice_vsi *vsi, bool ena) |
---|
4266 | | -{ |
---|
4267 | | - struct ice_pf *pf = vsi->back; |
---|
4268 | | - struct ice_hw *hw = &pf->hw; |
---|
4269 | | - int i, j, ret = 0; |
---|
4270 | | - |
---|
4271 | | - for (i = 0; i < vsi->num_rxq; i++) { |
---|
4272 | | - int pf_q = vsi->rxq_map[i]; |
---|
4273 | | - u32 rx_reg; |
---|
4274 | | - |
---|
4275 | | - for (j = 0; j < ICE_Q_WAIT_MAX_RETRY; j++) { |
---|
4276 | | - rx_reg = rd32(hw, QRX_CTRL(pf_q)); |
---|
4277 | | - if (((rx_reg >> QRX_CTRL_QENA_REQ_S) & 1) == |
---|
4278 | | - ((rx_reg >> QRX_CTRL_QENA_STAT_S) & 1)) |
---|
4279 | | - break; |
---|
4280 | | - usleep_range(1000, 2000); |
---|
4281 | | - } |
---|
4282 | | - |
---|
4283 | | - /* Skip if the queue is already in the requested state */ |
---|
4284 | | - if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M)) |
---|
4285 | | - continue; |
---|
4286 | | - |
---|
4287 | | - /* turn on/off the queue */ |
---|
4288 | | - if (ena) |
---|
4289 | | - rx_reg |= QRX_CTRL_QENA_REQ_M; |
---|
4290 | | - else |
---|
4291 | | - rx_reg &= ~QRX_CTRL_QENA_REQ_M; |
---|
4292 | | - wr32(hw, QRX_CTRL(pf_q), rx_reg); |
---|
4293 | | - |
---|
4294 | | - /* wait for the change to finish */ |
---|
4295 | | - ret = ice_pf_rxq_wait(pf, pf_q, ena); |
---|
4296 | | - if (ret) { |
---|
4297 | | - dev_err(&pf->pdev->dev, |
---|
4298 | | - "VSI idx %d Rx ring %d %sable timeout\n", |
---|
4299 | | - vsi->idx, pf_q, (ena ? "en" : "dis")); |
---|
4300 | | - break; |
---|
4301 | | - } |
---|
4302 | | - } |
---|
4303 | | - |
---|
4304 | | - return ret; |
---|
4305 | | -} |
---|
4306 | | - |
---|
4307 | | -/** |
---|
4308 | | - * ice_vsi_start_rx_rings - start VSI's rx rings |
---|
4309 | | - * @vsi: the VSI whose rings are to be started |
---|
4310 | | - * |
---|
4311 | | - * Returns 0 on success and a negative value on error |
---|
4312 | | - */ |
---|
4313 | | -static int ice_vsi_start_rx_rings(struct ice_vsi *vsi) |
---|
4314 | | -{ |
---|
4315 | | - return ice_vsi_ctrl_rx_rings(vsi, true); |
---|
4316 | | -} |
---|
4317 | | - |
---|
4318 | | -/** |
---|
4319 | | - * ice_vsi_stop_rx_rings - stop VSI's rx rings |
---|
4320 | | - * @vsi: the VSI |
---|
4321 | | - * |
---|
4322 | | - * Returns 0 on success and a negative value on error |
---|
4323 | | - */ |
---|
4324 | | -static int ice_vsi_stop_rx_rings(struct ice_vsi *vsi) |
---|
4325 | | -{ |
---|
4326 | | - return ice_vsi_ctrl_rx_rings(vsi, false); |
---|
4327 | | -} |
---|
4328 | | - |
---|
4329 | | -/** |
---|
4330 | | - * ice_vsi_stop_tx_rx_rings - stop VSI's tx and rx rings |
---|
4331 | | - * @vsi: the VSI |
---|
4332 | | - * Returns 0 on success and a negative value on error |
---|
4333 | | - */ |
---|
4334 | | -static int ice_vsi_stop_tx_rx_rings(struct ice_vsi *vsi) |
---|
4335 | | -{ |
---|
4336 | | - int err_tx, err_rx; |
---|
4337 | | - |
---|
4338 | | - err_tx = ice_vsi_stop_tx_rings(vsi); |
---|
4339 | | - if (err_tx) |
---|
4340 | | - dev_dbg(&vsi->back->pdev->dev, "Failed to disable Tx rings\n"); |
---|
4341 | | - |
---|
4342 | | - err_rx = ice_vsi_stop_rx_rings(vsi); |
---|
4343 | | - if (err_rx) |
---|
4344 | | - dev_dbg(&vsi->back->pdev->dev, "Failed to disable Rx rings\n"); |
---|
4345 | | - |
---|
4346 | | - if (err_tx || err_rx) |
---|
4347 | | - return -EIO; |
---|
4348 | | - |
---|
4349 | | - return 0; |
---|
4350 | 5222 | } |
---|
4351 | 5223 | |
---|
4352 | 5224 | /** |
---|
.. | .. |
---|
4360 | 5232 | if (!vsi->netdev) |
---|
4361 | 5233 | return; |
---|
4362 | 5234 | |
---|
4363 | | - for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) { |
---|
| 5235 | + ice_for_each_q_vector(vsi, q_idx) { |
---|
4364 | 5236 | struct ice_q_vector *q_vector = vsi->q_vectors[q_idx]; |
---|
4365 | 5237 | |
---|
4366 | 5238 | if (q_vector->rx.ring || q_vector->tx.ring) |
---|
.. | .. |
---|
4379 | 5251 | struct ice_pf *pf = vsi->back; |
---|
4380 | 5252 | int err; |
---|
4381 | 5253 | |
---|
4382 | | - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) |
---|
4383 | | - ice_vsi_cfg_msix(vsi); |
---|
4384 | | - else |
---|
4385 | | - return -ENOTSUPP; |
---|
| 5254 | + ice_vsi_cfg_msix(vsi); |
---|
4386 | 5255 | |
---|
4387 | 5256 | /* Enable only Rx rings, Tx rings were enabled by the FW when the |
---|
4388 | 5257 | * Tx queue group list was configured and the context bits were |
---|
4389 | 5258 | * programmed using ice_vsi_cfg_txqs |
---|
4390 | 5259 | */ |
---|
4391 | | - err = ice_vsi_start_rx_rings(vsi); |
---|
| 5260 | + err = ice_vsi_start_all_rx_rings(vsi); |
---|
4392 | 5261 | if (err) |
---|
4393 | 5262 | return err; |
---|
4394 | 5263 | |
---|
.. | .. |
---|
4404 | 5273 | netif_carrier_on(vsi->netdev); |
---|
4405 | 5274 | } |
---|
4406 | 5275 | |
---|
4407 | | - /* clear this now, and the first stats read will be used as baseline */ |
---|
4408 | | - vsi->stat_offsets_loaded = false; |
---|
4409 | | - |
---|
| 5276 | + /* Perform an initial read of the statistics registers now to |
---|
| 5277 | +	 * set the baseline so counters are ready when the interface is up
---|
| 5278 | + */ |
---|
| 5279 | + ice_update_eth_stats(vsi); |
---|
4410 | 5280 | ice_service_task_schedule(pf); |
---|
4411 | 5281 | |
---|
4412 | | - return err; |
---|
| 5282 | + return 0; |
---|
4413 | 5283 | } |
---|
4414 | 5284 | |
---|
4415 | 5285 | /** |
---|
.. | .. |
---|
4436 | 5306 | * This function fetches stats from the ring considering the atomic operations |
---|
4437 | 5307 | * that needs to be performed to read u64 values in 32 bit machine. |
---|
4438 | 5308 | */ |
---|
4439 | | -static void ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts, |
---|
4440 | | - u64 *bytes) |
---|
| 5309 | +static void |
---|
| 5310 | +ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts, u64 *bytes) |
---|
4441 | 5311 | { |
---|
4442 | 5312 | unsigned int start; |
---|
4443 | 5313 | *pkts = 0; |
---|
.. | .. |
---|
4453 | 5323 | } |
---|
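The elided body follows the standard u64_stats seqcount read loop: a 64-bit counter cannot be read atomically on a 32-bit machine, so the reader retries whenever the writer bumped the sequence mid-read. Roughly (syncp and stats field names assumed):

	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&ring->syncp);
		*pkts = ring->stats.pkts;
		*bytes = ring->stats.bytes;
	} while (u64_stats_fetch_retry_irq(&ring->syncp, start));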
4454 | 5324 | |
---|
4455 | 5325 | /** |
---|
4456 | | - * ice_stat_update40 - read 40 bit stat from the chip and update stat values |
---|
4457 | | - * @hw: ptr to the hardware info |
---|
4458 | | - * @hireg: high 32 bit HW register to read from |
---|
4459 | | - * @loreg: low 32 bit HW register to read from |
---|
4460 | | - * @prev_stat_loaded: bool to specify if previous stats are loaded |
---|
4461 | | - * @prev_stat: ptr to previous loaded stat value |
---|
4462 | | - * @cur_stat: ptr to current stat value |
---|
4463 | | - */ |
---|
4464 | | -static void ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg, |
---|
4465 | | - bool prev_stat_loaded, u64 *prev_stat, |
---|
4466 | | - u64 *cur_stat) |
---|
4467 | | -{ |
---|
4468 | | - u64 new_data; |
---|
4469 | | - |
---|
4470 | | - new_data = rd32(hw, loreg); |
---|
4471 | | - new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32; |
---|
4472 | | - |
---|
4473 | | - /* device stats are not reset at PFR, they likely will not be zeroed |
---|
4474 | | - * when the driver starts. So save the first values read and use them as |
---|
4475 | | - * offsets to be subtracted from the raw values in order to report stats |
---|
4476 | | - * that count from zero. |
---|
4477 | | - */ |
---|
4478 | | - if (!prev_stat_loaded) |
---|
4479 | | - *prev_stat = new_data; |
---|
4480 | | - if (likely(new_data >= *prev_stat)) |
---|
4481 | | - *cur_stat = new_data - *prev_stat; |
---|
4482 | | - else |
---|
4483 | | - /* to manage the potential roll-over */ |
---|
4484 | | - *cur_stat = (new_data + BIT_ULL(40)) - *prev_stat; |
---|
4485 | | - *cur_stat &= 0xFFFFFFFFFFULL; |
---|
4486 | | -} |
---|
4487 | | - |
---|
4488 | | -/** |
---|
4489 | | - * ice_stat_update32 - read 32 bit stat from the chip and update stat values |
---|
4490 | | - * @hw: ptr to the hardware info |
---|
4491 | | - * @reg: HW register to read from |
---|
4492 | | - * @prev_stat_loaded: bool to specify if previous stats are loaded |
---|
4493 | | - * @prev_stat: ptr to previous loaded stat value |
---|
4494 | | - * @cur_stat: ptr to current stat value |
---|
4495 | | - */ |
---|
4496 | | -static void ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, |
---|
4497 | | - u64 *prev_stat, u64 *cur_stat) |
---|
4498 | | -{ |
---|
4499 | | - u32 new_data; |
---|
4500 | | - |
---|
4501 | | - new_data = rd32(hw, reg); |
---|
4502 | | - |
---|
4503 | | - /* device stats are not reset at PFR, they likely will not be zeroed |
---|
4504 | | - * when the driver starts. So save the first values read and use them as |
---|
4505 | | - * offsets to be subtracted from the raw values in order to report stats |
---|
4506 | | - * that count from zero. |
---|
4507 | | - */ |
---|
4508 | | - if (!prev_stat_loaded) |
---|
4509 | | - *prev_stat = new_data; |
---|
4510 | | - if (likely(new_data >= *prev_stat)) |
---|
4511 | | - *cur_stat = new_data - *prev_stat; |
---|
4512 | | - else |
---|
4513 | | - /* to manage the potential roll-over */ |
---|
4514 | | - *cur_stat = (new_data + BIT_ULL(32)) - *prev_stat; |
---|
4515 | | -} |
---|
4516 | | - |
---|
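The removed helpers handled counter rollover explicitly; the equivalent logic now lives in shared statistics code. A worked 40-bit example: with prev = 0xFFFFFFFF00 and a wrapped reading of new = 0x100, new < prev, so the delta is computed as (new + BIT_ULL(40)) - prev = 0x200, then masked back to 40 bits. A self-contained sketch mirroring the removed logic:

	#define BIT_ULL(n)	(1ULL << (n))	/* as in <linux/bits.h> */

	static inline u64 stat_delta40(u64 new, u64 prev)
	{
		u64 delta = (new >= prev) ? new - prev
					  : (new + BIT_ULL(40)) - prev;

		return delta & (BIT_ULL(40) - 1);	/* keep low 40 bits */
	}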
4517 | | -/** |
---|
4518 | | - * ice_update_eth_stats - Update VSI-specific ethernet statistics counters |
---|
| 5326 | + * ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters |
---|
4519 | 5327 | * @vsi: the VSI to be updated |
---|
| 5328 | + * @rings: rings to work on |
---|
| 5329 | + * @count: number of rings |
---|
4520 | 5330 | */ |
---|
4521 | | -static void ice_update_eth_stats(struct ice_vsi *vsi) |
---|
| 5331 | +static void |
---|
| 5332 | +ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, struct ice_ring **rings, |
---|
| 5333 | + u16 count) |
---|
4522 | 5334 | { |
---|
4523 | | - struct ice_eth_stats *prev_es, *cur_es; |
---|
4524 | | - struct ice_hw *hw = &vsi->back->hw; |
---|
4525 | | - u16 vsi_num = vsi->vsi_num; /* HW absolute index of a VSI */ |
---|
| 5335 | + struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats; |
---|
| 5336 | + u16 i; |
---|
4526 | 5337 | |
---|
4527 | | - prev_es = &vsi->eth_stats_prev; |
---|
4528 | | - cur_es = &vsi->eth_stats; |
---|
| 5338 | + for (i = 0; i < count; i++) { |
---|
| 5339 | + struct ice_ring *ring; |
---|
| 5340 | + u64 pkts, bytes; |
---|
4529 | 5341 | |
---|
4530 | | - ice_stat_update40(hw, GLV_GORCH(vsi_num), GLV_GORCL(vsi_num), |
---|
4531 | | - vsi->stat_offsets_loaded, &prev_es->rx_bytes, |
---|
4532 | | - &cur_es->rx_bytes); |
---|
4533 | | - |
---|
4534 | | - ice_stat_update40(hw, GLV_UPRCH(vsi_num), GLV_UPRCL(vsi_num), |
---|
4535 | | - vsi->stat_offsets_loaded, &prev_es->rx_unicast, |
---|
4536 | | - &cur_es->rx_unicast); |
---|
4537 | | - |
---|
4538 | | - ice_stat_update40(hw, GLV_MPRCH(vsi_num), GLV_MPRCL(vsi_num), |
---|
4539 | | - vsi->stat_offsets_loaded, &prev_es->rx_multicast, |
---|
4540 | | - &cur_es->rx_multicast); |
---|
4541 | | - |
---|
4542 | | - ice_stat_update40(hw, GLV_BPRCH(vsi_num), GLV_BPRCL(vsi_num), |
---|
4543 | | - vsi->stat_offsets_loaded, &prev_es->rx_broadcast, |
---|
4544 | | - &cur_es->rx_broadcast); |
---|
4545 | | - |
---|
4546 | | - ice_stat_update32(hw, GLV_RDPC(vsi_num), vsi->stat_offsets_loaded, |
---|
4547 | | - &prev_es->rx_discards, &cur_es->rx_discards); |
---|
4548 | | - |
---|
4549 | | - ice_stat_update40(hw, GLV_GOTCH(vsi_num), GLV_GOTCL(vsi_num), |
---|
4550 | | - vsi->stat_offsets_loaded, &prev_es->tx_bytes, |
---|
4551 | | - &cur_es->tx_bytes); |
---|
4552 | | - |
---|
4553 | | - ice_stat_update40(hw, GLV_UPTCH(vsi_num), GLV_UPTCL(vsi_num), |
---|
4554 | | - vsi->stat_offsets_loaded, &prev_es->tx_unicast, |
---|
4555 | | - &cur_es->tx_unicast); |
---|
4556 | | - |
---|
4557 | | - ice_stat_update40(hw, GLV_MPTCH(vsi_num), GLV_MPTCL(vsi_num), |
---|
4558 | | - vsi->stat_offsets_loaded, &prev_es->tx_multicast, |
---|
4559 | | - &cur_es->tx_multicast); |
---|
4560 | | - |
---|
4561 | | - ice_stat_update40(hw, GLV_BPTCH(vsi_num), GLV_BPTCL(vsi_num), |
---|
4562 | | - vsi->stat_offsets_loaded, &prev_es->tx_broadcast, |
---|
4563 | | - &cur_es->tx_broadcast); |
---|
4564 | | - |
---|
4565 | | - ice_stat_update32(hw, GLV_TEPC(vsi_num), vsi->stat_offsets_loaded, |
---|
4566 | | - &prev_es->tx_errors, &cur_es->tx_errors); |
---|
4567 | | - |
---|
4568 | | - vsi->stat_offsets_loaded = true; |
---|
| 5342 | + ring = READ_ONCE(rings[i]); |
---|
| 5343 | + ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes); |
---|
| 5344 | + vsi_stats->tx_packets += pkts; |
---|
| 5345 | + vsi_stats->tx_bytes += bytes; |
---|
| 5346 | + vsi->tx_restart += ring->tx_stats.restart_q; |
---|
| 5347 | + vsi->tx_busy += ring->tx_stats.tx_busy; |
---|
| 5348 | + vsi->tx_linearize += ring->tx_stats.tx_linearize; |
---|
| 5349 | + } |
---|
4569 | 5350 | } |
---|
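
Worth noting in the helper above: each ring pointer is fetched exactly once with READ_ONCE() while the caller holds rcu_read_lock() (visible in the next hunk), so a concurrent ring teardown cannot hand the loop a re-fetched or torn pointer. A rough user-space analogue of that single-load discipline, using C11 atomics (illustrative only, not the kernel's READ_ONCE implementation):

```c
#include <stdatomic.h>
#include <stdint.h>

struct ring { uint64_t pkts; };

/* slot written by another thread; readers must not re-read it mid-use */
_Atomic(struct ring *) slot;

uint64_t read_pkts(void)
{
	/* one load into a local, analogous to READ_ONCE(rings[i]):
	 * the compiler cannot re-read the shared slot mid-function
	 */
	struct ring *r = atomic_load_explicit(&slot, memory_order_relaxed);

	return r ? r->pkts : 0;
}
```
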
4570 | 5351 | |
---|
4571 | 5352 | /** |
---|
.. | .. |
---|
4591 | 5372 | vsi->tx_linearize = 0; |
---|
4592 | 5373 | vsi->rx_buf_failed = 0; |
---|
4593 | 5374 | vsi->rx_page_failed = 0; |
---|
| 5375 | + vsi->rx_gro_dropped = 0; |
---|
4594 | 5376 | |
---|
4595 | 5377 | rcu_read_lock(); |
---|
4596 | 5378 | |
---|
4597 | 5379 | /* update Tx rings counters */ |
---|
4598 | | - ice_for_each_txq(vsi, i) { |
---|
4599 | | - ring = READ_ONCE(vsi->tx_rings[i]); |
---|
4600 | | - ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes); |
---|
4601 | | - vsi_stats->tx_packets += pkts; |
---|
4602 | | - vsi_stats->tx_bytes += bytes; |
---|
4603 | | - vsi->tx_restart += ring->tx_stats.restart_q; |
---|
4604 | | - vsi->tx_busy += ring->tx_stats.tx_busy; |
---|
4605 | | - vsi->tx_linearize += ring->tx_stats.tx_linearize; |
---|
4606 | | - } |
---|
| 5380 | + ice_update_vsi_tx_ring_stats(vsi, vsi->tx_rings, vsi->num_txq); |
---|
4607 | 5381 | |
---|
4608 | 5382 | /* update Rx rings counters */ |
---|
4609 | 5383 | ice_for_each_rxq(vsi, i) { |
---|
.. | .. |
---|
4613 | 5387 | vsi_stats->rx_bytes += bytes; |
---|
4614 | 5388 | vsi->rx_buf_failed += ring->rx_stats.alloc_buf_failed; |
---|
4615 | 5389 | vsi->rx_page_failed += ring->rx_stats.alloc_page_failed; |
---|
| 5390 | + vsi->rx_gro_dropped += ring->rx_stats.gro_dropped; |
---|
4616 | 5391 | } |
---|
| 5392 | + |
---|
| 5393 | + /* update XDP Tx rings counters */ |
---|
| 5394 | + if (ice_is_xdp_ena_vsi(vsi)) |
---|
| 5395 | + ice_update_vsi_tx_ring_stats(vsi, vsi->xdp_rings, |
---|
| 5396 | + vsi->num_xdp_txq); |
---|
4617 | 5397 | |
---|
4618 | 5398 | rcu_read_unlock(); |
---|
4619 | 5399 | } |
---|
.. | .. |
---|
4622 | 5402 | * ice_update_vsi_stats - Update VSI stats counters |
---|
4623 | 5403 | * @vsi: the VSI to be updated |
---|
4624 | 5404 | */ |
---|
4625 | | -static void ice_update_vsi_stats(struct ice_vsi *vsi) |
---|
| 5405 | +void ice_update_vsi_stats(struct ice_vsi *vsi) |
---|
4626 | 5406 | { |
---|
4627 | 5407 | struct rtnl_link_stats64 *cur_ns = &vsi->net_stats; |
---|
4628 | 5408 | struct ice_eth_stats *cur_es = &vsi->eth_stats; |
---|
.. | .. |
---|
4639 | 5419 | ice_update_eth_stats(vsi); |
---|
4640 | 5420 | |
---|
4641 | 5421 | cur_ns->tx_errors = cur_es->tx_errors; |
---|
4642 | | - cur_ns->rx_dropped = cur_es->rx_discards; |
---|
| 5422 | + cur_ns->rx_dropped = cur_es->rx_discards + vsi->rx_gro_dropped; |
---|
4643 | 5423 | cur_ns->tx_dropped = cur_es->tx_discards; |
---|
4644 | 5424 | cur_ns->multicast = cur_es->rx_multicast; |
---|
4645 | 5425 | |
---|
.. | .. |
---|
4647 | 5427 | if (vsi->type == ICE_VSI_PF) { |
---|
4648 | 5428 | cur_ns->rx_crc_errors = pf->stats.crc_errors; |
---|
4649 | 5429 | cur_ns->rx_errors = pf->stats.crc_errors + |
---|
4650 | | - pf->stats.illegal_bytes; |
---|
| 5430 | + pf->stats.illegal_bytes + |
---|
| 5431 | + pf->stats.rx_len_errors + |
---|
| 5432 | + pf->stats.rx_undersize + |
---|
| 5433 | + pf->hw_csum_rx_error + |
---|
| 5434 | + pf->stats.rx_jabber + |
---|
| 5435 | + pf->stats.rx_fragments + |
---|
| 5436 | + pf->stats.rx_oversize; |
---|
4651 | 5437 | cur_ns->rx_length_errors = pf->stats.rx_len_errors; |
---|
| 5438 | + /* record drops from the port level */ |
---|
| 5439 | + cur_ns->rx_missed_errors = pf->stats.eth.rx_discards; |
---|
4652 | 5440 | } |
---|
4653 | 5441 | } |
---|
4654 | 5442 | |
---|
.. | .. |
---|
4656 | 5444 | * ice_update_pf_stats - Update PF port stats counters |
---|
4657 | 5445 | * @pf: PF whose stats needs to be updated |
---|
4658 | 5446 | */ |
---|
4659 | | -static void ice_update_pf_stats(struct ice_pf *pf) |
---|
| 5447 | +void ice_update_pf_stats(struct ice_pf *pf) |
---|
4660 | 5448 | { |
---|
4661 | 5449 | struct ice_hw_port_stats *prev_ps, *cur_ps; |
---|
4662 | 5450 | struct ice_hw *hw = &pf->hw; |
---|
4663 | | - u8 pf_id; |
---|
| 5451 | + u16 fd_ctr_base; |
---|
| 5452 | + u8 port; |
---|
4664 | 5453 | |
---|
| 5454 | + port = hw->port_info->lport; |
---|
4665 | 5455 | prev_ps = &pf->stats_prev; |
---|
4666 | 5456 | cur_ps = &pf->stats; |
---|
4667 | | - pf_id = hw->pf_id; |
---|
4668 | 5457 | |
---|
4669 | | - ice_stat_update40(hw, GLPRT_GORCH(pf_id), GLPRT_GORCL(pf_id), |
---|
4670 | | - pf->stat_prev_loaded, &prev_ps->eth.rx_bytes, |
---|
| 5458 | + ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded, |
---|
| 5459 | + &prev_ps->eth.rx_bytes, |
---|
4671 | 5460 | &cur_ps->eth.rx_bytes); |
---|
4672 | 5461 | |
---|
4673 | | - ice_stat_update40(hw, GLPRT_UPRCH(pf_id), GLPRT_UPRCL(pf_id), |
---|
4674 | | - pf->stat_prev_loaded, &prev_ps->eth.rx_unicast, |
---|
| 5462 | + ice_stat_update40(hw, GLPRT_UPRCL(port), pf->stat_prev_loaded, |
---|
| 5463 | + &prev_ps->eth.rx_unicast, |
---|
4675 | 5464 | &cur_ps->eth.rx_unicast); |
---|
4676 | 5465 | |
---|
4677 | | - ice_stat_update40(hw, GLPRT_MPRCH(pf_id), GLPRT_MPRCL(pf_id), |
---|
4678 | | - pf->stat_prev_loaded, &prev_ps->eth.rx_multicast, |
---|
| 5466 | + ice_stat_update40(hw, GLPRT_MPRCL(port), pf->stat_prev_loaded, |
---|
| 5467 | + &prev_ps->eth.rx_multicast, |
---|
4679 | 5468 | &cur_ps->eth.rx_multicast); |
---|
4680 | 5469 | |
---|
4681 | | - ice_stat_update40(hw, GLPRT_BPRCH(pf_id), GLPRT_BPRCL(pf_id), |
---|
4682 | | - pf->stat_prev_loaded, &prev_ps->eth.rx_broadcast, |
---|
| 5470 | + ice_stat_update40(hw, GLPRT_BPRCL(port), pf->stat_prev_loaded, |
---|
| 5471 | + &prev_ps->eth.rx_broadcast, |
---|
4683 | 5472 | &cur_ps->eth.rx_broadcast); |
---|
4684 | 5473 | |
---|
4685 | | - ice_stat_update40(hw, GLPRT_GOTCH(pf_id), GLPRT_GOTCL(pf_id), |
---|
4686 | | - pf->stat_prev_loaded, &prev_ps->eth.tx_bytes, |
---|
| 5474 | + ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded, |
---|
| 5475 | + &prev_ps->eth.rx_discards, |
---|
| 5476 | + &cur_ps->eth.rx_discards); |
---|
| 5477 | + |
---|
| 5478 | + ice_stat_update40(hw, GLPRT_GOTCL(port), pf->stat_prev_loaded, |
---|
| 5479 | + &prev_ps->eth.tx_bytes, |
---|
4687 | 5480 | &cur_ps->eth.tx_bytes); |
---|
4688 | 5481 | |
---|
4689 | | - ice_stat_update40(hw, GLPRT_UPTCH(pf_id), GLPRT_UPTCL(pf_id), |
---|
4690 | | - pf->stat_prev_loaded, &prev_ps->eth.tx_unicast, |
---|
| 5482 | + ice_stat_update40(hw, GLPRT_UPTCL(port), pf->stat_prev_loaded, |
---|
| 5483 | + &prev_ps->eth.tx_unicast, |
---|
4691 | 5484 | &cur_ps->eth.tx_unicast); |
---|
4692 | 5485 | |
---|
4693 | | - ice_stat_update40(hw, GLPRT_MPTCH(pf_id), GLPRT_MPTCL(pf_id), |
---|
4694 | | - pf->stat_prev_loaded, &prev_ps->eth.tx_multicast, |
---|
| 5486 | + ice_stat_update40(hw, GLPRT_MPTCL(port), pf->stat_prev_loaded, |
---|
| 5487 | + &prev_ps->eth.tx_multicast, |
---|
4695 | 5488 | &cur_ps->eth.tx_multicast); |
---|
4696 | 5489 | |
---|
4697 | | - ice_stat_update40(hw, GLPRT_BPTCH(pf_id), GLPRT_BPTCL(pf_id), |
---|
4698 | | - pf->stat_prev_loaded, &prev_ps->eth.tx_broadcast, |
---|
| 5490 | + ice_stat_update40(hw, GLPRT_BPTCL(port), pf->stat_prev_loaded, |
---|
| 5491 | + &prev_ps->eth.tx_broadcast, |
---|
4699 | 5492 | &cur_ps->eth.tx_broadcast); |
---|
4700 | 5493 | |
---|
4701 | | - ice_stat_update32(hw, GLPRT_TDOLD(pf_id), pf->stat_prev_loaded, |
---|
| 5494 | + ice_stat_update32(hw, GLPRT_TDOLD(port), pf->stat_prev_loaded, |
---|
4702 | 5495 | &prev_ps->tx_dropped_link_down, |
---|
4703 | 5496 | &cur_ps->tx_dropped_link_down); |
---|
4704 | 5497 | |
---|
4705 | | - ice_stat_update40(hw, GLPRT_PRC64H(pf_id), GLPRT_PRC64L(pf_id), |
---|
4706 | | - pf->stat_prev_loaded, &prev_ps->rx_size_64, |
---|
4707 | | - &cur_ps->rx_size_64); |
---|
| 5498 | + ice_stat_update40(hw, GLPRT_PRC64L(port), pf->stat_prev_loaded, |
---|
| 5499 | + &prev_ps->rx_size_64, &cur_ps->rx_size_64); |
---|
4708 | 5500 | |
---|
4709 | | - ice_stat_update40(hw, GLPRT_PRC127H(pf_id), GLPRT_PRC127L(pf_id), |
---|
4710 | | - pf->stat_prev_loaded, &prev_ps->rx_size_127, |
---|
4711 | | - &cur_ps->rx_size_127); |
---|
| 5501 | + ice_stat_update40(hw, GLPRT_PRC127L(port), pf->stat_prev_loaded, |
---|
| 5502 | + &prev_ps->rx_size_127, &cur_ps->rx_size_127); |
---|
4712 | 5503 | |
---|
4713 | | - ice_stat_update40(hw, GLPRT_PRC255H(pf_id), GLPRT_PRC255L(pf_id), |
---|
4714 | | - pf->stat_prev_loaded, &prev_ps->rx_size_255, |
---|
4715 | | - &cur_ps->rx_size_255); |
---|
| 5504 | + ice_stat_update40(hw, GLPRT_PRC255L(port), pf->stat_prev_loaded, |
---|
| 5505 | + &prev_ps->rx_size_255, &cur_ps->rx_size_255); |
---|
4716 | 5506 | |
---|
4717 | | - ice_stat_update40(hw, GLPRT_PRC511H(pf_id), GLPRT_PRC511L(pf_id), |
---|
4718 | | - pf->stat_prev_loaded, &prev_ps->rx_size_511, |
---|
4719 | | - &cur_ps->rx_size_511); |
---|
| 5507 | + ice_stat_update40(hw, GLPRT_PRC511L(port), pf->stat_prev_loaded, |
---|
| 5508 | + &prev_ps->rx_size_511, &cur_ps->rx_size_511); |
---|
4720 | 5509 | |
---|
4721 | | - ice_stat_update40(hw, GLPRT_PRC1023H(pf_id), |
---|
4722 | | - GLPRT_PRC1023L(pf_id), pf->stat_prev_loaded, |
---|
| 5510 | + ice_stat_update40(hw, GLPRT_PRC1023L(port), pf->stat_prev_loaded, |
---|
4723 | 5511 | &prev_ps->rx_size_1023, &cur_ps->rx_size_1023); |
---|
4724 | 5512 | |
---|
4725 | | - ice_stat_update40(hw, GLPRT_PRC1522H(pf_id), |
---|
4726 | | - GLPRT_PRC1522L(pf_id), pf->stat_prev_loaded, |
---|
| 5513 | + ice_stat_update40(hw, GLPRT_PRC1522L(port), pf->stat_prev_loaded, |
---|
4727 | 5514 | &prev_ps->rx_size_1522, &cur_ps->rx_size_1522); |
---|
4728 | 5515 | |
---|
4729 | | - ice_stat_update40(hw, GLPRT_PRC9522H(pf_id), |
---|
4730 | | - GLPRT_PRC9522L(pf_id), pf->stat_prev_loaded, |
---|
| 5516 | + ice_stat_update40(hw, GLPRT_PRC9522L(port), pf->stat_prev_loaded, |
---|
4731 | 5517 | &prev_ps->rx_size_big, &cur_ps->rx_size_big); |
---|
4732 | 5518 | |
---|
4733 | | - ice_stat_update40(hw, GLPRT_PTC64H(pf_id), GLPRT_PTC64L(pf_id), |
---|
4734 | | - pf->stat_prev_loaded, &prev_ps->tx_size_64, |
---|
4735 | | - &cur_ps->tx_size_64); |
---|
| 5519 | + ice_stat_update40(hw, GLPRT_PTC64L(port), pf->stat_prev_loaded, |
---|
| 5520 | + &prev_ps->tx_size_64, &cur_ps->tx_size_64); |
---|
4736 | 5521 | |
---|
4737 | | - ice_stat_update40(hw, GLPRT_PTC127H(pf_id), GLPRT_PTC127L(pf_id), |
---|
4738 | | - pf->stat_prev_loaded, &prev_ps->tx_size_127, |
---|
4739 | | - &cur_ps->tx_size_127); |
---|
| 5522 | + ice_stat_update40(hw, GLPRT_PTC127L(port), pf->stat_prev_loaded, |
---|
| 5523 | + &prev_ps->tx_size_127, &cur_ps->tx_size_127); |
---|
4740 | 5524 | |
---|
4741 | | - ice_stat_update40(hw, GLPRT_PTC255H(pf_id), GLPRT_PTC255L(pf_id), |
---|
4742 | | - pf->stat_prev_loaded, &prev_ps->tx_size_255, |
---|
4743 | | - &cur_ps->tx_size_255); |
---|
| 5525 | + ice_stat_update40(hw, GLPRT_PTC255L(port), pf->stat_prev_loaded, |
---|
| 5526 | + &prev_ps->tx_size_255, &cur_ps->tx_size_255); |
---|
4744 | 5527 | |
---|
4745 | | - ice_stat_update40(hw, GLPRT_PTC511H(pf_id), GLPRT_PTC511L(pf_id), |
---|
4746 | | - pf->stat_prev_loaded, &prev_ps->tx_size_511, |
---|
4747 | | - &cur_ps->tx_size_511); |
---|
| 5528 | + ice_stat_update40(hw, GLPRT_PTC511L(port), pf->stat_prev_loaded, |
---|
| 5529 | + &prev_ps->tx_size_511, &cur_ps->tx_size_511); |
---|
4748 | 5530 | |
---|
4749 | | - ice_stat_update40(hw, GLPRT_PTC1023H(pf_id), |
---|
4750 | | - GLPRT_PTC1023L(pf_id), pf->stat_prev_loaded, |
---|
| 5531 | + ice_stat_update40(hw, GLPRT_PTC1023L(port), pf->stat_prev_loaded, |
---|
4751 | 5532 | &prev_ps->tx_size_1023, &cur_ps->tx_size_1023); |
---|
4752 | 5533 | |
---|
4753 | | - ice_stat_update40(hw, GLPRT_PTC1522H(pf_id), |
---|
4754 | | - GLPRT_PTC1522L(pf_id), pf->stat_prev_loaded, |
---|
| 5534 | + ice_stat_update40(hw, GLPRT_PTC1522L(port), pf->stat_prev_loaded, |
---|
4755 | 5535 | &prev_ps->tx_size_1522, &cur_ps->tx_size_1522); |
---|
4756 | 5536 | |
---|
4757 | | - ice_stat_update40(hw, GLPRT_PTC9522H(pf_id), |
---|
4758 | | - GLPRT_PTC9522L(pf_id), pf->stat_prev_loaded, |
---|
| 5537 | + ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded, |
---|
4759 | 5538 | &prev_ps->tx_size_big, &cur_ps->tx_size_big); |
---|
4760 | 5539 | |
---|
4761 | | - ice_stat_update32(hw, GLPRT_LXONRXC(pf_id), pf->stat_prev_loaded, |
---|
| 5540 | + fd_ctr_base = hw->fd_ctr_base; |
---|
| 5541 | + |
---|
| 5542 | + ice_stat_update40(hw, |
---|
| 5543 | + GLSTAT_FD_CNT0L(ICE_FD_SB_STAT_IDX(fd_ctr_base)), |
---|
| 5544 | + pf->stat_prev_loaded, &prev_ps->fd_sb_match, |
---|
| 5545 | + &cur_ps->fd_sb_match); |
---|
| 5546 | + ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded, |
---|
4762 | 5547 | &prev_ps->link_xon_rx, &cur_ps->link_xon_rx); |
---|
4763 | 5548 | |
---|
4764 | | - ice_stat_update32(hw, GLPRT_LXOFFRXC(pf_id), pf->stat_prev_loaded, |
---|
| 5549 | + ice_stat_update32(hw, GLPRT_LXOFFRXC(port), pf->stat_prev_loaded, |
---|
4765 | 5550 | &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx); |
---|
4766 | 5551 | |
---|
4767 | | - ice_stat_update32(hw, GLPRT_LXONTXC(pf_id), pf->stat_prev_loaded, |
---|
| 5552 | + ice_stat_update32(hw, GLPRT_LXONTXC(port), pf->stat_prev_loaded, |
---|
4768 | 5553 | &prev_ps->link_xon_tx, &cur_ps->link_xon_tx); |
---|
4769 | 5554 | |
---|
4770 | | - ice_stat_update32(hw, GLPRT_LXOFFTXC(pf_id), pf->stat_prev_loaded, |
---|
| 5555 | + ice_stat_update32(hw, GLPRT_LXOFFTXC(port), pf->stat_prev_loaded, |
---|
4771 | 5556 | &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx); |
---|
4772 | 5557 | |
---|
4773 | | - ice_stat_update32(hw, GLPRT_CRCERRS(pf_id), pf->stat_prev_loaded, |
---|
| 5558 | + ice_update_dcb_stats(pf); |
---|
| 5559 | + |
---|
| 5560 | + ice_stat_update32(hw, GLPRT_CRCERRS(port), pf->stat_prev_loaded, |
---|
4774 | 5561 | &prev_ps->crc_errors, &cur_ps->crc_errors); |
---|
4775 | 5562 | |
---|
4776 | | - ice_stat_update32(hw, GLPRT_ILLERRC(pf_id), pf->stat_prev_loaded, |
---|
| 5563 | + ice_stat_update32(hw, GLPRT_ILLERRC(port), pf->stat_prev_loaded, |
---|
4777 | 5564 | &prev_ps->illegal_bytes, &cur_ps->illegal_bytes); |
---|
4778 | 5565 | |
---|
4779 | | - ice_stat_update32(hw, GLPRT_MLFC(pf_id), pf->stat_prev_loaded, |
---|
| 5566 | + ice_stat_update32(hw, GLPRT_MLFC(port), pf->stat_prev_loaded, |
---|
4780 | 5567 | &prev_ps->mac_local_faults, |
---|
4781 | 5568 | &cur_ps->mac_local_faults); |
---|
4782 | 5569 | |
---|
4783 | | - ice_stat_update32(hw, GLPRT_MRFC(pf_id), pf->stat_prev_loaded, |
---|
| 5570 | + ice_stat_update32(hw, GLPRT_MRFC(port), pf->stat_prev_loaded, |
---|
4784 | 5571 | &prev_ps->mac_remote_faults, |
---|
4785 | 5572 | &cur_ps->mac_remote_faults); |
---|
4786 | 5573 | |
---|
4787 | | - ice_stat_update32(hw, GLPRT_RLEC(pf_id), pf->stat_prev_loaded, |
---|
| 5574 | + ice_stat_update32(hw, GLPRT_RLEC(port), pf->stat_prev_loaded, |
---|
4788 | 5575 | &prev_ps->rx_len_errors, &cur_ps->rx_len_errors); |
---|
4789 | 5576 | |
---|
4790 | | - ice_stat_update32(hw, GLPRT_RUC(pf_id), pf->stat_prev_loaded, |
---|
| 5577 | + ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded, |
---|
4791 | 5578 | &prev_ps->rx_undersize, &cur_ps->rx_undersize); |
---|
4792 | 5579 | |
---|
4793 | | - ice_stat_update32(hw, GLPRT_RFC(pf_id), pf->stat_prev_loaded, |
---|
| 5580 | + ice_stat_update32(hw, GLPRT_RFC(port), pf->stat_prev_loaded, |
---|
4794 | 5581 | &prev_ps->rx_fragments, &cur_ps->rx_fragments); |
---|
4795 | 5582 | |
---|
4796 | | - ice_stat_update32(hw, GLPRT_ROC(pf_id), pf->stat_prev_loaded, |
---|
| 5583 | + ice_stat_update32(hw, GLPRT_ROC(port), pf->stat_prev_loaded, |
---|
4797 | 5584 | &prev_ps->rx_oversize, &cur_ps->rx_oversize); |
---|
4798 | 5585 | |
---|
4799 | | - ice_stat_update32(hw, GLPRT_RJC(pf_id), pf->stat_prev_loaded, |
---|
| 5586 | + ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded, |
---|
4800 | 5587 | &prev_ps->rx_jabber, &cur_ps->rx_jabber); |
---|
| 5588 | + |
---|
| 5589 | + cur_ps->fd_sb_status = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0; |
---|
4801 | 5590 | |
---|
4802 | 5591 | pf->stat_prev_loaded = true; |
---|
4803 | 5592 | } |
---|
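
One detail of the hunk above: ice_stat_update40() now takes only the low register offset (GLPRT_*L) where the old code passed the H/L pair. A plausible reading, and it is an assumption about code outside this hunk, is that the counter is now fetched with one wide read and masked to its 40 valid bits, roughly:

```c
#include <stdint.h>

/* Assumed shape of the new 40-bit read (the helper body is not in this
 * hunk): one 64-bit register access, masked to the valid counter width.
 */
uint64_t read_stat40(uint64_t raw64)
{
	return raw64 & ((1ULL << 40) - 1);
}
```
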
.. | .. |
---|
4816 | 5605 | |
---|
4817 | 5606 | vsi_stats = &vsi->net_stats; |
---|
4818 | 5607 | |
---|
4819 | | - if (test_bit(__ICE_DOWN, vsi->state) || !vsi->num_txq || !vsi->num_rxq) |
---|
| 5608 | + if (!vsi->num_txq || !vsi->num_rxq) |
---|
4820 | 5609 | return; |
---|
| 5610 | + |
---|
4821 | 5611 | /* netdev packet/byte stats come from ring counters. These are obtained
---|
4822 | 5612 | * by summing up ring counters (done by ice_update_vsi_ring_stats).
---|
| 5613 | + * But only call the update routine and read the registers if the VSI is
---|
| 5614 | + * not down. |
---|
4823 | 5615 | */ |
---|
4824 | | - ice_update_vsi_ring_stats(vsi); |
---|
| 5616 | + if (!test_bit(__ICE_DOWN, vsi->state)) |
---|
| 5617 | + ice_update_vsi_ring_stats(vsi); |
---|
4825 | 5618 | stats->tx_packets = vsi_stats->tx_packets; |
---|
4826 | 5619 | stats->tx_bytes = vsi_stats->tx_bytes; |
---|
4827 | 5620 | stats->rx_packets = vsi_stats->rx_packets; |
---|
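
The reshuffled check above changes the semantics of the stats callback: a VSI that is down no longer causes an early return, and the driver now skips only the register reads while still reporting the last accumulated totals. A minimal user-space sketch of that snapshot pattern (names are illustrative, not the ice API):

```c
#include <stdbool.h>
#include <stdint.h>

struct dev_stats { uint64_t tx_packets, rx_packets; };

struct my_dev {
	bool down;
	struct dev_stats snapshot;	/* last totals read from hardware */
};

/* stand-in for the register reads the driver would do */
static void refresh_snapshot_from_hw(struct my_dev *d)
{
	d->snapshot.tx_packets++;
	d->snapshot.rx_packets++;
}

/* Refresh from hardware only while up; a downed device keeps
 * reporting the snapshot captured before it went down.
 */
void my_get_stats(struct my_dev *d, struct dev_stats *out)
{
	if (!d->down)
		refresh_snapshot_from_hw(d);
	*out = d->snapshot;
}
```
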
.. | .. |
---|
4851 | 5644 | if (!vsi->netdev) |
---|
4852 | 5645 | return; |
---|
4853 | 5646 | |
---|
4854 | | - for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) { |
---|
| 5647 | + ice_for_each_q_vector(vsi, q_idx) { |
---|
4855 | 5648 | struct ice_q_vector *q_vector = vsi->q_vectors[q_idx]; |
---|
4856 | 5649 | |
---|
4857 | 5650 | if (q_vector->rx.ring || q_vector->tx.ring) |
---|
.. | .. |
---|
4865 | 5658 | */ |
---|
4866 | 5659 | int ice_down(struct ice_vsi *vsi) |
---|
4867 | 5660 | { |
---|
4868 | | - int i, err; |
---|
| 5661 | + int i, tx_err, rx_err, link_err = 0; |
---|
4869 | 5662 | |
---|
4870 | 5663 | /* Caller of this function is expected to set the |
---|
4871 | 5664 | * vsi->state __ICE_DOWN bit |
---|
.. | .. |
---|
4876 | 5669 | } |
---|
4877 | 5670 | |
---|
4878 | 5671 | ice_vsi_dis_irq(vsi); |
---|
4879 | | - err = ice_vsi_stop_tx_rx_rings(vsi); |
---|
| 5672 | + |
---|
| 5673 | + tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0); |
---|
| 5674 | + if (tx_err) |
---|
| 5675 | + netdev_err(vsi->netdev, "Failed to stop Tx rings, VSI %d error %d\n",
---|
| 5676 | + vsi->vsi_num, tx_err); |
---|
| 5677 | + if (!tx_err && ice_is_xdp_ena_vsi(vsi)) { |
---|
| 5678 | + tx_err = ice_vsi_stop_xdp_tx_rings(vsi); |
---|
| 5679 | + if (tx_err) |
---|
| 5680 | + netdev_err(vsi->netdev, "Failed to stop XDP rings, VSI %d error %d\n",
---|
| 5681 | + vsi->vsi_num, tx_err); |
---|
| 5682 | + } |
---|
| 5683 | + |
---|
| 5684 | + rx_err = ice_vsi_stop_all_rx_rings(vsi); |
---|
| 5685 | + if (rx_err) |
---|
| 5686 | + netdev_err(vsi->netdev, "Failed to stop Rx rings, VSI %d error %d\n",
---|
| 5687 | + vsi->vsi_num, rx_err); |
---|
| 5688 | + |
---|
4880 | 5689 | ice_napi_disable_all(vsi); |
---|
| 5690 | + |
---|
| 5691 | + if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) { |
---|
| 5692 | + link_err = ice_force_phys_link_state(vsi, false); |
---|
| 5693 | + if (link_err) |
---|
| 5694 | + netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n", |
---|
| 5695 | + vsi->vsi_num, link_err); |
---|
| 5696 | + } |
---|
4881 | 5697 | |
---|
4882 | 5698 | ice_for_each_txq(vsi, i) |
---|
4883 | 5699 | ice_clean_tx_ring(vsi->tx_rings[i]); |
---|
.. | .. |
---|
4885 | 5701 | ice_for_each_rxq(vsi, i) |
---|
4886 | 5702 | ice_clean_rx_ring(vsi->rx_rings[i]); |
---|
4887 | 5703 | |
---|
4888 | | - if (err) |
---|
| 5704 | + if (tx_err || rx_err || link_err) { |
---|
4889 | 5705 | netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n", |
---|
4890 | 5706 | vsi->vsi_num, vsi->vsw->sw_id); |
---|
4891 | | - return err; |
---|
| 5707 | + return -EIO; |
---|
| 5708 | + } |
---|
| 5709 | + |
---|
| 5710 | + return 0; |
---|
4892 | 5711 | } |
---|
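
ice_down() now collects tx_err, rx_err, and link_err separately: every teardown step runs even if an earlier one failed, each failure is logged with its own context, and a single -EIO summarizes the result. The shape of that pattern, reduced to a standalone sketch (the helpers are stubs, not driver calls):

```c
#include <errno.h>
#include <stdio.h>

struct my_dev { int id; };

/* stubs standing in for the ring-stop helpers */
static int stop_tx(struct my_dev *d) { (void)d; return 0; }
static int stop_rx(struct my_dev *d) { (void)d; return 0; }
static void free_rings(struct my_dev *d) { (void)d; }

/* Same shape as ice_down(): every step runs even if an earlier one
 * failed, each failure is logged with context, and one aggregate
 * error code is returned at the end.
 */
int dev_down(struct my_dev *d)
{
	int tx_err = stop_tx(d);
	int rx_err;

	if (tx_err)
		fprintf(stderr, "dev %d: failed to stop Tx: %d\n",
			d->id, tx_err);

	rx_err = stop_rx(d);
	if (rx_err)
		fprintf(stderr, "dev %d: failed to stop Rx: %d\n",
			d->id, rx_err);

	free_rings(d);	/* cleanup proceeds regardless */

	return (tx_err || rx_err) ? -EIO : 0;
}
```
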
4893 | 5712 | |
---|
4894 | 5713 | /** |
---|
.. | .. |
---|
4897 | 5716 | * |
---|
4898 | 5717 | * Return 0 on success, negative on failure |
---|
4899 | 5718 | */ |
---|
4900 | | -static int ice_vsi_setup_tx_rings(struct ice_vsi *vsi) |
---|
| 5719 | +int ice_vsi_setup_tx_rings(struct ice_vsi *vsi) |
---|
4901 | 5720 | { |
---|
4902 | 5721 | int i, err = 0; |
---|
4903 | 5722 | |
---|
4904 | 5723 | if (!vsi->num_txq) { |
---|
4905 | | - dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Tx queues\n", |
---|
| 5724 | + dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Tx queues\n", |
---|
4906 | 5725 | vsi->vsi_num); |
---|
4907 | 5726 | return -EINVAL; |
---|
4908 | 5727 | } |
---|
4909 | 5728 | |
---|
4910 | 5729 | ice_for_each_txq(vsi, i) { |
---|
4911 | | - err = ice_setup_tx_ring(vsi->tx_rings[i]); |
---|
| 5730 | + struct ice_ring *ring = vsi->tx_rings[i]; |
---|
| 5731 | + |
---|
| 5732 | + if (!ring) |
---|
| 5733 | + return -EINVAL; |
---|
| 5734 | + |
---|
| 5735 | + ring->netdev = vsi->netdev; |
---|
| 5736 | + err = ice_setup_tx_ring(ring); |
---|
4912 | 5737 | if (err) |
---|
4913 | 5738 | break; |
---|
4914 | 5739 | } |
---|
.. | .. |
---|
4922 | 5747 | * |
---|
4923 | 5748 | * Return 0 on success, negative on failure |
---|
4924 | 5749 | */ |
---|
4925 | | -static int ice_vsi_setup_rx_rings(struct ice_vsi *vsi) |
---|
| 5750 | +int ice_vsi_setup_rx_rings(struct ice_vsi *vsi) |
---|
4926 | 5751 | { |
---|
4927 | 5752 | int i, err = 0; |
---|
4928 | 5753 | |
---|
4929 | 5754 | if (!vsi->num_rxq) { |
---|
4930 | | - dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Rx queues\n", |
---|
| 5755 | + dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Rx queues\n", |
---|
4931 | 5756 | vsi->vsi_num); |
---|
4932 | 5757 | return -EINVAL; |
---|
4933 | 5758 | } |
---|
4934 | 5759 | |
---|
4935 | 5760 | ice_for_each_rxq(vsi, i) { |
---|
4936 | | - err = ice_setup_rx_ring(vsi->rx_rings[i]); |
---|
| 5761 | + struct ice_ring *ring = vsi->rx_rings[i]; |
---|
| 5762 | + |
---|
| 5763 | + if (!ring) |
---|
| 5764 | + return -EINVAL; |
---|
| 5765 | + |
---|
| 5766 | + ring->netdev = vsi->netdev; |
---|
| 5767 | + err = ice_setup_rx_ring(ring); |
---|
4937 | 5768 | if (err) |
---|
4938 | 5769 | break; |
---|
4939 | 5770 | } |
---|
.. | .. |
---|
4942 | 5773 | } |
---|
4943 | 5774 | |
---|
4944 | 5775 | /** |
---|
4945 | | - * ice_vsi_req_irq - Request IRQ from the OS |
---|
4946 | | - * @vsi: The VSI IRQ is being requested for |
---|
4947 | | - * @basename: name for the vector |
---|
| 5776 | + * ice_vsi_open_ctrl - open control VSI for use |
---|
| 5777 | + * @vsi: the VSI to open |
---|
4948 | 5778 | * |
---|
4949 | | - * Return 0 on success and a negative value on error |
---|
| 5779 | + * Initialization of the Control VSI |
---|
| 5780 | + * |
---|
| 5781 | + * Returns 0 on success, negative value on error |
---|
4950 | 5782 | */ |
---|
4951 | | -static int ice_vsi_req_irq(struct ice_vsi *vsi, char *basename) |
---|
| 5783 | +int ice_vsi_open_ctrl(struct ice_vsi *vsi) |
---|
4952 | 5784 | { |
---|
| 5785 | + char int_name[ICE_INT_NAME_STR_LEN]; |
---|
4953 | 5786 | struct ice_pf *pf = vsi->back; |
---|
4954 | | - int err = -EINVAL; |
---|
| 5787 | + struct device *dev; |
---|
| 5788 | + int err; |
---|
4955 | 5789 | |
---|
4956 | | - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) |
---|
4957 | | - err = ice_vsi_req_irq_msix(vsi, basename); |
---|
| 5790 | + dev = ice_pf_to_dev(pf); |
---|
| 5791 | + /* allocate descriptors */ |
---|
| 5792 | + err = ice_vsi_setup_tx_rings(vsi); |
---|
| 5793 | + if (err) |
---|
| 5794 | + goto err_setup_tx; |
---|
| 5795 | + |
---|
| 5796 | + err = ice_vsi_setup_rx_rings(vsi); |
---|
| 5797 | + if (err) |
---|
| 5798 | + goto err_setup_rx; |
---|
| 5799 | + |
---|
| 5800 | + err = ice_vsi_cfg(vsi); |
---|
| 5801 | + if (err) |
---|
| 5802 | + goto err_setup_rx; |
---|
| 5803 | + |
---|
| 5804 | + snprintf(int_name, sizeof(int_name) - 1, "%s-%s:ctrl", |
---|
| 5805 | + dev_driver_string(dev), dev_name(dev)); |
---|
| 5806 | + err = ice_vsi_req_irq_msix(vsi, int_name); |
---|
| 5807 | + if (err) |
---|
| 5808 | + goto err_setup_rx; |
---|
| 5809 | + |
---|
| 5810 | + ice_vsi_cfg_msix(vsi); |
---|
| 5811 | + |
---|
| 5812 | + err = ice_vsi_start_all_rx_rings(vsi); |
---|
| 5813 | + if (err) |
---|
| 5814 | + goto err_up_complete; |
---|
| 5815 | + |
---|
| 5816 | + clear_bit(__ICE_DOWN, vsi->state); |
---|
| 5817 | + ice_vsi_ena_irq(vsi); |
---|
| 5818 | + |
---|
| 5819 | + return 0; |
---|
| 5820 | + |
---|
| 5821 | +err_up_complete: |
---|
| 5822 | + ice_down(vsi); |
---|
| 5823 | +err_setup_rx: |
---|
| 5824 | + ice_vsi_free_rx_rings(vsi); |
---|
| 5825 | +err_setup_tx: |
---|
| 5826 | + ice_vsi_free_tx_rings(vsi); |
---|
4958 | 5827 | |
---|
4959 | 5828 | return err; |
---|
4960 | | -} |
---|
4961 | | - |
---|
4962 | | -/** |
---|
4963 | | - * ice_vsi_free_tx_rings - Free Tx resources for VSI queues |
---|
4964 | | - * @vsi: the VSI having resources freed |
---|
4965 | | - */ |
---|
4966 | | -static void ice_vsi_free_tx_rings(struct ice_vsi *vsi) |
---|
4967 | | -{ |
---|
4968 | | - int i; |
---|
4969 | | - |
---|
4970 | | - if (!vsi->tx_rings) |
---|
4971 | | - return; |
---|
4972 | | - |
---|
4973 | | - ice_for_each_txq(vsi, i) |
---|
4974 | | - if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) |
---|
4975 | | - ice_free_tx_ring(vsi->tx_rings[i]); |
---|
4976 | | -} |
---|
4977 | | - |
---|
4978 | | -/** |
---|
4979 | | - * ice_vsi_free_rx_rings - Free Rx resources for VSI queues |
---|
4980 | | - * @vsi: the VSI having resources freed |
---|
4981 | | - */ |
---|
4982 | | -static void ice_vsi_free_rx_rings(struct ice_vsi *vsi) |
---|
4983 | | -{ |
---|
4984 | | - int i; |
---|
4985 | | - |
---|
4986 | | - if (!vsi->rx_rings) |
---|
4987 | | - return; |
---|
4988 | | - |
---|
4989 | | - ice_for_each_rxq(vsi, i) |
---|
4990 | | - if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc) |
---|
4991 | | - ice_free_rx_ring(vsi->rx_rings[i]); |
---|
4992 | 5829 | } |
---|
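
ice_vsi_open_ctrl() above is a textbook kernel goto-unwind ladder: each setup step that can fail jumps to a label, and the labels fall through in reverse order of setup so exactly the acquired resources are released. Reduced to its shape (all helpers stubbed; the free helpers are assumed to tolerate partially-initialized state, as the driver's do):

```c
struct thing { int unused; };

/* stubs for the setup/teardown steps */
static int setup_tx(struct thing *t) { (void)t; return 0; }
static int setup_rx(struct thing *t) { (void)t; return 0; }
static int start(struct thing *t)    { (void)t; return 0; }
static void stop(struct thing *t)    { (void)t; }
static void free_rx(struct thing *t) { (void)t; }
static void free_tx(struct thing *t) { (void)t; }

int open_thing(struct thing *t)
{
	int err;

	err = setup_tx(t);	/* may leave partial state on failure */
	if (err)
		goto err_setup_tx;

	err = setup_rx(t);
	if (err)
		goto err_setup_rx;

	err = start(t);
	if (err)
		goto err_start;

	return 0;

	/* labels fall through in reverse order of setup */
err_start:
	stop(t);
err_setup_rx:
	free_rx(t);
err_setup_tx:
	free_tx(t);
	return err;
}
```
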
4993 | 5830 | |
---|
4994 | 5831 | /** |
---|
.. | .. |
---|
5019 | 5856 | goto err_setup_rx; |
---|
5020 | 5857 | |
---|
5021 | 5858 | snprintf(int_name, sizeof(int_name) - 1, "%s-%s", |
---|
5022 | | - dev_driver_string(&pf->pdev->dev), vsi->netdev->name); |
---|
5023 | | - err = ice_vsi_req_irq(vsi, int_name); |
---|
| 5859 | + dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name); |
---|
| 5860 | + err = ice_vsi_req_irq_msix(vsi, int_name); |
---|
5024 | 5861 | if (err) |
---|
5025 | 5862 | goto err_setup_rx; |
---|
5026 | 5863 | |
---|
.. | .. |
---|
5052 | 5889 | } |
---|
5053 | 5890 | |
---|
5054 | 5891 | /** |
---|
5055 | | - * ice_vsi_close - Shut down a VSI |
---|
5056 | | - * @vsi: the VSI being shut down |
---|
| 5892 | + * ice_vsi_release_all - Delete all VSIs |
---|
| 5893 | + * @pf: PF from which all VSIs are being removed |
---|
5057 | 5894 | */ |
---|
5058 | | -static void ice_vsi_close(struct ice_vsi *vsi) |
---|
| 5895 | +static void ice_vsi_release_all(struct ice_pf *pf) |
---|
5059 | 5896 | { |
---|
5060 | | - if (!test_and_set_bit(__ICE_DOWN, vsi->state)) |
---|
5061 | | - ice_down(vsi); |
---|
| 5897 | + int err, i; |
---|
5062 | 5898 | |
---|
5063 | | - ice_vsi_free_irq(vsi); |
---|
5064 | | - ice_vsi_free_tx_rings(vsi); |
---|
5065 | | - ice_vsi_free_rx_rings(vsi); |
---|
5066 | | -} |
---|
| 5899 | + if (!pf->vsi) |
---|
| 5900 | + return; |
---|
5067 | 5901 | |
---|
5068 | | -/** |
---|
5069 | | - * ice_rss_clean - Delete RSS related VSI structures that hold user inputs |
---|
5070 | | - * @vsi: the VSI being removed |
---|
5071 | | - */ |
---|
5072 | | -static void ice_rss_clean(struct ice_vsi *vsi) |
---|
5073 | | -{ |
---|
5074 | | - struct ice_pf *pf; |
---|
| 5902 | + ice_for_each_vsi(pf, i) { |
---|
| 5903 | + if (!pf->vsi[i]) |
---|
| 5904 | + continue; |
---|
5075 | 5905 | |
---|
5076 | | - pf = vsi->back; |
---|
5077 | | - |
---|
5078 | | - if (vsi->rss_hkey_user) |
---|
5079 | | - devm_kfree(&pf->pdev->dev, vsi->rss_hkey_user); |
---|
5080 | | - if (vsi->rss_lut_user) |
---|
5081 | | - devm_kfree(&pf->pdev->dev, vsi->rss_lut_user); |
---|
5082 | | -} |
---|
5083 | | - |
---|
5084 | | -/** |
---|
5085 | | - * ice_vsi_release - Delete a VSI and free its resources |
---|
5086 | | - * @vsi: the VSI being removed |
---|
5087 | | - * |
---|
5088 | | - * Returns 0 on success or < 0 on error |
---|
5089 | | - */ |
---|
5090 | | -static int ice_vsi_release(struct ice_vsi *vsi) |
---|
5091 | | -{ |
---|
5092 | | - struct ice_pf *pf; |
---|
5093 | | - |
---|
5094 | | - if (!vsi->back) |
---|
5095 | | - return -ENODEV; |
---|
5096 | | - pf = vsi->back; |
---|
5097 | | - |
---|
5098 | | - if (vsi->netdev) { |
---|
5099 | | - unregister_netdev(vsi->netdev); |
---|
5100 | | - free_netdev(vsi->netdev); |
---|
5101 | | - vsi->netdev = NULL; |
---|
| 5906 | + err = ice_vsi_release(pf->vsi[i]); |
---|
| 5907 | + if (err) |
---|
| 5908 | + dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n", |
---|
| 5909 | + i, err, pf->vsi[i]->vsi_num); |
---|
5102 | 5910 | } |
---|
| 5911 | +} |
---|
5103 | 5912 | |
---|
5104 | | - if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) |
---|
5105 | | - ice_rss_clean(vsi); |
---|
| 5913 | +/** |
---|
| 5914 | + * ice_vsi_rebuild_by_type - Rebuild VSI of a given type |
---|
| 5915 | + * @pf: pointer to the PF instance |
---|
| 5916 | + * @type: VSI type to rebuild |
---|
| 5917 | + * |
---|
| 5918 | + * Iterates through the pf->vsi array and rebuilds VSIs of the requested type |
---|
| 5919 | + */ |
---|
| 5920 | +static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type) |
---|
| 5921 | +{ |
---|
| 5922 | + struct device *dev = ice_pf_to_dev(pf); |
---|
| 5923 | + enum ice_status status; |
---|
| 5924 | + int i, err; |
---|
5106 | 5925 | |
---|
5107 | | - /* Disable VSI and free resources */ |
---|
5108 | | - ice_vsi_dis_irq(vsi); |
---|
5109 | | - ice_vsi_close(vsi); |
---|
| 5926 | + ice_for_each_vsi(pf, i) { |
---|
| 5927 | + struct ice_vsi *vsi = pf->vsi[i]; |
---|
5110 | 5928 | |
---|
5111 | | - /* reclaim interrupt vectors back to PF */ |
---|
5112 | | - ice_free_res(vsi->back->irq_tracker, vsi->base_vector, vsi->idx); |
---|
5113 | | - pf->num_avail_msix += vsi->num_q_vectors; |
---|
| 5929 | + if (!vsi || vsi->type != type) |
---|
| 5930 | + continue; |
---|
5114 | 5931 | |
---|
5115 | | - ice_remove_vsi_fltr(&pf->hw, vsi->vsi_num); |
---|
5116 | | - ice_vsi_delete(vsi); |
---|
5117 | | - ice_vsi_free_q_vectors(vsi); |
---|
5118 | | - ice_vsi_clear_rings(vsi); |
---|
| 5932 | + /* rebuild the VSI */ |
---|
| 5933 | + err = ice_vsi_rebuild(vsi, true); |
---|
| 5934 | + if (err) { |
---|
| 5935 | + dev_err(dev, "rebuild VSI failed, err %d, VSI index %d, type %s\n", |
---|
| 5936 | + err, vsi->idx, ice_vsi_type_str(type)); |
---|
| 5937 | + return err; |
---|
| 5938 | + } |
---|
5119 | 5939 | |
---|
5120 | | - ice_vsi_put_qs(vsi); |
---|
5121 | | - pf->q_left_tx += vsi->alloc_txq; |
---|
5122 | | - pf->q_left_rx += vsi->alloc_rxq; |
---|
| 5940 | + /* replay filters for the VSI */ |
---|
| 5941 | + status = ice_replay_vsi(&pf->hw, vsi->idx); |
---|
| 5942 | + if (status) { |
---|
| 5943 | + dev_err(dev, "replay VSI failed, status %s, VSI index %d, type %s\n", |
---|
| 5944 | + ice_stat_str(status), vsi->idx, |
---|
| 5945 | + ice_vsi_type_str(type)); |
---|
| 5946 | + return -EIO; |
---|
| 5947 | + } |
---|
5123 | 5948 | |
---|
5124 | | - ice_vsi_clear(vsi); |
---|
| 5949 | + /* Re-map HW VSI number, using VSI handle that has been |
---|
| 5950 | + * previously validated in ice_replay_vsi() call above |
---|
| 5951 | + */ |
---|
| 5952 | + vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx); |
---|
| 5953 | + |
---|
| 5954 | + /* enable the VSI */ |
---|
| 5955 | + err = ice_ena_vsi(vsi, false); |
---|
| 5956 | + if (err) { |
---|
| 5957 | + dev_err(dev, "enable VSI failed, err %d, VSI index %d, type %s\n", |
---|
| 5958 | + err, vsi->idx, ice_vsi_type_str(type)); |
---|
| 5959 | + return err; |
---|
| 5960 | + } |
---|
| 5961 | + |
---|
| 5962 | + dev_info(dev, "VSI rebuilt. VSI index %d, type %s\n", vsi->idx, |
---|
| 5963 | + ice_vsi_type_str(type)); |
---|
| 5964 | + } |
---|
5125 | 5965 | |
---|
5126 | 5966 | return 0; |
---|
5127 | 5967 | } |
---|
5128 | 5968 | |
---|
5129 | 5969 | /** |
---|
5130 | | - * ice_dis_vsi - pause a VSI |
---|
5131 | | - * @vsi: the VSI being paused |
---|
| 5970 | + * ice_update_pf_netdev_link - Update PF netdev link status |
---|
| 5971 | + * @pf: pointer to the PF instance |
---|
5132 | 5972 | */ |
---|
5133 | | -static void ice_dis_vsi(struct ice_vsi *vsi) |
---|
| 5973 | +static void ice_update_pf_netdev_link(struct ice_pf *pf) |
---|
5134 | 5974 | { |
---|
5135 | | - if (test_bit(__ICE_DOWN, vsi->state)) |
---|
5136 | | - return; |
---|
| 5975 | + bool link_up; |
---|
| 5976 | + int i; |
---|
5137 | 5977 | |
---|
5138 | | - set_bit(__ICE_NEEDS_RESTART, vsi->state); |
---|
| 5978 | + ice_for_each_vsi(pf, i) { |
---|
| 5979 | + struct ice_vsi *vsi = pf->vsi[i]; |
---|
5139 | 5980 | |
---|
5140 | | - if (vsi->netdev && netif_running(vsi->netdev) && |
---|
5141 | | - vsi->type == ICE_VSI_PF) |
---|
5142 | | - vsi->netdev->netdev_ops->ndo_stop(vsi->netdev); |
---|
| 5981 | + if (!vsi || vsi->type != ICE_VSI_PF) |
---|
| 5982 | + return; |
---|
5143 | 5983 | |
---|
5144 | | - ice_vsi_close(vsi); |
---|
5145 | | -} |
---|
5146 | | - |
---|
5147 | | -/** |
---|
5148 | | - * ice_ena_vsi - resume a VSI |
---|
5149 | | - * @vsi: the VSI being resume |
---|
5150 | | - */ |
---|
5151 | | -static void ice_ena_vsi(struct ice_vsi *vsi) |
---|
5152 | | -{ |
---|
5153 | | - if (!test_and_clear_bit(__ICE_NEEDS_RESTART, vsi->state)) |
---|
5154 | | - return; |
---|
5155 | | - |
---|
5156 | | - if (vsi->netdev && netif_running(vsi->netdev)) |
---|
5157 | | - vsi->netdev->netdev_ops->ndo_open(vsi->netdev); |
---|
5158 | | - else if (ice_vsi_open(vsi)) |
---|
5159 | | - /* this clears the DOWN bit */ |
---|
5160 | | - dev_dbg(&vsi->back->pdev->dev, "Failed open VSI 0x%04X on switch 0x%04X\n", |
---|
5161 | | - vsi->vsi_num, vsi->vsw->sw_id); |
---|
5162 | | -} |
---|
5163 | | - |
---|
5164 | | -/** |
---|
5165 | | - * ice_pf_dis_all_vsi - Pause all VSIs on a PF |
---|
5166 | | - * @pf: the PF |
---|
5167 | | - */ |
---|
5168 | | -static void ice_pf_dis_all_vsi(struct ice_pf *pf) |
---|
5169 | | -{ |
---|
5170 | | - int v; |
---|
5171 | | - |
---|
5172 | | - ice_for_each_vsi(pf, v) |
---|
5173 | | - if (pf->vsi[v]) |
---|
5174 | | - ice_dis_vsi(pf->vsi[v]); |
---|
5175 | | -} |
---|
5176 | | - |
---|
5177 | | -/** |
---|
5178 | | - * ice_pf_ena_all_vsi - Resume all VSIs on a PF |
---|
5179 | | - * @pf: the PF |
---|
5180 | | - */ |
---|
5181 | | -static void ice_pf_ena_all_vsi(struct ice_pf *pf) |
---|
5182 | | -{ |
---|
5183 | | - int v; |
---|
5184 | | - |
---|
5185 | | - ice_for_each_vsi(pf, v) |
---|
5186 | | - if (pf->vsi[v]) |
---|
5187 | | - ice_ena_vsi(pf->vsi[v]); |
---|
| 5984 | + ice_get_link_status(pf->vsi[i]->port_info, &link_up); |
---|
| 5985 | + if (link_up) { |
---|
| 5986 | + netif_carrier_on(pf->vsi[i]->netdev); |
---|
| 5987 | + netif_tx_wake_all_queues(pf->vsi[i]->netdev); |
---|
| 5988 | + } else { |
---|
| 5989 | + netif_carrier_off(pf->vsi[i]->netdev); |
---|
| 5990 | + netif_tx_stop_all_queues(pf->vsi[i]->netdev); |
---|
| 5991 | + } |
---|
| 5992 | + } |
---|
5188 | 5993 | } |
---|
5189 | 5994 | |
---|
5190 | 5995 | /** |
---|
5191 | 5996 | * ice_rebuild - rebuild after reset |
---|
5192 | | - * @pf: pf to rebuild |
---|
| 5997 | + * @pf: PF to rebuild |
---|
| 5998 | + * @reset_type: type of reset |
---|
| 5999 | + * |
---|
| 6000 | + * Do not rebuild VF VSI in this flow because that is already handled via |
---|
| 6001 | + * ice_reset_all_vfs(). This is because requirements for resetting a VF after a |
---|
| 6002 | + * PFR/CORER/GLOBER/etc. are different than the normal flow. Also, we don't want |
---|
| 6003 | + * to reset/rebuild all the VF VSI twice. |
---|
5193 | 6004 | */ |
---|
5194 | | -static void ice_rebuild(struct ice_pf *pf) |
---|
| 6005 | +static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) |
---|
5195 | 6006 | { |
---|
5196 | | - struct device *dev = &pf->pdev->dev; |
---|
| 6007 | + struct device *dev = ice_pf_to_dev(pf); |
---|
5197 | 6008 | struct ice_hw *hw = &pf->hw; |
---|
5198 | 6009 | enum ice_status ret; |
---|
5199 | 6010 | int err; |
---|
.. | .. |
---|
5201 | 6012 | if (test_bit(__ICE_DOWN, pf->state)) |
---|
5202 | 6013 | goto clear_recovery; |
---|
5203 | 6014 | |
---|
5204 | | - dev_dbg(dev, "rebuilding pf\n"); |
---|
| 6015 | + dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type); |
---|
5205 | 6016 | |
---|
5206 | 6017 | ret = ice_init_all_ctrlq(hw); |
---|
5207 | 6018 | if (ret) { |
---|
5208 | | - dev_err(dev, "control queues init failed %d\n", ret); |
---|
5209 | | - goto fail_reset; |
---|
| 6019 | + dev_err(dev, "control queues init failed %s\n", |
---|
| 6020 | + ice_stat_str(ret)); |
---|
| 6021 | + goto err_init_ctrlq; |
---|
| 6022 | + } |
---|
| 6023 | + |
---|
| 6024 | + /* if DDP was previously loaded successfully */ |
---|
| 6025 | + if (!ice_is_safe_mode(pf)) { |
---|
| 6026 | + /* reload the SW DB of filter tables */ |
---|
| 6027 | + if (reset_type == ICE_RESET_PFR) |
---|
| 6028 | + ice_fill_blk_tbls(hw); |
---|
| 6029 | + else |
---|
| 6030 | + /* Reload DDP Package after CORER/GLOBR reset */ |
---|
| 6031 | + ice_load_pkg(NULL, pf); |
---|
5210 | 6032 | } |
---|
5211 | 6033 | |
---|
5212 | 6034 | ret = ice_clear_pf_cfg(hw); |
---|
5213 | 6035 | if (ret) { |
---|
5214 | | - dev_err(dev, "clear PF configuration failed %d\n", ret); |
---|
5215 | | - goto fail_reset; |
---|
| 6036 | + dev_err(dev, "clear PF configuration failed %s\n", |
---|
| 6037 | + ice_stat_str(ret)); |
---|
| 6038 | + goto err_init_ctrlq; |
---|
5216 | 6039 | } |
---|
| 6040 | + |
---|
| 6041 | + if (pf->first_sw->dflt_vsi_ena) |
---|
| 6042 | + dev_info(dev, "Clearing default VSI, re-enable after reset completes\n"); |
---|
| 6043 | + /* clear the default VSI configuration if it exists */ |
---|
| 6044 | + pf->first_sw->dflt_vsi = NULL; |
---|
| 6045 | + pf->first_sw->dflt_vsi_ena = false; |
---|
5217 | 6046 | |
---|
5218 | 6047 | ice_clear_pxe_mode(hw); |
---|
5219 | 6048 | |
---|
5220 | 6049 | ret = ice_get_caps(hw); |
---|
5221 | 6050 | if (ret) { |
---|
5222 | | - dev_err(dev, "ice_get_caps failed %d\n", ret); |
---|
5223 | | - goto fail_reset; |
---|
| 6051 | + dev_err(dev, "ice_get_caps failed %s\n", ice_stat_str(ret)); |
---|
| 6052 | + goto err_init_ctrlq; |
---|
5224 | 6053 | } |
---|
5225 | 6054 | |
---|
5226 | | - /* basic nic switch setup */ |
---|
5227 | | - err = ice_setup_pf_sw(pf); |
---|
5228 | | - if (err) { |
---|
5229 | | - dev_err(dev, "ice_setup_pf_sw failed\n"); |
---|
5230 | | - goto fail_reset; |
---|
| 6055 | + ret = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL); |
---|
| 6056 | + if (ret) { |
---|
| 6057 | + dev_err(dev, "set_mac_cfg failed %s\n", ice_stat_str(ret)); |
---|
| 6058 | + goto err_init_ctrlq; |
---|
5231 | 6059 | } |
---|
| 6060 | + |
---|
| 6061 | + err = ice_sched_init_port(hw->port_info); |
---|
| 6062 | + if (err) |
---|
| 6063 | + goto err_sched_init_port; |
---|
5232 | 6064 | |
---|
5233 | 6065 | /* start misc vector */ |
---|
5234 | | - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) { |
---|
5235 | | - err = ice_req_irq_msix_misc(pf); |
---|
5236 | | - if (err) { |
---|
5237 | | - dev_err(dev, "misc vector setup failed: %d\n", err); |
---|
5238 | | - goto fail_reset; |
---|
| 6066 | + err = ice_req_irq_msix_misc(pf); |
---|
| 6067 | + if (err) { |
---|
| 6068 | + dev_err(dev, "misc vector setup failed: %d\n", err); |
---|
| 6069 | + goto err_sched_init_port; |
---|
| 6070 | + } |
---|
| 6071 | + |
---|
| 6072 | + if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) { |
---|
| 6073 | + wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M); |
---|
| 6074 | + if (!rd32(hw, PFQF_FD_SIZE)) { |
---|
| 6075 | + u16 unused, guar, b_effort; |
---|
| 6076 | + |
---|
| 6077 | + guar = hw->func_caps.fd_fltr_guar; |
---|
| 6078 | + b_effort = hw->func_caps.fd_fltr_best_effort; |
---|
| 6079 | + |
---|
| 6080 | + /* force guaranteed filter pool for PF */ |
---|
| 6081 | + ice_alloc_fd_guar_item(hw, &unused, guar); |
---|
| 6082 | + /* force shared filter pool for PF */ |
---|
| 6083 | + ice_alloc_fd_shrd_item(hw, &unused, b_effort); |
---|
5239 | 6084 | } |
---|
5240 | 6085 | } |
---|
5241 | 6086 | |
---|
5242 | | - /* restart the VSIs that were rebuilt and running before the reset */ |
---|
5243 | | - ice_pf_ena_all_vsi(pf); |
---|
| 6087 | + if (test_bit(ICE_FLAG_DCB_ENA, pf->flags)) |
---|
| 6088 | + ice_dcb_rebuild(pf); |
---|
5244 | 6089 | |
---|
| 6090 | + /* rebuild PF VSI */ |
---|
| 6091 | + err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF); |
---|
| 6092 | + if (err) { |
---|
| 6093 | + dev_err(dev, "PF VSI rebuild failed: %d\n", err); |
---|
| 6094 | + goto err_vsi_rebuild; |
---|
| 6095 | + } |
---|
| 6096 | + |
---|
| 6097 | + /* If Flow Director is active */ |
---|
| 6098 | + if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) { |
---|
| 6099 | + err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL); |
---|
| 6100 | + if (err) { |
---|
| 6101 | + dev_err(dev, "control VSI rebuild failed: %d\n", err); |
---|
| 6102 | + goto err_vsi_rebuild; |
---|
| 6103 | + } |
---|
| 6104 | + |
---|
| 6105 | + /* replay HW Flow Director recipes */ |
---|
| 6106 | + if (hw->fdir_prof) |
---|
| 6107 | + ice_fdir_replay_flows(hw); |
---|
| 6108 | + |
---|
| 6109 | + /* replay Flow Director filters */ |
---|
| 6110 | + ice_fdir_replay_fltrs(pf); |
---|
| 6111 | + |
---|
| 6112 | + ice_rebuild_arfs(pf); |
---|
| 6113 | + } |
---|
| 6114 | + |
---|
| 6115 | + ice_update_pf_netdev_link(pf); |
---|
| 6116 | + |
---|
| 6117 | + /* tell the firmware we are up */ |
---|
| 6118 | + ret = ice_send_version(pf); |
---|
| 6119 | + if (ret) { |
---|
| 6120 | + dev_err(dev, "Rebuild failed due to error sending driver version: %s\n", |
---|
| 6121 | + ice_stat_str(ret)); |
---|
| 6122 | + goto err_vsi_rebuild; |
---|
| 6123 | + } |
---|
| 6124 | + |
---|
| 6125 | + ice_replay_post(hw); |
---|
| 6126 | + |
---|
| 6127 | + /* if we get here, reset flow is successful */ |
---|
| 6128 | + clear_bit(__ICE_RESET_FAILED, pf->state); |
---|
5245 | 6129 | return; |
---|
5246 | 6130 | |
---|
5247 | | -fail_reset: |
---|
| 6131 | +err_vsi_rebuild: |
---|
| 6132 | +err_sched_init_port: |
---|
| 6133 | + ice_sched_cleanup_all(hw); |
---|
| 6134 | +err_init_ctrlq: |
---|
5248 | 6135 | ice_shutdown_all_ctrlq(hw); |
---|
5249 | 6136 | set_bit(__ICE_RESET_FAILED, pf->state); |
---|
5250 | 6137 | clear_recovery: |
---|
5251 | | - set_bit(__ICE_RESET_RECOVERY_PENDING, pf->state); |
---|
| 6138 | + /* set this bit in PF state to control service task scheduling */ |
---|
| 6139 | + set_bit(__ICE_NEEDS_RESTART, pf->state); |
---|
| 6140 | + dev_err(dev, "Rebuild failed, unload and reload driver\n"); |
---|
| 6141 | +} |
---|
| 6142 | + |
---|
| 6143 | +/** |
---|
| 6144 | + * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP |
---|
| 6145 | + * @vsi: Pointer to VSI structure |
---|
| 6146 | + */ |
---|
| 6147 | +static int ice_max_xdp_frame_size(struct ice_vsi *vsi) |
---|
| 6148 | +{ |
---|
| 6149 | + if (PAGE_SIZE >= 8192 || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) |
---|
| 6150 | + return ICE_RXBUF_2048 - XDP_PACKET_HEADROOM; |
---|
| 6151 | + else |
---|
| 6152 | + return ICE_RXBUF_3072; |
---|
5252 | 6153 | } |
---|
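
The new XDP branch in ice_change_mtu() pairs with ice_max_xdp_frame_size(): the MTU plus L2 overhead must fit in one Rx buffer after XDP headroom is reserved. A worked example of the arithmetic, assuming the usual XDP_PACKET_HEADROOM of 256 bytes and ICE_ETH_PKT_HDR_PAD of 26 bytes (Ethernet header + FCS + two VLAN tags; the macro's definition is not shown in this hunk):

```c
#include <stdio.h>

/* Values assumed for illustration; only ICE_RXBUF_2048 and
 * XDP_PACKET_HEADROOM appear in the hunk above, and the 26-byte
 * header pad is an assumption.
 */
#define XDP_PACKET_HEADROOM	256
#define ICE_RXBUF_2048		2048
#define ICE_ETH_PKT_HDR_PAD	26

int main(void)
{
	int frame_size = ICE_RXBUF_2048 - XDP_PACKET_HEADROOM; /* 1792 */
	int max_mtu = frame_size - ICE_ETH_PKT_HDR_PAD;        /* 1766 */

	printf("max MTU with XDP on a 2048-byte Rx buffer: %d\n", max_mtu);
	return 0;
}
```
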
5253 | 6154 | |
---|
5254 | 6155 | /** |
---|
.. | .. |
---|
5265 | 6166 | struct ice_pf *pf = vsi->back; |
---|
5266 | 6167 | u8 count = 0; |
---|
5267 | 6168 | |
---|
5268 | | - if (new_mtu == netdev->mtu) { |
---|
5269 | | - netdev_warn(netdev, "mtu is already %u\n", netdev->mtu); |
---|
| 6169 | + if (new_mtu == (int)netdev->mtu) { |
---|
| 6170 | + netdev_warn(netdev, "MTU is already %u\n", netdev->mtu); |
---|
5270 | 6171 | return 0; |
---|
5271 | 6172 | } |
---|
5272 | 6173 | |
---|
5273 | | - if (new_mtu < netdev->min_mtu) { |
---|
5274 | | - netdev_err(netdev, "new mtu invalid. min_mtu is %d\n", |
---|
| 6174 | + if (ice_is_xdp_ena_vsi(vsi)) { |
---|
| 6175 | + int frame_size = ice_max_xdp_frame_size(vsi); |
---|
| 6176 | + |
---|
| 6177 | + if (new_mtu + ICE_ETH_PKT_HDR_PAD > frame_size) { |
---|
| 6178 | + netdev_err(netdev, "max MTU for XDP usage is %d\n", |
---|
| 6179 | + frame_size - ICE_ETH_PKT_HDR_PAD); |
---|
| 6180 | + return -EINVAL; |
---|
| 6181 | + } |
---|
| 6182 | + } |
---|
| 6183 | + |
---|
| 6184 | + if (new_mtu < (int)netdev->min_mtu) { |
---|
| 6185 | + netdev_err(netdev, "new MTU invalid. min_mtu is %d\n", |
---|
5275 | 6186 | netdev->min_mtu); |
---|
5276 | 6187 | return -EINVAL; |
---|
5277 | | - } else if (new_mtu > netdev->max_mtu) { |
---|
5278 | | - netdev_err(netdev, "new mtu invalid. max_mtu is %d\n", |
---|
| 6188 | + } else if (new_mtu > (int)netdev->max_mtu) { |
---|
| 6189 | + netdev_err(netdev, "new MTU invalid. max_mtu is %d\n", |
---|
5279 | 6190 | netdev->max_mtu);
---|
5280 | 6191 | return -EINVAL; |
---|
5281 | 6192 | } |
---|
5282 | 6193 | /* if a reset is in progress, wait for some time for it to complete */ |
---|
5283 | 6194 | do { |
---|
5284 | | - if (ice_is_reset_recovery_pending(pf->state)) { |
---|
| 6195 | + if (ice_is_reset_in_progress(pf->state)) { |
---|
5285 | 6196 | count++; |
---|
5286 | 6197 | usleep_range(1000, 2000); |
---|
5287 | 6198 | } else { |
---|
.. | .. |
---|
5291 | 6202 | } while (count < 100); |
---|
5292 | 6203 | |
---|
5293 | 6204 | if (count == 100) { |
---|
5294 | | - netdev_err(netdev, "can't change mtu. Device is busy\n"); |
---|
| 6205 | + netdev_err(netdev, "can't change MTU. Device is busy\n"); |
---|
5295 | 6206 | return -EBUSY; |
---|
5296 | 6207 | } |
---|
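
The reset-wait above is a bounded poll: up to 100 iterations of usleep_range(1000, 2000), so roughly 100-200 ms before giving up with -EBUSY rather than blocking indefinitely. The same shape as a standalone sketch:

```c
#include <errno.h>
#include <stdbool.h>
#include <unistd.h>

static bool reset_in_progress(void) { return false; }	/* stub */

/* Bounded poll, same shape as the loop above: retry with a short
 * sleep, give up with -EBUSY after 100 tries instead of blocking
 * forever.
 */
int wait_for_reset_done(void)
{
	int count = 0;

	while (reset_in_progress()) {
		if (++count >= 100)
			return -EBUSY;
		usleep(1500);	/* the driver sleeps 1-2 ms per try */
	}
	return 0;
}
```
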
5297 | 6208 | |
---|
5298 | | - netdev->mtu = new_mtu; |
---|
| 6209 | + netdev->mtu = (unsigned int)new_mtu; |
---|
5299 | 6210 | |
---|
5300 | 6211 | /* if VSI is up, bring it down and then back up */ |
---|
5301 | 6212 | if (!test_and_set_bit(__ICE_DOWN, vsi->state)) { |
---|
.. | .. |
---|
5303 | 6214 | |
---|
5304 | 6215 | err = ice_down(vsi); |
---|
5305 | 6216 | if (err) { |
---|
5306 | | - netdev_err(netdev, "change mtu if_up err %d\n", err); |
---|
| 6217 | + netdev_err(netdev, "change MTU if_up err %d\n", err); |
---|
5307 | 6218 | return err; |
---|
5308 | 6219 | } |
---|
5309 | 6220 | |
---|
5310 | 6221 | err = ice_up(vsi); |
---|
5311 | 6222 | if (err) { |
---|
5312 | | - netdev_err(netdev, "change mtu if_up err %d\n", err); |
---|
| 6223 | + netdev_err(netdev, "change MTU if_up err %d\n", err); |
---|
5313 | 6224 | return err; |
---|
5314 | 6225 | } |
---|
5315 | 6226 | } |
---|
5316 | 6227 | |
---|
5317 | | - netdev_dbg(netdev, "changed mtu to %d\n", new_mtu); |
---|
| 6228 | + netdev_dbg(netdev, "changed MTU to %d\n", new_mtu); |
---|
5318 | 6229 | return 0; |
---|
| 6230 | +} |
---|
| 6231 | + |
---|
| 6232 | +/** |
---|
| 6233 | + * ice_aq_str - convert AQ err code to a string |
---|
| 6234 | + * @aq_err: the AQ error code to convert |
---|
| 6235 | + */ |
---|
| 6236 | +const char *ice_aq_str(enum ice_aq_err aq_err) |
---|
| 6237 | +{ |
---|
| 6238 | + switch (aq_err) { |
---|
| 6239 | + case ICE_AQ_RC_OK: |
---|
| 6240 | + return "OK"; |
---|
| 6241 | + case ICE_AQ_RC_EPERM: |
---|
| 6242 | + return "ICE_AQ_RC_EPERM"; |
---|
| 6243 | + case ICE_AQ_RC_ENOENT: |
---|
| 6244 | + return "ICE_AQ_RC_ENOENT"; |
---|
| 6245 | + case ICE_AQ_RC_ENOMEM: |
---|
| 6246 | + return "ICE_AQ_RC_ENOMEM"; |
---|
| 6247 | + case ICE_AQ_RC_EBUSY: |
---|
| 6248 | + return "ICE_AQ_RC_EBUSY"; |
---|
| 6249 | + case ICE_AQ_RC_EEXIST: |
---|
| 6250 | + return "ICE_AQ_RC_EEXIST"; |
---|
| 6251 | + case ICE_AQ_RC_EINVAL: |
---|
| 6252 | + return "ICE_AQ_RC_EINVAL"; |
---|
| 6253 | + case ICE_AQ_RC_ENOSPC: |
---|
| 6254 | + return "ICE_AQ_RC_ENOSPC"; |
---|
| 6255 | + case ICE_AQ_RC_ENOSYS: |
---|
| 6256 | + return "ICE_AQ_RC_ENOSYS"; |
---|
| 6257 | + case ICE_AQ_RC_EMODE: |
---|
| 6258 | + return "ICE_AQ_RC_EMODE"; |
---|
| 6259 | + case ICE_AQ_RC_ENOSEC: |
---|
| 6260 | + return "ICE_AQ_RC_ENOSEC"; |
---|
| 6261 | + case ICE_AQ_RC_EBADSIG: |
---|
| 6262 | + return "ICE_AQ_RC_EBADSIG"; |
---|
| 6263 | + case ICE_AQ_RC_ESVN: |
---|
| 6264 | + return "ICE_AQ_RC_ESVN"; |
---|
| 6265 | + case ICE_AQ_RC_EBADMAN: |
---|
| 6266 | + return "ICE_AQ_RC_EBADMAN"; |
---|
| 6267 | + case ICE_AQ_RC_EBADBUF: |
---|
| 6268 | + return "ICE_AQ_RC_EBADBUF"; |
---|
| 6269 | + } |
---|
| 6270 | + |
---|
| 6271 | + return "ICE_AQ_RC_UNKNOWN"; |
---|
| 6272 | +} |
---|
| 6273 | + |
---|
| 6274 | +/** |
---|
| 6275 | + * ice_stat_str - convert status err code to a string |
---|
| 6276 | + * @stat_err: the status error code to convert |
---|
| 6277 | + */ |
---|
| 6278 | +const char *ice_stat_str(enum ice_status stat_err) |
---|
| 6279 | +{ |
---|
| 6280 | + switch (stat_err) { |
---|
| 6281 | + case ICE_SUCCESS: |
---|
| 6282 | + return "OK"; |
---|
| 6283 | + case ICE_ERR_PARAM: |
---|
| 6284 | + return "ICE_ERR_PARAM"; |
---|
| 6285 | + case ICE_ERR_NOT_IMPL: |
---|
| 6286 | + return "ICE_ERR_NOT_IMPL"; |
---|
| 6287 | + case ICE_ERR_NOT_READY: |
---|
| 6288 | + return "ICE_ERR_NOT_READY"; |
---|
| 6289 | + case ICE_ERR_NOT_SUPPORTED: |
---|
| 6290 | + return "ICE_ERR_NOT_SUPPORTED"; |
---|
| 6291 | + case ICE_ERR_BAD_PTR: |
---|
| 6292 | + return "ICE_ERR_BAD_PTR"; |
---|
| 6293 | + case ICE_ERR_INVAL_SIZE: |
---|
| 6294 | + return "ICE_ERR_INVAL_SIZE"; |
---|
| 6295 | + case ICE_ERR_DEVICE_NOT_SUPPORTED: |
---|
| 6296 | + return "ICE_ERR_DEVICE_NOT_SUPPORTED"; |
---|
| 6297 | + case ICE_ERR_RESET_FAILED: |
---|
| 6298 | + return "ICE_ERR_RESET_FAILED"; |
---|
| 6299 | + case ICE_ERR_FW_API_VER: |
---|
| 6300 | + return "ICE_ERR_FW_API_VER"; |
---|
| 6301 | + case ICE_ERR_NO_MEMORY: |
---|
| 6302 | + return "ICE_ERR_NO_MEMORY"; |
---|
| 6303 | + case ICE_ERR_CFG: |
---|
| 6304 | + return "ICE_ERR_CFG"; |
---|
| 6305 | + case ICE_ERR_OUT_OF_RANGE: |
---|
| 6306 | + return "ICE_ERR_OUT_OF_RANGE"; |
---|
| 6307 | + case ICE_ERR_ALREADY_EXISTS: |
---|
| 6308 | + return "ICE_ERR_ALREADY_EXISTS"; |
---|
| 6309 | + case ICE_ERR_NVM_CHECKSUM: |
---|
| 6310 | + return "ICE_ERR_NVM_CHECKSUM"; |
---|
| 6311 | + case ICE_ERR_BUF_TOO_SHORT: |
---|
| 6312 | + return "ICE_ERR_BUF_TOO_SHORT"; |
---|
| 6313 | + case ICE_ERR_NVM_BLANK_MODE: |
---|
| 6314 | + return "ICE_ERR_NVM_BLANK_MODE"; |
---|
| 6315 | + case ICE_ERR_IN_USE: |
---|
| 6316 | + return "ICE_ERR_IN_USE"; |
---|
| 6317 | + case ICE_ERR_MAX_LIMIT: |
---|
| 6318 | + return "ICE_ERR_MAX_LIMIT"; |
---|
| 6319 | + case ICE_ERR_RESET_ONGOING: |
---|
| 6320 | + return "ICE_ERR_RESET_ONGOING"; |
---|
| 6321 | + case ICE_ERR_HW_TABLE: |
---|
| 6322 | + return "ICE_ERR_HW_TABLE"; |
---|
| 6323 | + case ICE_ERR_DOES_NOT_EXIST: |
---|
| 6324 | + return "ICE_ERR_DOES_NOT_EXIST"; |
---|
| 6325 | + case ICE_ERR_FW_DDP_MISMATCH: |
---|
| 6326 | + return "ICE_ERR_FW_DDP_MISMATCH"; |
---|
| 6327 | + case ICE_ERR_AQ_ERROR: |
---|
| 6328 | + return "ICE_ERR_AQ_ERROR"; |
---|
| 6329 | + case ICE_ERR_AQ_TIMEOUT: |
---|
| 6330 | + return "ICE_ERR_AQ_TIMEOUT"; |
---|
| 6331 | + case ICE_ERR_AQ_FULL: |
---|
| 6332 | + return "ICE_ERR_AQ_FULL"; |
---|
| 6333 | + case ICE_ERR_AQ_NO_WORK: |
---|
| 6334 | + return "ICE_ERR_AQ_NO_WORK"; |
---|
| 6335 | + case ICE_ERR_AQ_EMPTY: |
---|
| 6336 | + return "ICE_ERR_AQ_EMPTY"; |
---|
| 6337 | + case ICE_ERR_AQ_FW_CRITICAL: |
---|
| 6338 | + return "ICE_ERR_AQ_FW_CRITICAL"; |
---|
| 6339 | + } |
---|
| 6340 | + |
---|
| 6341 | + return "ICE_ERR_UNKNOWN"; |
---|
5319 | 6342 | } |
---|
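
ice_stat_str() and ice_aq_str() exist so every log line in this patch can print a symbolic name ("err %s aq_err %s") rather than a raw number. Note the switches deliberately omit a `default:` label, so -Wswitch can flag any enum value added later without a string. The idiom in miniature, for a hypothetical error enum:

```c
#include <stdio.h>

enum my_err { MY_OK, MY_ERR_PARAM, MY_ERR_BUSY };

/* Total switch with the fallback after it; omitting "default:" lets
 * -Wswitch warn when a new enum value lacks a string.
 */
const char *my_err_str(enum my_err e)
{
	switch (e) {
	case MY_OK:
		return "OK";
	case MY_ERR_PARAM:
		return "MY_ERR_PARAM";
	case MY_ERR_BUSY:
		return "MY_ERR_BUSY";
	}

	return "MY_ERR_UNKNOWN";
}

int main(void)
{
	printf("request failed: %s\n", my_err_str(MY_ERR_BUSY));
	return 0;
}
```
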
5320 | 6343 | |
---|
5321 | 6344 | /** |
---|
.. | .. |
---|
5332 | 6355 | struct ice_pf *pf = vsi->back; |
---|
5333 | 6356 | struct ice_hw *hw = &pf->hw; |
---|
5334 | 6357 | enum ice_status status; |
---|
| 6358 | + struct device *dev; |
---|
5335 | 6359 | |
---|
| 6360 | + dev = ice_pf_to_dev(pf); |
---|
5336 | 6361 | if (seed) { |
---|
5337 | 6362 | struct ice_aqc_get_set_rss_keys *buf = |
---|
5338 | 6363 | (struct ice_aqc_get_set_rss_keys *)seed; |
---|
5339 | 6364 | |
---|
5340 | | - status = ice_aq_set_rss_key(hw, vsi->vsi_num, buf); |
---|
| 6365 | + status = ice_aq_set_rss_key(hw, vsi->idx, buf); |
---|
5341 | 6366 | |
---|
5342 | 6367 | if (status) { |
---|
5343 | | - dev_err(&pf->pdev->dev, |
---|
5344 | | - "Cannot set RSS key, err %d aq_err %d\n", |
---|
5345 | | - status, hw->adminq.rq_last_status); |
---|
| 6368 | + dev_err(dev, "Cannot set RSS key, err %s aq_err %s\n", |
---|
| 6369 | + ice_stat_str(status), |
---|
| 6370 | + ice_aq_str(hw->adminq.sq_last_status)); |
---|
5346 | 6371 | return -EIO; |
---|
5347 | 6372 | } |
---|
5348 | 6373 | } |
---|
5349 | 6374 | |
---|
5350 | 6375 | if (lut) { |
---|
5351 | | - status = ice_aq_set_rss_lut(hw, vsi->vsi_num, |
---|
5352 | | - vsi->rss_lut_type, lut, lut_size); |
---|
| 6376 | + status = ice_aq_set_rss_lut(hw, vsi->idx, vsi->rss_lut_type, |
---|
| 6377 | + lut, lut_size); |
---|
5353 | 6378 | if (status) { |
---|
5354 | | - dev_err(&pf->pdev->dev, |
---|
5355 | | - "Cannot set RSS lut, err %d aq_err %d\n", |
---|
5356 | | - status, hw->adminq.rq_last_status); |
---|
| 6379 | + dev_err(dev, "Cannot set RSS lut, err %s aq_err %s\n", |
---|
| 6380 | + ice_stat_str(status), |
---|
| 6381 | + ice_aq_str(hw->adminq.sq_last_status)); |
---|
5357 | 6382 | return -EIO; |
---|
5358 | 6383 | } |
---|
5359 | 6384 | } |
---|
.. | .. |
---|
5375 | 6400 | struct ice_pf *pf = vsi->back; |
---|
5376 | 6401 | struct ice_hw *hw = &pf->hw; |
---|
5377 | 6402 | enum ice_status status; |
---|
| 6403 | + struct device *dev; |
---|
5378 | 6404 | |
---|
| 6405 | + dev = ice_pf_to_dev(pf); |
---|
5379 | 6406 | if (seed) { |
---|
5380 | 6407 | struct ice_aqc_get_set_rss_keys *buf = |
---|
5381 | 6408 | (struct ice_aqc_get_set_rss_keys *)seed; |
---|
5382 | 6409 | |
---|
5383 | | - status = ice_aq_get_rss_key(hw, vsi->vsi_num, buf); |
---|
| 6410 | + status = ice_aq_get_rss_key(hw, vsi->idx, buf); |
---|
5384 | 6411 | if (status) { |
---|
5385 | | - dev_err(&pf->pdev->dev, |
---|
5386 | | - "Cannot get RSS key, err %d aq_err %d\n", |
---|
5387 | | - status, hw->adminq.rq_last_status); |
---|
| 6412 | + dev_err(dev, "Cannot get RSS key, err %s aq_err %s\n", |
---|
| 6413 | + ice_stat_str(status), |
---|
| 6414 | + ice_aq_str(hw->adminq.sq_last_status)); |
---|
5388 | 6415 | return -EIO; |
---|
5389 | 6416 | } |
---|
5390 | 6417 | } |
---|
5391 | 6418 | |
---|
5392 | 6419 | if (lut) { |
---|
5393 | | - status = ice_aq_get_rss_lut(hw, vsi->vsi_num, |
---|
5394 | | - vsi->rss_lut_type, lut, lut_size); |
---|
| 6420 | + status = ice_aq_get_rss_lut(hw, vsi->idx, vsi->rss_lut_type, |
---|
| 6421 | + lut, lut_size); |
---|
5395 | 6422 | if (status) { |
---|
5396 | | - dev_err(&pf->pdev->dev, |
---|
5397 | | - "Cannot get RSS lut, err %d aq_err %d\n", |
---|
5398 | | - status, hw->adminq.rq_last_status); |
---|
| 6423 | + dev_err(dev, "Cannot get RSS lut, err %s aq_err %s\n", |
---|
| 6424 | + ice_stat_str(status), |
---|
| 6425 | + ice_aq_str(hw->adminq.sq_last_status)); |
---|
5399 | 6426 | return -EIO; |
---|
5400 | 6427 | } |
---|
5401 | 6428 | } |
---|
.. | .. |
---|
5404 | 6431 | } |
---|
5405 | 6432 | |
---|
5406 | 6433 | /** |
---|
| 6434 | + * ice_bridge_getlink - Get the hardware bridge mode |
---|
| 6435 | + * @skb: skb buff |
---|
| 6436 | + * @pid: process ID |
---|
| 6437 | + * @seq: RTNL message seq |
---|
| 6438 | + * @dev: the netdev being configured |
---|
| 6439 | + * @filter_mask: filter mask passed in |
---|
| 6440 | + * @nlflags: netlink flags passed in |
---|
| 6441 | + * |
---|
| 6442 | + * Return the bridge mode (VEB/VEPA) |
---|
| 6443 | + */ |
---|
| 6444 | +static int |
---|
| 6445 | +ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, |
---|
| 6446 | + struct net_device *dev, u32 filter_mask, int nlflags) |
---|
| 6447 | +{ |
---|
| 6448 | + struct ice_netdev_priv *np = netdev_priv(dev); |
---|
| 6449 | + struct ice_vsi *vsi = np->vsi; |
---|
| 6450 | + struct ice_pf *pf = vsi->back; |
---|
| 6451 | + u16 bmode; |
---|
| 6452 | + |
---|
| 6453 | + bmode = pf->first_sw->bridge_mode; |
---|
| 6454 | + |
---|
| 6455 | + return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags, |
---|
| 6456 | + filter_mask, NULL); |
---|
| 6457 | +} |
---|
| 6458 | + |
---|
| 6459 | +/** |
---|
| 6460 | + * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA) |
---|
| 6461 | + * @vsi: Pointer to VSI structure |
---|
| 6462 | + * @bmode: Hardware bridge mode (VEB/VEPA) |
---|
| 6463 | + * |
---|
| 6464 | + * Returns 0 on success, negative on failure |
---|
| 6465 | + */ |
---|
| 6466 | +static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode) |
---|
| 6467 | +{ |
---|
| 6468 | + struct ice_aqc_vsi_props *vsi_props; |
---|
| 6469 | + struct ice_hw *hw = &vsi->back->hw; |
---|
| 6470 | + struct ice_vsi_ctx *ctxt; |
---|
| 6471 | + enum ice_status status; |
---|
| 6472 | + int ret = 0; |
---|
| 6473 | + |
---|
| 6474 | + vsi_props = &vsi->info; |
---|
| 6475 | + |
---|
| 6476 | + ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); |
---|
| 6477 | + if (!ctxt) |
---|
| 6478 | + return -ENOMEM; |
---|
| 6479 | + |
---|
| 6480 | + ctxt->info = vsi->info; |
---|
| 6481 | + |
---|
| 6482 | + if (bmode == BRIDGE_MODE_VEB) |
---|
| 6483 | + /* change from VEPA to VEB mode */ |
---|
| 6484 | + ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB; |
---|
| 6485 | + else |
---|
| 6486 | + /* change from VEB to VEPA mode */ |
---|
| 6487 | + ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB; |
---|
| 6488 | + ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID); |
---|
| 6489 | + |
---|
| 6490 | + status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); |
---|
| 6491 | + if (status) { |
---|
| 6492 | + dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %s aq_err %s\n", |
---|
| 6493 | + bmode, ice_stat_str(status), |
---|
| 6494 | + ice_aq_str(hw->adminq.sq_last_status)); |
---|
| 6495 | + ret = -EIO; |
---|
| 6496 | + goto out; |
---|
| 6497 | + } |
---|
| 6499 | + /* Update sw flags for bookkeeping */
---|
| 6499 | + vsi_props->sw_flags = ctxt->info.sw_flags; |
---|
| 6500 | + |
---|
| 6501 | +out: |
---|
| 6502 | + kfree(ctxt); |
---|
| 6503 | + return ret; |
---|
| 6504 | +} |
---|
| 6505 | + |
---|
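Design note: this follows the driver's usual read-modify-write pattern for VSI contexts: stage a heap-allocated copy of the cached `vsi->info`, flip only the `ALLOW_LB` loopback flag, and mark just the switch section valid via `valid_sections` so firmware leaves every other section untouched. The software cache (`vsi_props->sw_flags`) is updated only after `ice_update_vsi()` succeeds, keeping it consistent with the hardware on failure.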
| 6506 | +/** |
---|
| 6507 | + * ice_bridge_setlink - Set the hardware bridge mode |
---|
| 6508 | + * @dev: the netdev being configured |
---|
| 6509 | + * @nlh: RTNL message |
---|
| 6510 | + * @flags: bridge setlink flags |
---|
| 6511 | + * @extack: netlink extended ack |
---|
| 6512 | + * |
---|
| 6513 | + * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is
---|
| 6514 | + * attached. Iterates through the PF VSI list and sets the loopback mode (if
---|
| 6515 | + * not already set) for all VSIs connected to this switch, and also updates the
---|
| 6516 | + * unicast switch filter rules for the corresponding switch of the netdev.
---|
| 6517 | + */ |
---|
| 6518 | +static int |
---|
| 6519 | +ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, |
---|
| 6520 | + u16 __always_unused flags, |
---|
| 6521 | + struct netlink_ext_ack __always_unused *extack) |
---|
| 6522 | +{ |
---|
| 6523 | + struct ice_netdev_priv *np = netdev_priv(dev); |
---|
| 6524 | + struct ice_pf *pf = np->vsi->back; |
---|
| 6525 | + struct nlattr *attr, *br_spec; |
---|
| 6526 | + struct ice_hw *hw = &pf->hw; |
---|
| 6527 | + enum ice_status status; |
---|
| 6528 | + struct ice_sw *pf_sw; |
---|
| 6529 | + int rem, v, err = 0; |
---|
| 6530 | + |
---|
| 6531 | + pf_sw = pf->first_sw; |
---|
| 6532 | + /* find the attribute in the netlink message */ |
---|
| 6533 | + br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
---|
|  | + if (!br_spec)
---|
|  | + return -EINVAL;
---|
| 6534 | + |
---|
| 6535 | + nla_for_each_nested(attr, br_spec, rem) { |
---|
| 6536 | + __u16 mode; |
---|
| 6537 | + |
---|
| 6538 | + if (nla_type(attr) != IFLA_BRIDGE_MODE) |
---|
| 6539 | + continue; |
---|
| 6540 | + mode = nla_get_u16(attr); |
---|
| 6541 | + if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB) |
---|
| 6542 | + return -EINVAL; |
---|
| 6543 | + /* Continue if bridge mode is not being flipped */ |
---|
| 6544 | + if (mode == pf_sw->bridge_mode) |
---|
| 6545 | + continue; |
---|
| 6546 | + /* Iterates through the PF VSI list and update the loopback |
---|
| 6547 | + * mode of the VSI |
---|
| 6548 | + */ |
---|
| 6549 | + ice_for_each_vsi(pf, v) { |
---|
| 6550 | + if (!pf->vsi[v]) |
---|
| 6551 | + continue; |
---|
| 6552 | + err = ice_vsi_update_bridge_mode(pf->vsi[v], mode); |
---|
| 6553 | + if (err) |
---|
| 6554 | + return err; |
---|
| 6555 | + } |
---|
| 6556 | + |
---|
| 6557 | + hw->evb_veb = (mode == BRIDGE_MODE_VEB); |
---|
| 6558 | + /* Update the unicast switch filter rules for the corresponding |
---|
| 6559 | + * switch of the netdev |
---|
| 6560 | + */ |
---|
| 6561 | + status = ice_update_sw_rule_bridge_mode(hw); |
---|
| 6562 | + if (status) { |
---|
| 6563 | + netdev_err(dev, "switch rule update failed, mode = %d err %s aq_err %s\n", |
---|
| 6564 | + mode, ice_stat_str(status), |
---|
| 6565 | + ice_aq_str(hw->adminq.sq_last_status)); |
---|
| 6566 | + /* revert hw->evb_veb */ |
---|
| 6567 | + hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB); |
---|
| 6568 | + return -EIO; |
---|
| 6569 | + } |
---|
| 6570 | + |
---|
| 6571 | + pf_sw->bridge_mode = mode; |
---|
| 6572 | + } |
---|
| 6573 | + |
---|
| 6574 | + return 0; |
---|
| 6575 | +} |
---|
| 6576 | + |
---|
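The mode arrives nested inside an `IFLA_AF_SPEC` attribute, which is why the function walks the nest for `IFLA_BRIDGE_MODE` (user space triggers this with, e.g., `bridge link set dev <pf-netdev> hwmode vepa`). A standalone sketch of that parsing pattern using only the generic netlink helpers; `bridge_mode_from_nlmsg` is a hypothetical name:

```c
#include <linux/if_bridge.h>
#include <linux/rtnetlink.h>
#include <net/netlink.h>

/* Sketch: extract IFLA_BRIDGE_MODE from an RTM_SETLINK message the way
 * ndo_bridge_setlink implementations do; BRIDGE_MODE_UNDEF if absent.
 */
static u16 bridge_mode_from_nlmsg(struct nlmsghdr *nlh)
{
	struct nlattr *br_spec, *attr;
	int rem;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return BRIDGE_MODE_UNDEF;

	nla_for_each_nested(attr, br_spec, rem)
		if (nla_type(attr) == IFLA_BRIDGE_MODE)
			return nla_get_u16(attr);

	return BRIDGE_MODE_UNDEF;
}
```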
| 6577 | +/** |
---|
| 6578 | + * ice_tx_timeout - Respond to a Tx Hang |
---|
| 6579 | + * @netdev: network interface device structure |
---|
| 6580 | + * @txqueue: Tx queue |
---|
| 6581 | + */ |
---|
| 6582 | +static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue) |
---|
| 6583 | +{ |
---|
| 6584 | + struct ice_netdev_priv *np = netdev_priv(netdev); |
---|
| 6585 | + struct ice_ring *tx_ring = NULL; |
---|
| 6586 | + struct ice_vsi *vsi = np->vsi; |
---|
| 6587 | + struct ice_pf *pf = vsi->back; |
---|
| 6588 | + u32 i; |
---|
| 6589 | + |
---|
| 6590 | + pf->tx_timeout_count++; |
---|
| 6591 | + |
---|
| 6592 | + /* Check if PFC is enabled for the TC to which the queue belongs.
---|
| 6593 | + * If so, the Tx timeout was caused by a PFC storm rather than a hung
---|
| 6594 | + * queue, and there is no need to reset and rebuild.
---|
| 6595 | + */ |
---|
| 6596 | + if (ice_is_pfc_causing_hung_q(pf, txqueue)) { |
---|
| 6597 | + dev_info(ice_pf_to_dev(pf), "Fake Tx hang detected on queue %u, timeout caused by PFC storm\n", |
---|
| 6598 | + txqueue); |
---|
| 6599 | + return; |
---|
| 6600 | + } |
---|
| 6601 | + |
---|
| 6602 | + /* now that we have an index, find the tx_ring struct */ |
---|
| 6603 | + for (i = 0; i < vsi->num_txq; i++) |
---|
| 6604 | + if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) |
---|
| 6605 | + if (txqueue == vsi->tx_rings[i]->q_index) { |
---|
| 6606 | + tx_ring = vsi->tx_rings[i]; |
---|
| 6607 | + break; |
---|
| 6608 | + } |
---|
| 6609 | + |
---|
| 6610 | + /* Reset recovery level if enough time has elapsed after last timeout. |
---|
| 6611 | + * Also ensure no new reset action happens before next timeout period. |
---|
| 6612 | + */ |
---|
| 6613 | + if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20))) |
---|
| 6614 | + pf->tx_timeout_recovery_level = 1; |
---|
| 6615 | + else if (time_before(jiffies, (pf->tx_timeout_last_recovery + |
---|
| 6616 | + netdev->watchdog_timeo))) |
---|
| 6617 | + return; |
---|
| 6618 | + |
---|
| 6619 | + if (tx_ring) { |
---|
| 6620 | + struct ice_hw *hw = &pf->hw; |
---|
| 6621 | + u32 head, val = 0; |
---|
| 6622 | + |
---|
| 6623 | + head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])) & |
---|
| 6624 | + QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S; |
---|
| 6625 | + /* Read interrupt register */ |
---|
| 6626 | + val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx)); |
---|
| 6627 | + |
---|
| 6628 | + netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %u, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n", |
---|
| 6629 | + vsi->vsi_num, txqueue, tx_ring->next_to_clean, |
---|
| 6630 | + head, tx_ring->next_to_use, val); |
---|
| 6631 | + } |
---|
| 6632 | + |
---|
| 6633 | + pf->tx_timeout_last_recovery = jiffies; |
---|
| 6634 | + netdev_info(netdev, "tx_timeout recovery level %d, txqueue %u\n", |
---|
| 6635 | + pf->tx_timeout_recovery_level, txqueue); |
---|
| 6636 | + |
---|
| 6637 | + switch (pf->tx_timeout_recovery_level) { |
---|
| 6638 | + case 1: |
---|
| 6639 | + set_bit(__ICE_PFR_REQ, pf->state); |
---|
| 6640 | + break; |
---|
| 6641 | + case 2: |
---|
| 6642 | + set_bit(__ICE_CORER_REQ, pf->state); |
---|
| 6643 | + break; |
---|
| 6644 | + case 3: |
---|
| 6645 | + set_bit(__ICE_GLOBR_REQ, pf->state); |
---|
| 6646 | + break; |
---|
| 6647 | + default: |
---|
| 6648 | + netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n"); |
---|
| 6649 | + set_bit(__ICE_DOWN, pf->state); |
---|
| 6650 | + set_bit(__ICE_NEEDS_RESTART, vsi->state); |
---|
| 6651 | + set_bit(__ICE_SERVICE_DIS, pf->state); |
---|
| 6652 | + break; |
---|
| 6653 | + } |
---|
| 6654 | + |
---|
| 6655 | + ice_service_task_schedule(pf); |
---|
| 6656 | + pf->tx_timeout_recovery_level++; |
---|
| 6657 | +} |
---|
| 6658 | + |
---|
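The recovery policy above is an escalation ladder gated by two time windows: a timeout arriving more than 20 seconds after the last recovery restarts the ladder at level 1, one arriving within the watchdog period of the last recovery is ignored, and otherwise each successive level requests a progressively wider reset before giving up. A self-contained model of that policy with hypothetical names (the driver itself expresses it through the `__ICE_*_REQ` state bits; jiffies wraparound is ignored here for clarity):

```c
enum reset_req { RST_NONE, RST_PF, RST_CORE, RST_GLOBAL, RST_GIVE_UP };

/* Model only: decide which reset to request for this Tx timeout. */
static enum reset_req next_recovery(unsigned long now, unsigned long last,
				    unsigned long hz, unsigned long watchdog,
				    unsigned int *level)
{
	if (now - last > 20 * hz)
		*level = 1;		/* quiet long enough: restart ladder */
	else if (now - last < watchdog)
		return RST_NONE;	/* too soon after the last action */

	switch ((*level)++) {
	case 1: return RST_PF;		/* reset this physical function */
	case 2: return RST_CORE;	/* reset the device core */
	case 3: return RST_GLOBAL;	/* full global reset */
	default: return RST_GIVE_UP;	/* mark device down and stop */
	}
}
```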
| 6659 | +/** |
---|
5407 | 6660 | * ice_open - Called when a network interface becomes active |
---|
5408 | 6661 | * @netdev: network interface device structure |
---|
5409 | 6662 | * |
---|
5410 | 6663 | * The open entry point is called when a network interface is made |
---|
5411 | | - * active by the system (IFF_UP). At this point all resources needed |
---|
| 6664 | + * active by the system (IFF_UP). At this point all resources needed |
---|
5412 | 6665 | * for transmit and receive operations are allocated, the interrupt |
---|
5413 | 6666 | * handler is registered with the OS, the netdev watchdog is enabled, |
---|
5414 | 6667 | * and the stack is notified that the interface is ready. |
---|
5415 | 6668 | * |
---|
5416 | 6669 | * Returns 0 on success, negative value on failure |
---|
5417 | 6670 | */ |
---|
5418 | | -static int ice_open(struct net_device *netdev) |
---|
| 6671 | +int ice_open(struct net_device *netdev) |
---|
| 6672 | +{ |
---|
| 6673 | + struct ice_netdev_priv *np = netdev_priv(netdev); |
---|
| 6674 | + struct ice_pf *pf = np->vsi->back; |
---|
| 6675 | + |
---|
| 6676 | + if (ice_is_reset_in_progress(pf->state)) { |
---|
| 6677 | + netdev_err(netdev, "can't open net device while reset is in progress\n");
---|
| 6678 | + return -EBUSY; |
---|
| 6679 | + } |
---|
| 6680 | + |
---|
| 6681 | + return ice_open_internal(netdev); |
---|
| 6682 | +} |
---|
| 6683 | + |
---|
| 6684 | +/** |
---|
| 6685 | + * ice_open_internal - Called when a network interface becomes active |
---|
| 6686 | + * @netdev: network interface device structure |
---|
| 6687 | + * |
---|
| 6688 | + * Internal ice_open implementation. Should not be called directly except
---|
| 6689 | + * by ice_open and the reset handling routines.
---|
| 6690 | + * |
---|
| 6691 | + * Returns 0 on success, negative value on failure |
---|
| 6692 | + */ |
---|
| 6693 | +int ice_open_internal(struct net_device *netdev) |
---|
5419 | 6694 | { |
---|
5420 | 6695 | struct ice_netdev_priv *np = netdev_priv(netdev); |
---|
5421 | 6696 | struct ice_vsi *vsi = np->vsi; |
---|
| 6697 | + struct ice_pf *pf = vsi->back; |
---|
| 6698 | + struct ice_port_info *pi; |
---|
5422 | 6699 | int err; |
---|
| 6700 | + |
---|
| 6701 | + if (test_bit(__ICE_NEEDS_RESTART, pf->state)) { |
---|
| 6702 | + netdev_err(netdev, "driver needs to be unloaded and reloaded\n"); |
---|
| 6703 | + return -EIO; |
---|
| 6704 | + } |
---|
| 6705 | + |
---|
| 6706 | + if (test_bit(__ICE_DOWN, pf->state)) { |
---|
| 6707 | + netdev_err(netdev, "device is not ready yet\n"); |
---|
| 6708 | + return -EBUSY; |
---|
| 6709 | + } |
---|
5423 | 6710 | |
---|
5424 | 6711 | netif_carrier_off(netdev); |
---|
5425 | 6712 | |
---|
5426 | | - err = ice_vsi_open(vsi); |
---|
| 6713 | + pi = vsi->port_info; |
---|
| 6714 | + err = ice_update_link_info(pi); |
---|
| 6715 | + if (err) { |
---|
| 6716 | + netdev_err(netdev, "Failed to get link info, error %d\n", |
---|
| 6717 | + err); |
---|
| 6718 | + return err; |
---|
| 6719 | + } |
---|
5427 | 6720 | |
---|
| 6721 | + /* Set PHY if there is media; otherwise, turn off the PHY */
---|
| 6722 | + if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) { |
---|
| 6723 | + clear_bit(ICE_FLAG_NO_MEDIA, pf->flags); |
---|
| 6724 | + if (!test_bit(__ICE_PHY_INIT_COMPLETE, pf->state)) { |
---|
| 6725 | + err = ice_init_phy_user_cfg(pi); |
---|
| 6726 | + if (err) { |
---|
| 6727 | + netdev_err(netdev, "Failed to initialize PHY settings, error %d\n", |
---|
| 6728 | + err); |
---|
| 6729 | + return err; |
---|
| 6730 | + } |
---|
| 6731 | + } |
---|
| 6732 | + |
---|
| 6733 | + err = ice_configure_phy(vsi); |
---|
| 6734 | + if (err) { |
---|
| 6735 | + netdev_err(netdev, "Failed to set physical link up, error %d\n", |
---|
| 6736 | + err); |
---|
| 6737 | + return err; |
---|
| 6738 | + } |
---|
| 6739 | + } else { |
---|
| 6740 | + set_bit(ICE_FLAG_NO_MEDIA, pf->flags); |
---|
| 6741 | + err = ice_aq_set_link_restart_an(pi, false, NULL); |
---|
| 6742 | + if (err) { |
---|
| 6743 | + netdev_err(netdev, "Failed to set PHY state, VSI %d error %d\n", |
---|
| 6744 | + vsi->vsi_num, err); |
---|
| 6745 | + return err; |
---|
| 6746 | + } |
---|
| 6747 | + } |
---|
| 6748 | + |
---|
| 6749 | + err = ice_vsi_open(vsi); |
---|
5428 | 6750 | if (err) |
---|
5429 | 6751 | netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n", |
---|
5430 | 6752 | vsi->vsi_num, vsi->vsw->sw_id); |
---|
| 6753 | + |
---|
| 6754 | + /* Update existing tunnel information */
---|
| 6755 | + udp_tunnel_get_rx_info(netdev); |
---|
| 6756 | + |
---|
5431 | 6757 | return err; |
---|
5432 | 6758 | } |
---|
5433 | 6759 | |
---|
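For context, `ice_open` is called by the stack when the interface is brought administratively up (`ip link set <dev> up`, or the classic ioctl below), which is also why it must bail out with -EBUSY while a reset owns the hardware. A minimal user-space sketch; the interface name is a placeholder:

```c
#include <net/if.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

/* Setting IFF_UP on an interface is what ultimately invokes .ndo_open. */
static int bring_up(const char *ifname)
{
	struct ifreq ifr;
	int ret, fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ret = ioctl(fd, SIOCGIFFLAGS, &ifr);	/* read current flags */
	if (!ret) {
		ifr.ifr_flags |= IFF_UP;
		ret = ioctl(fd, SIOCSIFFLAGS, &ifr);	/* bring it up */
	}
	close(fd);
	return ret;
}
```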
.. | .. |
---|
5436 | 6762 | * @netdev: network interface device structure |
---|
5437 | 6763 | * |
---|
5438 | 6764 | * The stop entry point is called when an interface is de-activated by the OS, |
---|
5439 | | - * and the netdevice enters the DOWN state. The hardware is still under the |
---|
| 6765 | + * and the netdevice enters the DOWN state. The hardware is still under the |
---|
5440 | 6766 | * driver's control, but the netdev interface is disabled. |
---|
5441 | 6767 | * |
---|
5442 | 6768 | * Returns success only - not allowed to fail |
---|
5443 | 6769 | */ |
---|
5444 | | -static int ice_stop(struct net_device *netdev) |
---|
| 6770 | +int ice_stop(struct net_device *netdev) |
---|
5445 | 6771 | { |
---|
5446 | 6772 | struct ice_netdev_priv *np = netdev_priv(netdev); |
---|
5447 | 6773 | struct ice_vsi *vsi = np->vsi; |
---|
| 6774 | + struct ice_pf *pf = vsi->back; |
---|
| 6775 | + |
---|
| 6776 | + if (ice_is_reset_in_progress(pf->state)) { |
---|
| 6777 | + netdev_err(netdev, "can't stop net device while reset is in progress\n");
---|
| 6778 | + return -EBUSY; |
---|
| 6779 | + } |
---|
5448 | 6780 | |
---|
5449 | 6781 | ice_vsi_close(vsi); |
---|
5450 | 6782 | |
---|
.. | .. |
---|
5462 | 6794 | struct net_device __always_unused *netdev, |
---|
5463 | 6795 | netdev_features_t features) |
---|
5464 | 6796 | { |
---|
| 6797 | + bool gso = skb_is_gso(skb); |
---|
5465 | 6798 | size_t len; |
---|
5466 | 6799 | |
---|
5467 | 6800 | /* No point in doing any of this if neither checksum nor GSO is
---|
5468 | | - * being requested for this frame. We can rule out both by just |
---|
| 6801 | + * being requested for this frame. We can rule out both by just |
---|
5469 | 6802 | * checking for CHECKSUM_PARTIAL |
---|
5470 | 6803 | */ |
---|
5471 | 6804 | if (skb->ip_summed != CHECKSUM_PARTIAL) |
---|
5472 | 6805 | return features; |
---|
5473 | 6806 | |
---|
5474 | 6807 | /* We cannot support GSO if the MSS is going to be less than |
---|
5475 | | - * 64 bytes. If it is then we need to drop support for GSO. |
---|
| 6808 | + * 64 bytes. If it is, we need to drop support for GSO.
---|
5476 | 6809 | */ |
---|
5477 | | - if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64)) |
---|
| 6810 | + if (gso && (skb_shinfo(skb)->gso_size < ICE_TXD_CTX_MIN_MSS)) |
---|
5478 | 6811 | features &= ~NETIF_F_GSO_MASK; |
---|
5479 | 6812 | |
---|
5480 | | - len = skb_network_header(skb) - skb->data; |
---|
5481 | | - if (len & ~(ICE_TXD_MACLEN_MAX)) |
---|
| 6813 | + len = skb_network_offset(skb); |
---|
| 6814 | + if (len > ICE_TXD_MACLEN_MAX || len & 0x1) |
---|
5482 | 6815 | goto out_rm_features; |
---|
5483 | 6816 | |
---|
5484 | | - len = skb_transport_header(skb) - skb_network_header(skb); |
---|
5485 | | - if (len & ~(ICE_TXD_IPLEN_MAX)) |
---|
| 6817 | + len = skb_network_header_len(skb); |
---|
| 6818 | + if (len > ICE_TXD_IPLEN_MAX || len & 0x1) |
---|
5486 | 6819 | goto out_rm_features; |
---|
5487 | 6820 | |
---|
5488 | 6821 | if (skb->encapsulation) { |
---|
5489 | | - len = skb_inner_network_header(skb) - skb_transport_header(skb); |
---|
5490 | | - if (len & ~(ICE_TXD_L4LEN_MAX)) |
---|
5491 | | - goto out_rm_features; |
---|
| 6822 | + /* this must work for VXLAN frames AND IPIP/SIT frames, and in |
---|
| 6823 | + * the case of IPIP frames, the transport header pointer is |
---|
| 6824 | + * after the inner header! So check to make sure that this |
---|
| 6825 | + * is a GRE or UDP_TUNNEL frame before doing that math. |
---|
| 6826 | + */ |
---|
| 6827 | + if (gso && (skb_shinfo(skb)->gso_type & |
---|
| 6828 | + (SKB_GSO_GRE | SKB_GSO_UDP_TUNNEL))) { |
---|
| 6829 | + len = skb_inner_network_header(skb) - |
---|
| 6830 | + skb_transport_header(skb); |
---|
| 6831 | + if (len > ICE_TXD_L4LEN_MAX || len & 0x1) |
---|
| 6832 | + goto out_rm_features; |
---|
| 6833 | + } |
---|
5492 | 6834 | |
---|
5493 | | - len = skb_inner_transport_header(skb) - |
---|
5494 | | - skb_inner_network_header(skb); |
---|
5495 | | - if (len & ~(ICE_TXD_IPLEN_MAX)) |
---|
| 6835 | + len = skb_inner_network_header_len(skb); |
---|
| 6836 | + if (len > ICE_TXD_IPLEN_MAX || len & 0x1) |
---|
5496 | 6837 | goto out_rm_features; |
---|
5497 | 6838 | } |
---|
5498 | 6839 | |
---|
.. | .. |
---|
5500 | 6841 | out_rm_features: |
---|
5501 | 6842 | return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); |
---|
5502 | 6843 | } |
---|
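The rewritten checks are stricter than the old bitmask tests: a header length now fails either by exceeding the descriptor field's range or by being odd. The oddness test reflects the assumption that the hardware encodes these lengths in 2-byte units, so an odd byte count simply cannot be expressed and the frame must fall back to software checksum/GSO. A minimal sketch of the predicate, with the limit as a stand-in for `ICE_TXD_MACLEN_MAX`/`ICE_TXD_IPLEN_MAX`:

```c
#include <stdbool.h>
#include <stddef.h>

/* Sketch only: offload eligibility for one header, in the style above. */
static bool hdr_len_ok(size_t len, size_t max)
{
	/* reject lengths the descriptor field cannot hold, and odd
	 * lengths that a 2-byte-granular encoding cannot express
	 */
	return len <= max && !(len & 0x1);
}
```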
| 6844 | + |
---|
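Safe-mode note: `ice_netdev_safe_mode_ops` is the reduced callback set registered when the DDP package cannot be loaded and the device falls back to safe mode. The basic open/stop/xmit/MTU/stats paths remain, while the VF, VLAN, bridge, tunnel, and flow-steering hooks present in the full `ice_netdev_ops` below are deliberately absent, and `ice_xdp_safe_mode` rejects attempts to attach an XDP program.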
| 6845 | +static const struct net_device_ops ice_netdev_safe_mode_ops = { |
---|
| 6846 | + .ndo_open = ice_open, |
---|
| 6847 | + .ndo_stop = ice_stop, |
---|
| 6848 | + .ndo_start_xmit = ice_start_xmit, |
---|
| 6849 | + .ndo_set_mac_address = ice_set_mac_address, |
---|
| 6850 | + .ndo_validate_addr = eth_validate_addr, |
---|
| 6851 | + .ndo_change_mtu = ice_change_mtu, |
---|
| 6852 | + .ndo_get_stats64 = ice_get_stats64, |
---|
| 6853 | + .ndo_tx_timeout = ice_tx_timeout, |
---|
| 6854 | + .ndo_bpf = ice_xdp_safe_mode, |
---|
| 6855 | +}; |
---|
5503 | 6856 | |
---|
5504 | 6857 | static const struct net_device_ops ice_netdev_ops = { |
---|
5505 | 6858 | .ndo_open = ice_open, |
---|
.. | .. |
---|
5511 | 6864 | .ndo_validate_addr = eth_validate_addr, |
---|
5512 | 6865 | .ndo_change_mtu = ice_change_mtu, |
---|
5513 | 6866 | .ndo_get_stats64 = ice_get_stats64, |
---|
| 6867 | + .ndo_set_tx_maxrate = ice_set_tx_maxrate, |
---|
| 6868 | + .ndo_set_vf_spoofchk = ice_set_vf_spoofchk, |
---|
| 6869 | + .ndo_set_vf_mac = ice_set_vf_mac, |
---|
| 6870 | + .ndo_get_vf_config = ice_get_vf_cfg, |
---|
| 6871 | + .ndo_set_vf_trust = ice_set_vf_trust, |
---|
| 6872 | + .ndo_set_vf_vlan = ice_set_vf_port_vlan, |
---|
| 6873 | + .ndo_set_vf_link_state = ice_set_vf_link_state, |
---|
| 6874 | + .ndo_get_vf_stats = ice_get_vf_stats, |
---|
5514 | 6875 | .ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid, |
---|
5515 | 6876 | .ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid, |
---|
5516 | 6877 | .ndo_set_features = ice_set_features, |
---|
| 6878 | + .ndo_bridge_getlink = ice_bridge_getlink, |
---|
| 6879 | + .ndo_bridge_setlink = ice_bridge_setlink, |
---|
5517 | 6880 | .ndo_fdb_add = ice_fdb_add, |
---|
5518 | 6881 | .ndo_fdb_del = ice_fdb_del, |
---|
| 6882 | +#ifdef CONFIG_RFS_ACCEL |
---|
| 6883 | + .ndo_rx_flow_steer = ice_rx_flow_steer, |
---|
| 6884 | +#endif |
---|
| 6885 | + .ndo_tx_timeout = ice_tx_timeout, |
---|
| 6886 | + .ndo_bpf = ice_xdp, |
---|
| 6887 | + .ndo_xdp_xmit = ice_xdp_xmit, |
---|
| 6888 | + .ndo_xsk_wakeup = ice_xsk_wakeup, |
---|
| 6889 | + .ndo_udp_tunnel_add = udp_tunnel_nic_add_port, |
---|
| 6890 | + .ndo_udp_tunnel_del = udp_tunnel_nic_del_port, |
---|
5519 | 6891 | }; |
---|