| .. | .. |
|---|
| 5 | 5 | |
|---|
| 6 | 6 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
|---|
| 7 | 7 | |
|---|
| 8 | +#include <generated/utsrelease.h> |
|---|
| 8 | 9 | #include "ice.h" |
|---|
| 10 | +#include "ice_base.h" |
|---|
| 11 | +#include "ice_lib.h" |
|---|
| 12 | +#include "ice_fltr.h" |
|---|
| 13 | +#include "ice_dcb_lib.h" |
|---|
| 14 | +#include "ice_dcb_nl.h" |
|---|
| 15 | +#include "ice_devlink.h" |
|---|
| 9 | 16 | |
|---|
| 10 | | -#define DRV_VERSION "0.7.1-k" |
|---|
| 11 | 17 | #define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver" |
|---|
| 12 | | -const char ice_drv_ver[] = DRV_VERSION; |
|---|
| 13 | 18 | static const char ice_driver_string[] = DRV_SUMMARY; |
|---|
| 14 | 19 | static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation."; |
|---|
| 15 | 20 | |
|---|
| 21 | +/* DDP Package file located in firmware search paths (e.g. /lib/firmware/) */ |
|---|
| 22 | +#define ICE_DDP_PKG_PATH "intel/ice/ddp/" |
|---|
| 23 | +#define ICE_DDP_PKG_FILE ICE_DDP_PKG_PATH "ice.pkg" |
|---|
| 24 | + |
|---|
| 16 | 25 | MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); |
|---|
| 17 | 26 | MODULE_DESCRIPTION(DRV_SUMMARY); |
|---|
| 18 | | -MODULE_LICENSE("GPL"); |
|---|
| 19 | | -MODULE_VERSION(DRV_VERSION); |
|---|
| 27 | +MODULE_LICENSE("GPL v2"); |
|---|
| 28 | +MODULE_FIRMWARE(ICE_DDP_PKG_FILE); |
|---|
| 20 | 29 | |
|---|
| 21 | 30 | static int debug = -1; |
|---|
| 22 | 31 | module_param(debug, int, 0644); |
|---|
| .. | .. |
|---|
| 27 | 36 | #endif /* !CONFIG_DYNAMIC_DEBUG */ |
|---|
| 28 | 37 | |
|---|
| 29 | 38 | static struct workqueue_struct *ice_wq; |
|---|
| 39 | +static const struct net_device_ops ice_netdev_safe_mode_ops; |
|---|
| 30 | 40 | static const struct net_device_ops ice_netdev_ops; |
|---|
| 41 | +static int ice_vsi_open(struct ice_vsi *vsi); |
|---|
| 31 | 42 | |
|---|
| 32 | | -static void ice_pf_dis_all_vsi(struct ice_pf *pf); |
|---|
| 33 | | -static void ice_rebuild(struct ice_pf *pf); |
|---|
| 34 | | -static int ice_vsi_release(struct ice_vsi *vsi); |
|---|
| 35 | | -static void ice_update_vsi_stats(struct ice_vsi *vsi); |
|---|
| 36 | | -static void ice_update_pf_stats(struct ice_pf *pf); |
|---|
| 43 | +static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type); |
|---|
| 44 | + |
|---|
| 45 | +static void ice_vsi_release_all(struct ice_pf *pf); |
|---|
| 37 | 46 | |
|---|
| 38 | 47 | /** |
|---|
| 39 | | - * ice_get_free_slot - get the next non-NULL location index in array |
|---|
| 40 | | - * @array: array to search |
|---|
| 41 | | - * @size: size of the array |
|---|
| 42 | | - * @curr: last known occupied index to be used as a search hint |
|---|
| 43 | | - * |
|---|
| 44 | | - * void * is being used to keep the functionality generic. This lets us use this |
|---|
| 45 | | - * function on any array of pointers. |
|---|
| 48 | + * ice_get_tx_pending - returns number of Tx descriptors not processed |
|---|
| 49 | + * @ring: the ring of descriptors |
|---|
| 46 | 50 | */ |
|---|
| 47 | | -static int ice_get_free_slot(void *array, int size, int curr) |
|---|
| 51 | +static u16 ice_get_tx_pending(struct ice_ring *ring) |
|---|
| 48 | 52 | { |
|---|
| 49 | | - int **tmp_array = (int **)array; |
|---|
| 50 | | - int next; |
|---|
| 53 | + u16 head, tail; |
|---|
| 51 | 54 | |
|---|
| 52 | | - if (curr < (size - 1) && !tmp_array[curr + 1]) { |
|---|
| 53 | | - next = curr + 1; |
|---|
| 54 | | - } else { |
|---|
| 55 | | - int i = 0; |
|---|
| 55 | + head = ring->next_to_clean; |
|---|
| 56 | + tail = ring->next_to_use; |
|---|
| 56 | 57 | |
|---|
| 57 | | - while ((i < size) && (tmp_array[i])) |
|---|
| 58 | | - i++; |
|---|
| 59 | | - if (i == size) |
|---|
| 60 | | - next = ICE_NO_VSI; |
|---|
| 61 | | - else |
|---|
| 62 | | - next = i; |
|---|
| 63 | | - } |
|---|
| 64 | | - return next; |
|---|
| 65 | | -} |
|---|
| 66 | | - |
|---|
| 67 | | -/** |
|---|
| 68 | | - * ice_search_res - Search the tracker for a block of resources |
|---|
| 69 | | - * @res: pointer to the resource |
|---|
| 70 | | - * @needed: size of the block needed |
|---|
| 71 | | - * @id: identifier to track owner |
|---|
| 72 | | - * Returns the base item index of the block, or -ENOMEM for error |
|---|
| 73 | | - */ |
|---|
| 74 | | -static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id) |
|---|
| 75 | | -{ |
|---|
| 76 | | - int start = res->search_hint; |
|---|
| 77 | | - int end = start; |
|---|
| 78 | | - |
|---|
| 79 | | - id |= ICE_RES_VALID_BIT; |
|---|
| 80 | | - |
|---|
| 81 | | - do { |
|---|
| 82 | | - /* skip already allocated entries */ |
|---|
| 83 | | - if (res->list[end++] & ICE_RES_VALID_BIT) { |
|---|
| 84 | | - start = end; |
|---|
| 85 | | - if ((start + needed) > res->num_entries) |
|---|
| 86 | | - break; |
|---|
| 87 | | - } |
|---|
| 88 | | - |
|---|
| 89 | | - if (end == (start + needed)) { |
|---|
| 90 | | - int i = start; |
|---|
| 91 | | - |
|---|
| 92 | | - /* there was enough, so assign it to the requestor */ |
|---|
| 93 | | - while (i != end) |
|---|
| 94 | | - res->list[i++] = id; |
|---|
| 95 | | - |
|---|
| 96 | | - if (end == res->num_entries) |
|---|
| 97 | | - end = 0; |
|---|
| 98 | | - |
|---|
| 99 | | - res->search_hint = end; |
|---|
| 100 | | - return start; |
|---|
| 101 | | - } |
|---|
| 102 | | - } while (1); |
|---|
| 103 | | - |
|---|
| 104 | | - return -ENOMEM; |
|---|
| 105 | | -} |
|---|
| 106 | | - |
|---|
| 107 | | -/** |
|---|
| 108 | | - * ice_get_res - get a block of resources |
|---|
| 109 | | - * @pf: board private structure |
|---|
| 110 | | - * @res: pointer to the resource |
|---|
| 111 | | - * @needed: size of the block needed |
|---|
| 112 | | - * @id: identifier to track owner |
|---|
| 113 | | - * |
|---|
| 114 | | - * Returns the base item index of the block, or -ENOMEM for error |
|---|
| 115 | | - * The search_hint trick and lack of advanced fit-finding only works |
|---|
| 116 | | - * because we're highly likely to have all the same sized requests. |
|---|
| 117 | | - * Linear search time and any fragmentation should be minimal. |
|---|
| 118 | | - */ |
|---|
| 119 | | -static int |
|---|
| 120 | | -ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id) |
|---|
| 121 | | -{ |
|---|
| 122 | | - int ret; |
|---|
| 123 | | - |
|---|
| 124 | | - if (!res || !pf) |
|---|
| 125 | | - return -EINVAL; |
|---|
| 126 | | - |
|---|
| 127 | | - if (!needed || needed > res->num_entries || id >= ICE_RES_VALID_BIT) { |
|---|
| 128 | | - dev_err(&pf->pdev->dev, |
|---|
| 129 | | - "param err: needed=%d, num_entries = %d id=0x%04x\n", |
|---|
| 130 | | - needed, res->num_entries, id); |
|---|
| 131 | | - return -EINVAL; |
|---|
| 132 | | - } |
|---|
| 133 | | - |
|---|
| 134 | | - /* search based on search_hint */ |
|---|
| 135 | | - ret = ice_search_res(res, needed, id); |
|---|
| 136 | | - |
|---|
| 137 | | - if (ret < 0) { |
|---|
| 138 | | - /* previous search failed. Reset search hint and try again */ |
|---|
| 139 | | - res->search_hint = 0; |
|---|
| 140 | | - ret = ice_search_res(res, needed, id); |
|---|
| 141 | | - } |
|---|
| 142 | | - |
|---|
| 143 | | - return ret; |
|---|
| 144 | | -} |
|---|
| 145 | | - |
|---|
| 146 | | -/** |
|---|
| 147 | | - * ice_free_res - free a block of resources |
|---|
| 148 | | - * @res: pointer to the resource |
|---|
| 149 | | - * @index: starting index previously returned by ice_get_res |
|---|
| 150 | | - * @id: identifier to track owner |
|---|
| 151 | | - * Returns number of resources freed |
|---|
| 152 | | - */ |
|---|
| 153 | | -static int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id) |
|---|
| 154 | | -{ |
|---|
| 155 | | - int count = 0; |
|---|
| 156 | | - int i; |
|---|
| 157 | | - |
|---|
| 158 | | - if (!res || index >= res->num_entries) |
|---|
| 159 | | - return -EINVAL; |
|---|
| 160 | | - |
|---|
| 161 | | - id |= ICE_RES_VALID_BIT; |
|---|
| 162 | | - for (i = index; i < res->num_entries && res->list[i] == id; i++) { |
|---|
| 163 | | - res->list[i] = 0; |
|---|
| 164 | | - count++; |
|---|
| 165 | | - } |
|---|
| 166 | | - |
|---|
| 167 | | - return count; |
|---|
| 168 | | -} |
|---|
| 169 | | - |
|---|
| 170 | | -/** |
|---|
| 171 | | - * ice_add_mac_to_list - Add a mac address filter entry to the list |
|---|
| 172 | | - * @vsi: the VSI to be forwarded to |
|---|
| 173 | | - * @add_list: pointer to the list which contains MAC filter entries |
|---|
| 174 | | - * @macaddr: the MAC address to be added. |
|---|
| 175 | | - * |
|---|
| 176 | | - * Adds mac address filter entry to the temp list |
|---|
| 177 | | - * |
|---|
| 178 | | - * Returns 0 on success or ENOMEM on failure. |
|---|
| 179 | | - */ |
|---|
| 180 | | -static int ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list, |
|---|
| 181 | | - const u8 *macaddr) |
|---|
| 182 | | -{ |
|---|
| 183 | | - struct ice_fltr_list_entry *tmp; |
|---|
| 184 | | - struct ice_pf *pf = vsi->back; |
|---|
| 185 | | - |
|---|
| 186 | | - tmp = devm_kzalloc(&pf->pdev->dev, sizeof(*tmp), GFP_ATOMIC); |
|---|
| 187 | | - if (!tmp) |
|---|
| 188 | | - return -ENOMEM; |
|---|
| 189 | | - |
|---|
| 190 | | - tmp->fltr_info.flag = ICE_FLTR_TX; |
|---|
| 191 | | - tmp->fltr_info.src = vsi->vsi_num; |
|---|
| 192 | | - tmp->fltr_info.lkup_type = ICE_SW_LKUP_MAC; |
|---|
| 193 | | - tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI; |
|---|
| 194 | | - tmp->fltr_info.fwd_id.vsi_id = vsi->vsi_num; |
|---|
| 195 | | - ether_addr_copy(tmp->fltr_info.l_data.mac.mac_addr, macaddr); |
|---|
| 196 | | - |
|---|
| 197 | | - INIT_LIST_HEAD(&tmp->list_entry); |
|---|
| 198 | | - list_add(&tmp->list_entry, add_list); |
|---|
| 199 | | - |
|---|
| 58 | + if (head != tail) |
|---|
| 59 | + return (head < tail) ? |
|---|
| 60 | + tail - head : (tail + ring->count - head); |
|---|
| 200 | 61 | return 0; |
|---|
| 201 | 62 | } |
|---|
| 202 | 63 | |
|---|
| 203 | 64 | /** |
|---|
| 204 | | - * ice_add_mac_to_sync_list - creates list of mac addresses to be synced |
|---|
| 65 | + * ice_check_for_hang_subtask - check for and recover hung queues |
|---|
| 66 | + * @pf: pointer to PF struct |
|---|
| 67 | + */ |
|---|
| 68 | +static void ice_check_for_hang_subtask(struct ice_pf *pf) |
|---|
| 69 | +{ |
|---|
| 70 | + struct ice_vsi *vsi = NULL; |
|---|
| 71 | + struct ice_hw *hw; |
|---|
| 72 | + unsigned int i; |
|---|
| 73 | + int packets; |
|---|
| 74 | + u32 v; |
|---|
| 75 | + |
|---|
| 76 | + ice_for_each_vsi(pf, v) |
|---|
| 77 | + if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) { |
|---|
| 78 | + vsi = pf->vsi[v]; |
|---|
| 79 | + break; |
|---|
| 80 | + } |
|---|
| 81 | + |
|---|
| 82 | + if (!vsi || test_bit(__ICE_DOWN, vsi->state)) |
|---|
| 83 | + return; |
|---|
| 84 | + |
|---|
| 85 | + if (!(vsi->netdev && netif_carrier_ok(vsi->netdev))) |
|---|
| 86 | + return; |
|---|
| 87 | + |
|---|
| 88 | + hw = &vsi->back->hw; |
|---|
| 89 | + |
|---|
| 90 | + for (i = 0; i < vsi->num_txq; i++) { |
|---|
| 91 | + struct ice_ring *tx_ring = vsi->tx_rings[i]; |
|---|
| 92 | + |
|---|
| 93 | + if (tx_ring && tx_ring->desc) { |
|---|
| 94 | + /* If packet counter has not changed the queue is |
|---|
| 95 | + * likely stalled, so force an interrupt for this |
|---|
| 96 | + * queue. |
|---|
| 97 | + * |
|---|
| 98 | + * prev_pkt would be negative if there was no |
|---|
| 99 | + * pending work. |
|---|
| 100 | + */ |
|---|
| 101 | + packets = tx_ring->stats.pkts & INT_MAX; |
|---|
| 102 | + if (tx_ring->tx_stats.prev_pkt == packets) { |
|---|
| 103 | + /* Trigger sw interrupt to revive the queue */ |
|---|
| 104 | + ice_trigger_sw_intr(hw, tx_ring->q_vector); |
|---|
| 105 | + continue; |
|---|
| 106 | + } |
|---|
| 107 | + |
|---|
| 108 | + /* Memory barrier between read of packet count and call |
|---|
| 109 | + * to ice_get_tx_pending() |
|---|
| 110 | + */ |
|---|
| 111 | + smp_rmb(); |
|---|
| 112 | + tx_ring->tx_stats.prev_pkt = |
|---|
| 113 | + ice_get_tx_pending(tx_ring) ? packets : -1; |
|---|
| 114 | + } |
|---|
| 115 | + } |
|---|
| 116 | +} |
|---|
| 117 | + |
|---|
| 118 | +/** |
|---|
| 119 | + * ice_init_mac_fltr - Set initial MAC filters |
|---|
| 120 | + * @pf: board private structure |
|---|
| 121 | + * |
|---|
| 122 | + * Set initial set of MAC filters for PF VSI; configure filters for permanent |
|---|
| 123 | + * address and broadcast address. If an error is encountered, netdevice will be |
|---|
| 124 | + * unregistered. |
|---|
| 125 | + */ |
|---|
| 126 | +static int ice_init_mac_fltr(struct ice_pf *pf) |
|---|
| 127 | +{ |
|---|
| 128 | + enum ice_status status; |
|---|
| 129 | + struct ice_vsi *vsi; |
|---|
| 130 | + u8 *perm_addr; |
|---|
| 131 | + |
|---|
| 132 | + vsi = ice_get_main_vsi(pf); |
|---|
| 133 | + if (!vsi) |
|---|
| 134 | + return -EINVAL; |
|---|
| 135 | + |
|---|
| 136 | + perm_addr = vsi->port_info->mac.perm_addr; |
|---|
| 137 | + status = ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI); |
|---|
| 138 | + if (!status) |
|---|
| 139 | + return 0; |
|---|
| 140 | + |
|---|
| 141 | + /* We aren't useful with no MAC filters, so unregister if we |
|---|
| 142 | + * had an error |
|---|
| 143 | + */ |
|---|
| 144 | + if (vsi->netdev->reg_state == NETREG_REGISTERED) { |
|---|
| 145 | + dev_err(ice_pf_to_dev(pf), "Could not add MAC filters error %s. Unregistering device\n", |
|---|
| 146 | + ice_stat_str(status)); |
|---|
| 147 | + unregister_netdev(vsi->netdev); |
|---|
| 148 | + free_netdev(vsi->netdev); |
|---|
| 149 | + vsi->netdev = NULL; |
|---|
| 150 | + } |
|---|
| 151 | + |
|---|
| 152 | + return -EIO; |
|---|
| 153 | +} |
|---|
| 154 | + |
|---|
| 155 | +/** |
|---|
| 156 | + * ice_add_mac_to_sync_list - creates list of MAC addresses to be synced |
|---|
| 205 | 157 | * @netdev: the net device on which the sync is happening |
|---|
| 206 | | - * @addr: mac address to sync |
|---|
| 158 | + * @addr: MAC address to sync |
|---|
| 207 | 159 | * |
|---|
| 208 | 160 | * This is a callback function which is called by the in kernel device sync |
|---|
| 209 | 161 | * functions (like __dev_uc_sync, __dev_mc_sync, etc). This function only |
|---|
| 210 | 162 | * populates the tmp_sync_list, which is later used by ice_add_mac to add the |
|---|
| 211 | | - * mac filters from the hardware. |
|---|
| 163 | + * MAC filters from the hardware. |
|---|
| 212 | 164 | */ |
|---|
| 213 | 165 | static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr) |
|---|
| 214 | 166 | { |
|---|
| 215 | 167 | struct ice_netdev_priv *np = netdev_priv(netdev); |
|---|
| 216 | 168 | struct ice_vsi *vsi = np->vsi; |
|---|
| 217 | 169 | |
|---|
| 218 | | - if (ice_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr)) |
|---|
| 170 | + if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr, |
|---|
| 171 | + ICE_FWD_TO_VSI)) |
|---|
| 219 | 172 | return -EINVAL; |
|---|
| 220 | 173 | |
|---|
| 221 | 174 | return 0; |
|---|
| 222 | 175 | } |
|---|
| 223 | 176 | |
|---|
| 224 | 177 | /** |
|---|
| 225 | | - * ice_add_mac_to_unsync_list - creates list of mac addresses to be unsynced |
|---|
| 178 | + * ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced |
|---|
| 226 | 179 | * @netdev: the net device on which the unsync is happening |
|---|
| 227 | | - * @addr: mac address to unsync |
|---|
| 180 | + * @addr: MAC address to unsync |
|---|
| 228 | 181 | * |
|---|
| 229 | 182 | * This is a callback function which is called by the in kernel device unsync |
|---|
| 230 | 183 | * functions (like __dev_uc_unsync, __dev_mc_unsync, etc). This function only |
|---|
| 231 | 184 | * populates the tmp_unsync_list, which is later used by ice_remove_mac to |
|---|
| 232 | | - * delete the mac filters from the hardware. |
|---|
| 185 | + * delete the MAC filters from the hardware. |
|---|
| 233 | 186 | */ |
|---|
| 234 | 187 | static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr) |
|---|
| 235 | 188 | { |
|---|
| 236 | 189 | struct ice_netdev_priv *np = netdev_priv(netdev); |
|---|
| 237 | 190 | struct ice_vsi *vsi = np->vsi; |
|---|
| 238 | 191 | |
|---|
| 239 | | - if (ice_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr)) |
|---|
| 192 | + /* Under some circumstances, we might receive a request to delete our |
|---|
| 193 | + * own device address from our uc list. Because we store the device |
|---|
| 194 | + * address in the VSI's MAC filter list, we need to ignore such |
|---|
| 195 | + * requests and not delete our device address from this list. |
|---|
| 196 | + */ |
|---|
| 197 | + if (ether_addr_equal(addr, netdev->dev_addr)) |
|---|
| 198 | + return 0; |
|---|
| 199 | + |
|---|
| 200 | + if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr, |
|---|
| 201 | + ICE_FWD_TO_VSI)) |
|---|
| 240 | 202 | return -EINVAL; |
|---|
| 241 | 203 | |
|---|
| 242 | 204 | return 0; |
|---|
| 243 | | -} |
|---|
| 244 | | - |
|---|
| 245 | | -/** |
|---|
| 246 | | - * ice_free_fltr_list - free filter lists helper |
|---|
| 247 | | - * @dev: pointer to the device struct |
|---|
| 248 | | - * @h: pointer to the list head to be freed |
|---|
| 249 | | - * |
|---|
| 250 | | - * Helper function to free filter lists previously created using |
|---|
| 251 | | - * ice_add_mac_to_list |
|---|
| 252 | | - */ |
|---|
| 253 | | -static void ice_free_fltr_list(struct device *dev, struct list_head *h) |
|---|
| 254 | | -{ |
|---|
| 255 | | - struct ice_fltr_list_entry *e, *tmp; |
|---|
| 256 | | - |
|---|
| 257 | | - list_for_each_entry_safe(e, tmp, h, list_entry) { |
|---|
| 258 | | - list_del(&e->list_entry); |
|---|
| 259 | | - devm_kfree(dev, e); |
|---|
| 260 | | - } |
|---|
| 261 | 205 | } |
|---|
| 262 | 206 | |
|---|
| 263 | 207 | /** |
|---|
| .. | .. |
|---|
| 274 | 218 | } |
|---|
| 275 | 219 | |
|---|
| 276 | 220 | /** |
|---|
| 221 | + * ice_cfg_promisc - Enable or disable promiscuous mode for a given PF |
|---|
| 222 | + * @vsi: the VSI being configured |
|---|
| 223 | + * @promisc_m: mask of promiscuous config bits |
|---|
| 224 | + * @set_promisc: enable or disable promisc flag request |
|---|
| 225 | + * |
|---|
| 226 | + */ |
|---|
| 227 | +static int ice_cfg_promisc(struct ice_vsi *vsi, u8 promisc_m, bool set_promisc) |
|---|
| 228 | +{ |
|---|
| 229 | + struct ice_hw *hw = &vsi->back->hw; |
|---|
| 230 | + enum ice_status status = 0; |
|---|
| 231 | + |
|---|
| 232 | + if (vsi->type != ICE_VSI_PF) |
|---|
| 233 | + return 0; |
|---|
| 234 | + |
|---|
| 235 | + if (vsi->vlan_ena) { |
|---|
| 236 | + status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m, |
|---|
| 237 | + set_promisc); |
|---|
| 238 | + } else { |
|---|
| 239 | + if (set_promisc) |
|---|
| 240 | + status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m, |
|---|
| 241 | + 0); |
|---|
| 242 | + else |
|---|
| 243 | + status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m, |
|---|
| 244 | + 0); |
|---|
| 245 | + } |
|---|
| 246 | + |
|---|
| 247 | + if (status) |
|---|
| 248 | + return -EIO; |
|---|
| 249 | + |
|---|
| 250 | + return 0; |
|---|
| 251 | +} |
|---|
| 252 | + |
|---|
| 253 | +/** |
|---|
| 277 | 254 | * ice_vsi_sync_fltr - Update the VSI filter list to the HW |
|---|
| 278 | 255 | * @vsi: ptr to the VSI |
|---|
| 279 | 256 | * |
|---|
| .. | .. |
|---|
| 281 | 258 | */ |
|---|
| 282 | 259 | static int ice_vsi_sync_fltr(struct ice_vsi *vsi) |
|---|
| 283 | 260 | { |
|---|
| 284 | | - struct device *dev = &vsi->back->pdev->dev; |
|---|
| 261 | + struct device *dev = ice_pf_to_dev(vsi->back); |
|---|
| 285 | 262 | struct net_device *netdev = vsi->netdev; |
|---|
| 286 | 263 | bool promisc_forced_on = false; |
|---|
| 287 | 264 | struct ice_pf *pf = vsi->back; |
|---|
| 288 | 265 | struct ice_hw *hw = &pf->hw; |
|---|
| 289 | 266 | enum ice_status status = 0; |
|---|
| 290 | 267 | u32 changed_flags = 0; |
|---|
| 268 | + u8 promisc_m; |
|---|
| 291 | 269 | int err = 0; |
|---|
| 292 | 270 | |
|---|
| 293 | 271 | if (!vsi->netdev) |
|---|
| .. | .. |
|---|
| 317 | 295 | netif_addr_unlock_bh(netdev); |
|---|
| 318 | 296 | } |
|---|
| 319 | 297 | |
|---|
| 320 | | - /* Remove mac addresses in the unsync list */ |
|---|
| 321 | | - status = ice_remove_mac(hw, &vsi->tmp_unsync_list); |
|---|
| 322 | | - ice_free_fltr_list(dev, &vsi->tmp_unsync_list); |
|---|
| 298 | + /* Remove MAC addresses in the unsync list */ |
|---|
| 299 | + status = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list); |
|---|
| 300 | + ice_fltr_free_list(dev, &vsi->tmp_unsync_list); |
|---|
| 323 | 301 | if (status) { |
|---|
| 324 | 302 | netdev_err(netdev, "Failed to delete MAC filters\n"); |
|---|
| 325 | 303 | /* if we failed because of alloc failures, just bail */ |
|---|
| .. | .. |
|---|
| 329 | 307 | } |
|---|
| 330 | 308 | } |
|---|
| 331 | 309 | |
|---|
| 332 | | - /* Add mac addresses in the sync list */ |
|---|
| 333 | | - status = ice_add_mac(hw, &vsi->tmp_sync_list); |
|---|
| 334 | | - ice_free_fltr_list(dev, &vsi->tmp_sync_list); |
|---|
| 335 | | - if (status) { |
|---|
| 310 | + /* Add MAC addresses in the sync list */ |
|---|
| 311 | + status = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list); |
|---|
| 312 | + ice_fltr_free_list(dev, &vsi->tmp_sync_list); |
|---|
| 313 | + /* If filter is added successfully or already exists, do not go into |
|---|
| 314 | + * 'if' condition and report it as error. Instead continue processing |
|---|
| 315 | + * rest of the function. |
|---|
| 316 | + */ |
|---|
| 317 | + if (status && status != ICE_ERR_ALREADY_EXISTS) { |
|---|
| 336 | 318 | netdev_err(netdev, "Failed to add MAC filters\n"); |
|---|
| 337 | | - /* If there is no more space for new umac filters, vsi |
|---|
| 319 | + /* If there is no more space for new umac filters, VSI |
|---|
| 338 | 320 | * should go into promiscuous mode. There should be some |
|---|
| 339 | 321 | * space reserved for promiscuous filters. |
|---|
| 340 | 322 | */ |
|---|
| .. | .. |
|---|
| 342 | 324 | !test_and_set_bit(__ICE_FLTR_OVERFLOW_PROMISC, |
|---|
| 343 | 325 | vsi->state)) { |
|---|
| 344 | 326 | promisc_forced_on = true; |
|---|
| 345 | | - netdev_warn(netdev, |
|---|
| 346 | | - "Reached MAC filter limit, forcing promisc mode on VSI %d\n", |
|---|
| 327 | + netdev_warn(netdev, "Reached MAC filter limit, forcing promisc mode on VSI %d\n", |
|---|
| 347 | 328 | vsi->vsi_num); |
|---|
| 348 | 329 | } else { |
|---|
| 349 | 330 | err = -EIO; |
|---|
| .. | .. |
|---|
| 351 | 332 | } |
|---|
| 352 | 333 | } |
|---|
| 353 | 334 | /* check for changes in promiscuous modes */ |
|---|
| 354 | | - if (changed_flags & IFF_ALLMULTI) |
|---|
| 355 | | - netdev_warn(netdev, "Unsupported configuration\n"); |
|---|
| 335 | + if (changed_flags & IFF_ALLMULTI) { |
|---|
| 336 | + if (vsi->current_netdev_flags & IFF_ALLMULTI) { |
|---|
| 337 | + if (vsi->vlan_ena) |
|---|
| 338 | + promisc_m = ICE_MCAST_VLAN_PROMISC_BITS; |
|---|
| 339 | + else |
|---|
| 340 | + promisc_m = ICE_MCAST_PROMISC_BITS; |
|---|
| 341 | + |
|---|
| 342 | + err = ice_cfg_promisc(vsi, promisc_m, true); |
|---|
| 343 | + if (err) { |
|---|
| 344 | + netdev_err(netdev, "Error setting Multicast promiscuous mode on VSI %i\n", |
|---|
| 345 | + vsi->vsi_num); |
|---|
| 346 | + vsi->current_netdev_flags &= ~IFF_ALLMULTI; |
|---|
| 347 | + goto out_promisc; |
|---|
| 348 | + } |
|---|
| 349 | + } else { |
|---|
| 350 | + /* !(vsi->current_netdev_flags & IFF_ALLMULTI) */ |
|---|
| 351 | + if (vsi->vlan_ena) |
|---|
| 352 | + promisc_m = ICE_MCAST_VLAN_PROMISC_BITS; |
|---|
| 353 | + else |
|---|
| 354 | + promisc_m = ICE_MCAST_PROMISC_BITS; |
|---|
| 355 | + |
|---|
| 356 | + err = ice_cfg_promisc(vsi, promisc_m, false); |
|---|
| 357 | + if (err) { |
|---|
| 358 | + netdev_err(netdev, "Error clearing Multicast promiscuous mode on VSI %i\n", |
|---|
| 359 | + vsi->vsi_num); |
|---|
| 360 | + vsi->current_netdev_flags |= IFF_ALLMULTI; |
|---|
| 361 | + goto out_promisc; |
|---|
| 362 | + } |
|---|
| 363 | + } |
|---|
| 364 | + } |
|---|
| 356 | 365 | |
|---|
| 357 | 366 | if (((changed_flags & IFF_PROMISC) || promisc_forced_on) || |
|---|
| 358 | 367 | test_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags)) { |
|---|
| 359 | 368 | clear_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags); |
|---|
| 360 | 369 | if (vsi->current_netdev_flags & IFF_PROMISC) { |
|---|
| 361 | | - /* Apply TX filter rule to get traffic from VMs */ |
|---|
| 362 | | - status = ice_cfg_dflt_vsi(hw, vsi->vsi_num, true, |
|---|
| 363 | | - ICE_FLTR_TX); |
|---|
| 364 | | - if (status) { |
|---|
| 365 | | - netdev_err(netdev, "Error setting default VSI %i tx rule\n", |
|---|
| 366 | | - vsi->vsi_num); |
|---|
| 367 | | - vsi->current_netdev_flags &= ~IFF_PROMISC; |
|---|
| 368 | | - err = -EIO; |
|---|
| 369 | | - goto out_promisc; |
|---|
| 370 | | - } |
|---|
| 371 | | - /* Apply RX filter rule to get traffic from wire */ |
|---|
| 372 | | - status = ice_cfg_dflt_vsi(hw, vsi->vsi_num, true, |
|---|
| 373 | | - ICE_FLTR_RX); |
|---|
| 374 | | - if (status) { |
|---|
| 375 | | - netdev_err(netdev, "Error setting default VSI %i rx rule\n", |
|---|
| 376 | | - vsi->vsi_num); |
|---|
| 377 | | - vsi->current_netdev_flags &= ~IFF_PROMISC; |
|---|
| 378 | | - err = -EIO; |
|---|
| 379 | | - goto out_promisc; |
|---|
| 370 | + /* Apply Rx filter rule to get traffic from wire */ |
|---|
| 371 | + if (!ice_is_dflt_vsi_in_use(pf->first_sw)) { |
|---|
| 372 | + err = ice_set_dflt_vsi(pf->first_sw, vsi); |
|---|
| 373 | + if (err && err != -EEXIST) { |
|---|
| 374 | + netdev_err(netdev, "Error %d setting default VSI %i Rx rule\n", |
|---|
| 375 | + err, vsi->vsi_num); |
|---|
| 376 | + vsi->current_netdev_flags &= |
|---|
| 377 | + ~IFF_PROMISC; |
|---|
| 378 | + goto out_promisc; |
|---|
| 379 | + } |
|---|
| 380 | + ice_cfg_vlan_pruning(vsi, false, false); |
|---|
| 380 | 381 | } |
|---|
| 381 | 382 | } else { |
|---|
| 382 | | - /* Clear TX filter rule to stop traffic from VMs */ |
|---|
| 383 | | - status = ice_cfg_dflt_vsi(hw, vsi->vsi_num, false, |
|---|
| 384 | | - ICE_FLTR_TX); |
|---|
| 385 | | - if (status) { |
|---|
| 386 | | - netdev_err(netdev, "Error clearing default VSI %i tx rule\n", |
|---|
| 387 | | - vsi->vsi_num); |
|---|
| 388 | | - vsi->current_netdev_flags |= IFF_PROMISC; |
|---|
| 389 | | - err = -EIO; |
|---|
| 390 | | - goto out_promisc; |
|---|
| 391 | | - } |
|---|
| 392 | | - /* Clear filter RX to remove traffic from wire */ |
|---|
| 393 | | - status = ice_cfg_dflt_vsi(hw, vsi->vsi_num, false, |
|---|
| 394 | | - ICE_FLTR_RX); |
|---|
| 395 | | - if (status) { |
|---|
| 396 | | - netdev_err(netdev, "Error clearing default VSI %i rx rule\n", |
|---|
| 397 | | - vsi->vsi_num); |
|---|
| 398 | | - vsi->current_netdev_flags |= IFF_PROMISC; |
|---|
| 399 | | - err = -EIO; |
|---|
| 400 | | - goto out_promisc; |
|---|
| 383 | + /* Clear Rx filter to remove traffic from wire */ |
|---|
| 384 | + if (ice_is_vsi_dflt_vsi(pf->first_sw, vsi)) { |
|---|
| 385 | + err = ice_clear_dflt_vsi(pf->first_sw); |
|---|
| 386 | + if (err) { |
|---|
| 387 | + netdev_err(netdev, "Error %d clearing default VSI %i Rx rule\n", |
|---|
| 388 | + err, vsi->vsi_num); |
|---|
| 389 | + vsi->current_netdev_flags |= |
|---|
| 390 | + IFF_PROMISC; |
|---|
| 391 | + goto out_promisc; |
|---|
| 392 | + } |
|---|
| 393 | + if (vsi->num_vlan > 1) |
|---|
| 394 | + ice_cfg_vlan_pruning(vsi, true, false); |
|---|
| 401 | 395 | } |
|---|
| 402 | 396 | } |
|---|
| 403 | 397 | } |
|---|
| .. | .. |
|---|
| 428 | 422 | |
|---|
| 429 | 423 | clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags); |
|---|
| 430 | 424 | |
|---|
| 431 | | - for (v = 0; v < pf->num_alloc_vsi; v++) |
|---|
| 425 | + ice_for_each_vsi(pf, v) |
|---|
| 432 | 426 | if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) && |
|---|
| 433 | 427 | ice_vsi_sync_fltr(pf->vsi[v])) { |
|---|
| 434 | 428 | /* come back and try again later */ |
|---|
| .. | .. |
|---|
| 438 | 432 | } |
|---|
| 439 | 433 | |
|---|
| 440 | 434 | /** |
|---|
| 441 | | - * ice_is_reset_recovery_pending - schedule a reset |
|---|
| 442 | | - * @state: pf state field |
|---|
| 435 | + * ice_pf_dis_all_vsi - Pause all VSIs on a PF |
|---|
| 436 | + * @pf: the PF |
|---|
| 437 | + * @locked: is the rtnl_lock already held |
|---|
| 443 | 438 | */ |
|---|
| 444 | | -static bool ice_is_reset_recovery_pending(unsigned long int *state) |
|---|
| 439 | +static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked) |
|---|
| 445 | 440 | { |
|---|
| 446 | | - return test_bit(__ICE_RESET_RECOVERY_PENDING, state); |
|---|
| 441 | + int v; |
|---|
| 442 | + |
|---|
| 443 | + ice_for_each_vsi(pf, v) |
|---|
| 444 | + if (pf->vsi[v]) |
|---|
| 445 | + ice_dis_vsi(pf->vsi[v], locked); |
|---|
| 447 | 446 | } |
|---|
| 448 | 447 | |
|---|
| 449 | 448 | /** |
|---|
| .. | .. |
|---|
| 456 | 455 | ice_prepare_for_reset(struct ice_pf *pf) |
|---|
| 457 | 456 | { |
|---|
| 458 | 457 | struct ice_hw *hw = &pf->hw; |
|---|
| 459 | | - u32 v; |
|---|
| 458 | + unsigned int i; |
|---|
| 460 | 459 | |
|---|
| 461 | | - ice_for_each_vsi(pf, v) |
|---|
| 462 | | - if (pf->vsi[v]) |
|---|
| 463 | | - ice_remove_vsi_fltr(hw, pf->vsi[v]->vsi_num); |
|---|
| 460 | + /* already prepared for reset */ |
|---|
| 461 | + if (test_bit(__ICE_PREPARED_FOR_RESET, pf->state)) |
|---|
| 462 | + return; |
|---|
| 464 | 463 | |
|---|
| 465 | | - dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n"); |
|---|
| 464 | + /* Notify VFs of impending reset */ |
|---|
| 465 | + if (ice_check_sq_alive(hw, &hw->mailboxq)) |
|---|
| 466 | + ice_vc_notify_reset(pf); |
|---|
| 466 | 467 | |
|---|
| 468 | + /* Disable VFs until reset is completed */ |
|---|
| 469 | + ice_for_each_vf(pf, i) |
|---|
| 470 | + ice_set_vf_state_qs_dis(&pf->vf[i]); |
|---|
| 471 | + |
|---|
| 472 | + /* clear SW filtering DB */ |
|---|
| 473 | + ice_clear_hw_tbls(hw); |
|---|
| 467 | 474 | /* disable the VSIs and their queues that are not already DOWN */ |
|---|
| 468 | | - /* pf_dis_all_vsi modifies netdev structures -rtnl_lock needed */ |
|---|
| 469 | | - ice_pf_dis_all_vsi(pf); |
|---|
| 475 | + ice_pf_dis_all_vsi(pf, false); |
|---|
| 470 | 476 | |
|---|
| 471 | | - ice_for_each_vsi(pf, v) |
|---|
| 472 | | - if (pf->vsi[v]) |
|---|
| 473 | | - pf->vsi[v]->vsi_num = 0; |
|---|
| 477 | + if (hw->port_info) |
|---|
| 478 | + ice_sched_clear_port(hw->port_info); |
|---|
| 474 | 479 | |
|---|
| 475 | 480 | ice_shutdown_all_ctrlq(hw); |
|---|
| 481 | + |
|---|
| 482 | + set_bit(__ICE_PREPARED_FOR_RESET, pf->state); |
|---|
| 476 | 483 | } |
|---|
| 477 | 484 | |
|---|
| 478 | 485 | /** |
|---|
| .. | .. |
|---|
| 483 | 490 | */ |
|---|
| 484 | 491 | static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type) |
|---|
| 485 | 492 | { |
|---|
| 486 | | - struct device *dev = &pf->pdev->dev; |
|---|
| 493 | + struct device *dev = ice_pf_to_dev(pf); |
|---|
| 487 | 494 | struct ice_hw *hw = &pf->hw; |
|---|
| 488 | 495 | |
|---|
| 489 | 496 | dev_dbg(dev, "reset_type 0x%x requested\n", reset_type); |
|---|
| 490 | | - WARN_ON(in_interrupt()); |
|---|
| 491 | 497 | |
|---|
| 492 | | - /* PFR is a bit of a special case because it doesn't result in an OICR |
|---|
| 493 | | - * interrupt. So for PFR, we prepare for reset, issue the reset and |
|---|
| 494 | | - * rebuild sequentially. |
|---|
| 495 | | - */ |
|---|
| 496 | | - if (reset_type == ICE_RESET_PFR) { |
|---|
| 497 | | - set_bit(__ICE_RESET_RECOVERY_PENDING, pf->state); |
|---|
| 498 | | - ice_prepare_for_reset(pf); |
|---|
| 499 | | - } |
|---|
| 498 | + ice_prepare_for_reset(pf); |
|---|
| 500 | 499 | |
|---|
| 501 | 500 | /* trigger the reset */ |
|---|
| 502 | 501 | if (ice_reset(hw, reset_type)) { |
|---|
| 503 | 502 | dev_err(dev, "reset %d failed\n", reset_type); |
|---|
| 504 | 503 | set_bit(__ICE_RESET_FAILED, pf->state); |
|---|
| 505 | | - clear_bit(__ICE_RESET_RECOVERY_PENDING, pf->state); |
|---|
| 504 | + clear_bit(__ICE_RESET_OICR_RECV, pf->state); |
|---|
| 505 | + clear_bit(__ICE_PREPARED_FOR_RESET, pf->state); |
|---|
| 506 | + clear_bit(__ICE_PFR_REQ, pf->state); |
|---|
| 507 | + clear_bit(__ICE_CORER_REQ, pf->state); |
|---|
| 508 | + clear_bit(__ICE_GLOBR_REQ, pf->state); |
|---|
| 506 | 509 | return; |
|---|
| 507 | 510 | } |
|---|
| 508 | 511 | |
|---|
| 512 | + /* PFR is a bit of a special case because it doesn't result in an OICR |
|---|
| 513 | + * interrupt. So for PFR, rebuild after the reset and clear the reset- |
|---|
| 514 | + * associated state bits. |
|---|
| 515 | + */ |
|---|
| 509 | 516 | if (reset_type == ICE_RESET_PFR) { |
|---|
| 510 | 517 | pf->pfr_count++; |
|---|
| 511 | | - ice_rebuild(pf); |
|---|
| 512 | | - clear_bit(__ICE_RESET_RECOVERY_PENDING, pf->state); |
|---|
| 518 | + ice_rebuild(pf, reset_type); |
|---|
| 519 | + clear_bit(__ICE_PREPARED_FOR_RESET, pf->state); |
|---|
| 520 | + clear_bit(__ICE_PFR_REQ, pf->state); |
|---|
| 521 | + ice_reset_all_vfs(pf, true); |
|---|
| 513 | 522 | } |
|---|
| 514 | 523 | } |
|---|
| 515 | 524 | |
|---|
| .. | .. |
|---|
| 519 | 528 | */ |
|---|
| 520 | 529 | static void ice_reset_subtask(struct ice_pf *pf) |
|---|
| 521 | 530 | { |
|---|
| 522 | | - enum ice_reset_req reset_type; |
|---|
| 523 | | - |
|---|
| 524 | | - rtnl_lock(); |
|---|
| 531 | + enum ice_reset_req reset_type = ICE_RESET_INVAL; |
|---|
| 525 | 532 | |
|---|
| 526 | 533 | /* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an |
|---|
| 527 | | - * OICR interrupt. The OICR handler (ice_misc_intr) determines what |
|---|
| 528 | | - * type of reset happened and sets __ICE_RESET_RECOVERY_PENDING bit in |
|---|
| 529 | | - * pf->state. So if reset/recovery is pending (as indicated by this bit) |
|---|
| 530 | | - * we do a rebuild and return. |
|---|
| 534 | + * OICR interrupt. The OICR handler (ice_misc_intr) determines what type |
|---|
| 535 | + * of reset is pending and sets bits in pf->state indicating the reset |
|---|
| 536 | + * type and __ICE_RESET_OICR_RECV. So, if the latter bit is set |
|---|
| 537 | + * prepare for pending reset if not already (for PF software-initiated |
|---|
| 538 | + * global resets the software should already be prepared for it as |
|---|
| 539 | + * indicated by __ICE_PREPARED_FOR_RESET; for global resets initiated |
|---|
| 540 | + * by firmware or software on other PFs, that bit is not set so prepare |
|---|
| 541 | + * for the reset now), poll for reset done, rebuild and return. |
|---|
| 531 | 542 | */ |
|---|
| 532 | | - if (ice_is_reset_recovery_pending(pf->state)) { |
|---|
| 533 | | - clear_bit(__ICE_GLOBR_RECV, pf->state); |
|---|
| 534 | | - clear_bit(__ICE_CORER_RECV, pf->state); |
|---|
| 543 | + if (test_bit(__ICE_RESET_OICR_RECV, pf->state)) { |
|---|
| 544 | + /* Perform the largest reset requested */ |
|---|
| 545 | + if (test_and_clear_bit(__ICE_CORER_RECV, pf->state)) |
|---|
| 546 | + reset_type = ICE_RESET_CORER; |
|---|
| 547 | + if (test_and_clear_bit(__ICE_GLOBR_RECV, pf->state)) |
|---|
| 548 | + reset_type = ICE_RESET_GLOBR; |
|---|
| 549 | + if (test_and_clear_bit(__ICE_EMPR_RECV, pf->state)) |
|---|
| 550 | + reset_type = ICE_RESET_EMPR; |
|---|
| 551 | + /* return if no valid reset type requested */ |
|---|
| 552 | + if (reset_type == ICE_RESET_INVAL) |
|---|
| 553 | + return; |
|---|
| 535 | 554 | ice_prepare_for_reset(pf); |
|---|
| 536 | 555 | |
|---|
| 537 | 556 | /* make sure we are ready to rebuild */ |
|---|
| .. | .. |
|---|
| 540 | 559 | } else { |
|---|
| 541 | 560 | /* done with reset. start rebuild */ |
|---|
| 542 | 561 | pf->hw.reset_ongoing = false; |
|---|
| 543 | | - ice_rebuild(pf); |
|---|
| 562 | + ice_rebuild(pf, reset_type); |
|---|
| 563 | + /* clear bit to resume normal operations, but |
|---|
| 564 | + * ICE_NEEDS_RESTART bit is set in case rebuild failed |
|---|
| 565 | + */ |
|---|
| 566 | + clear_bit(__ICE_RESET_OICR_RECV, pf->state); |
|---|
| 567 | + clear_bit(__ICE_PREPARED_FOR_RESET, pf->state); |
|---|
| 568 | + clear_bit(__ICE_PFR_REQ, pf->state); |
|---|
| 569 | + clear_bit(__ICE_CORER_REQ, pf->state); |
|---|
| 570 | + clear_bit(__ICE_GLOBR_REQ, pf->state); |
|---|
| 571 | + ice_reset_all_vfs(pf, true); |
|---|
| 544 | 572 | } |
|---|
| 545 | | - clear_bit(__ICE_RESET_RECOVERY_PENDING, pf->state); |
|---|
| 546 | | - goto unlock; |
|---|
| 573 | + |
|---|
| 574 | + return; |
|---|
| 547 | 575 | } |
|---|
| 548 | 576 | |
|---|
| 549 | 577 | /* No pending resets to finish processing. Check for new resets */ |
|---|
| 550 | | - if (test_and_clear_bit(__ICE_GLOBR_REQ, pf->state)) |
|---|
| 551 | | - reset_type = ICE_RESET_GLOBR; |
|---|
| 552 | | - else if (test_and_clear_bit(__ICE_CORER_REQ, pf->state)) |
|---|
| 553 | | - reset_type = ICE_RESET_CORER; |
|---|
| 554 | | - else if (test_and_clear_bit(__ICE_PFR_REQ, pf->state)) |
|---|
| 578 | + if (test_bit(__ICE_PFR_REQ, pf->state)) |
|---|
| 555 | 579 | reset_type = ICE_RESET_PFR; |
|---|
| 556 | | - else |
|---|
| 557 | | - goto unlock; |
|---|
| 580 | + if (test_bit(__ICE_CORER_REQ, pf->state)) |
|---|
| 581 | + reset_type = ICE_RESET_CORER; |
|---|
| 582 | + if (test_bit(__ICE_GLOBR_REQ, pf->state)) |
|---|
| 583 | + reset_type = ICE_RESET_GLOBR; |
|---|
| 584 | + /* If no valid reset type requested just return */ |
|---|
| 585 | + if (reset_type == ICE_RESET_INVAL) |
|---|
| 586 | + return; |
|---|
| 558 | 587 | |
|---|
| 559 | | - /* reset if not already down or resetting */ |
|---|
| 588 | + /* reset if not already down or busy */ |
|---|
| 560 | 589 | if (!test_bit(__ICE_DOWN, pf->state) && |
|---|
| 561 | 590 | !test_bit(__ICE_CFG_BUSY, pf->state)) { |
|---|
| 562 | 591 | ice_do_reset(pf, reset_type); |
|---|
| 563 | 592 | } |
|---|
| 564 | | - |
|---|
| 565 | | -unlock: |
|---|
| 566 | | - rtnl_unlock(); |
|---|
| 567 | 593 | } |
|---|
| 568 | 594 | |
|---|
| 569 | 595 | /** |
|---|
| 570 | | - * ice_watchdog_subtask - periodic tasks not using event driven scheduling |
|---|
| 571 | | - * @pf: board private structure |
|---|
| 596 | + * ice_print_topo_conflict - print topology conflict message |
|---|
| 597 | + * @vsi: the VSI whose topology status is being checked |
|---|
| 572 | 598 | */ |
|---|
| 573 | | -static void ice_watchdog_subtask(struct ice_pf *pf) |
|---|
| 599 | +static void ice_print_topo_conflict(struct ice_vsi *vsi) |
|---|
| 574 | 600 | { |
|---|
| 575 | | - int i; |
|---|
| 576 | | - |
|---|
| 577 | | - /* if interface is down do nothing */ |
|---|
| 578 | | - if (test_bit(__ICE_DOWN, pf->state) || |
|---|
| 579 | | - test_bit(__ICE_CFG_BUSY, pf->state)) |
|---|
| 580 | | - return; |
|---|
| 581 | | - |
|---|
| 582 | | - /* make sure we don't do these things too often */ |
|---|
| 583 | | - if (time_before(jiffies, |
|---|
| 584 | | - pf->serv_tmr_prev + pf->serv_tmr_period)) |
|---|
| 585 | | - return; |
|---|
| 586 | | - |
|---|
| 587 | | - pf->serv_tmr_prev = jiffies; |
|---|
| 588 | | - |
|---|
| 589 | | - /* Update the stats for active netdevs so the network stack |
|---|
| 590 | | - * can look at updated numbers whenever it cares to |
|---|
| 591 | | - */ |
|---|
| 592 | | - ice_update_pf_stats(pf); |
|---|
| 593 | | - for (i = 0; i < pf->num_alloc_vsi; i++) |
|---|
| 594 | | - if (pf->vsi[i] && pf->vsi[i]->netdev) |
|---|
| 595 | | - ice_update_vsi_stats(pf->vsi[i]); |
|---|
| 601 | + switch (vsi->port_info->phy.link_info.topo_media_conflict) { |
|---|
| 602 | + case ICE_AQ_LINK_TOPO_CONFLICT: |
|---|
| 603 | + case ICE_AQ_LINK_MEDIA_CONFLICT: |
|---|
| 604 | + case ICE_AQ_LINK_TOPO_UNREACH_PRT: |
|---|
| 605 | + case ICE_AQ_LINK_TOPO_UNDRUTIL_PRT: |
|---|
| 606 | + case ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA: |
|---|
| 607 | + netdev_info(vsi->netdev, "Possible mis-configuration of the Ethernet port detected, please use the Intel(R) Ethernet Port Configuration Tool application to address the issue.\n"); |
|---|
| 608 | + break; |
|---|
| 609 | + case ICE_AQ_LINK_TOPO_UNSUPP_MEDIA: |
|---|
| 610 | + netdev_info(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n"); |
|---|
| 611 | + break; |
|---|
| 612 | + default: |
|---|
| 613 | + break; |
|---|
| 614 | + } |
|---|
| 596 | 615 | } |
|---|
| 597 | 616 | |
|---|
| 598 | 617 | /** |
|---|
| .. | .. |
|---|
| 602 | 621 | */ |
|---|
| 603 | 622 | void ice_print_link_msg(struct ice_vsi *vsi, bool isup) |
|---|
| 604 | 623 | { |
|---|
| 624 | + struct ice_aqc_get_phy_caps_data *caps; |
|---|
| 625 | + const char *an_advertised; |
|---|
| 626 | + enum ice_status status; |
|---|
| 627 | + const char *fec_req; |
|---|
| 605 | 628 | const char *speed; |
|---|
| 629 | + const char *fec; |
|---|
| 606 | 630 | const char *fc; |
|---|
| 631 | + const char *an; |
|---|
| 632 | + |
|---|
| 633 | + if (!vsi) |
|---|
| 634 | + return; |
|---|
| 607 | 635 | |
|---|
| 608 | 636 | if (vsi->current_isup == isup) |
|---|
| 609 | 637 | return; |
|---|
| .. | .. |
|---|
| 616 | 644 | } |
|---|
| 617 | 645 | |
|---|
| 618 | 646 | switch (vsi->port_info->phy.link_info.link_speed) { |
|---|
| 647 | + case ICE_AQ_LINK_SPEED_100GB: |
|---|
| 648 | + speed = "100 G"; |
|---|
| 649 | + break; |
|---|
| 650 | + case ICE_AQ_LINK_SPEED_50GB: |
|---|
| 651 | + speed = "50 G"; |
|---|
| 652 | + break; |
|---|
| 619 | 653 | case ICE_AQ_LINK_SPEED_40GB: |
|---|
| 620 | 654 | speed = "40 G"; |
|---|
| 621 | 655 | break; |
|---|
| .. | .. |
|---|
| 647 | 681 | |
|---|
| 648 | 682 | switch (vsi->port_info->fc.current_mode) { |
|---|
| 649 | 683 | case ICE_FC_FULL: |
|---|
| 650 | | - fc = "RX/TX"; |
|---|
| 684 | + fc = "Rx/Tx"; |
|---|
| 651 | 685 | break; |
|---|
| 652 | 686 | case ICE_FC_TX_PAUSE: |
|---|
| 653 | | - fc = "TX"; |
|---|
| 687 | + fc = "Tx"; |
|---|
| 654 | 688 | break; |
|---|
| 655 | 689 | case ICE_FC_RX_PAUSE: |
|---|
| 656 | | - fc = "RX"; |
|---|
| 690 | + fc = "Rx"; |
|---|
| 657 | 691 | break; |
|---|
| 658 | 692 | case ICE_FC_NONE: |
|---|
| 659 | 693 | fc = "None"; |
|---|
| .. | .. |
|---|
| 663 | 697 | break; |
|---|
| 664 | 698 | } |
|---|
| 665 | 699 | |
|---|
| 666 | | - netdev_info(vsi->netdev, "NIC Link is up %sbps, Flow Control: %s\n", |
|---|
| 667 | | - speed, fc); |
|---|
| 700 | + /* Get FEC mode based on negotiated link info */ |
|---|
| 701 | + switch (vsi->port_info->phy.link_info.fec_info) { |
|---|
| 702 | + case ICE_AQ_LINK_25G_RS_528_FEC_EN: |
|---|
| 703 | + case ICE_AQ_LINK_25G_RS_544_FEC_EN: |
|---|
| 704 | + fec = "RS-FEC"; |
|---|
| 705 | + break; |
|---|
| 706 | + case ICE_AQ_LINK_25G_KR_FEC_EN: |
|---|
| 707 | + fec = "FC-FEC/BASE-R"; |
|---|
| 708 | + break; |
|---|
| 709 | + default: |
|---|
| 710 | + fec = "NONE"; |
|---|
| 711 | + break; |
|---|
| 712 | + } |
|---|
| 713 | + |
|---|
| 714 | + /* check if autoneg completed, might be false due to not supported */ |
|---|
| 715 | + if (vsi->port_info->phy.link_info.an_info & ICE_AQ_AN_COMPLETED) |
|---|
| 716 | + an = "True"; |
|---|
| 717 | + else |
|---|
| 718 | + an = "False"; |
|---|
| 719 | + |
|---|
| 720 | + /* Get FEC mode requested based on PHY caps last SW configuration */ |
|---|
| 721 | + caps = kzalloc(sizeof(*caps), GFP_KERNEL); |
|---|
| 722 | + if (!caps) { |
|---|
| 723 | + fec_req = "Unknown"; |
|---|
| 724 | + an_advertised = "Unknown"; |
|---|
| 725 | + goto done; |
|---|
| 726 | + } |
|---|
| 727 | + |
|---|
| 728 | + status = ice_aq_get_phy_caps(vsi->port_info, false, |
|---|
| 729 | + ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL); |
|---|
| 730 | + if (status) |
|---|
| 731 | + netdev_info(vsi->netdev, "Get phy capability failed.\n"); |
|---|
| 732 | + |
|---|
| 733 | + an_advertised = ice_is_phy_caps_an_enabled(caps) ? "On" : "Off"; |
|---|
| 734 | + |
|---|
| 735 | + if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ || |
|---|
| 736 | + caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ) |
|---|
| 737 | + fec_req = "RS-FEC"; |
|---|
| 738 | + else if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ || |
|---|
| 739 | + caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ) |
|---|
| 740 | + fec_req = "FC-FEC/BASE-R"; |
|---|
| 741 | + else |
|---|
| 742 | + fec_req = "NONE"; |
|---|
| 743 | + |
|---|
| 744 | + kfree(caps); |
|---|
| 745 | + |
|---|
| 746 | +done: |
|---|
| 747 | + netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg Advertised: %s, Autoneg Negotiated: %s, Flow Control: %s\n", |
|---|
| 748 | + speed, fec_req, fec, an_advertised, an, fc); |
|---|
| 749 | + ice_print_topo_conflict(vsi); |
|---|
| 750 | +} |
|---|
| 751 | + |
|---|
| 752 | +/** |
|---|
| 753 | + * ice_vsi_link_event - update the VSI's netdev |
|---|
| 754 | + * @vsi: the VSI on which the link event occurred |
|---|
| 755 | + * @link_up: whether or not the VSI needs to be set up or down |
|---|
| 756 | + */ |
|---|
| 757 | +static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up) |
|---|
| 758 | +{ |
|---|
| 759 | + if (!vsi) |
|---|
| 760 | + return; |
|---|
| 761 | + |
|---|
| 762 | + if (test_bit(__ICE_DOWN, vsi->state) || !vsi->netdev) |
|---|
| 763 | + return; |
|---|
| 764 | + |
|---|
| 765 | + if (vsi->type == ICE_VSI_PF) { |
|---|
| 766 | + if (link_up == netif_carrier_ok(vsi->netdev)) |
|---|
| 767 | + return; |
|---|
| 768 | + |
|---|
| 769 | + if (link_up) { |
|---|
| 770 | + netif_carrier_on(vsi->netdev); |
|---|
| 771 | + netif_tx_wake_all_queues(vsi->netdev); |
|---|
| 772 | + } else { |
|---|
| 773 | + netif_carrier_off(vsi->netdev); |
|---|
| 774 | + netif_tx_stop_all_queues(vsi->netdev); |
|---|
| 775 | + } |
|---|
| 776 | + } |
|---|
| 777 | +} |
|---|
| 778 | + |
|---|
| 779 | +/** |
|---|
| 780 | + * ice_set_dflt_mib - send a default config MIB to the FW |
|---|
| 781 | + * @pf: private PF struct |
|---|
| 782 | + * |
|---|
| 783 | + * This function sends a default configuration MIB to the FW. |
|---|
| 784 | + * |
|---|
| 785 | + * If this function errors out at any point, the driver is still able to |
|---|
| 786 | + * function. The main impact is that LFC may not operate as expected. |
|---|
| 787 | + * Therefore an error state in this function should be treated with a DBG |
|---|
| 788 | + * message and continue on with driver rebuild/reenable. |
|---|
| 789 | + */ |
|---|
| 790 | +static void ice_set_dflt_mib(struct ice_pf *pf) |
|---|
| 791 | +{ |
|---|
| 792 | + struct device *dev = ice_pf_to_dev(pf); |
|---|
| 793 | + u8 mib_type, *buf, *lldpmib = NULL; |
|---|
| 794 | + u16 len, typelen, offset = 0; |
|---|
| 795 | + struct ice_lldp_org_tlv *tlv; |
|---|
| 796 | + struct ice_hw *hw; |
|---|
| 797 | + u32 ouisubtype; |
|---|
| 798 | + |
|---|
| 799 | + if (!pf) { |
|---|
| 800 | + dev_dbg(dev, "%s NULL pf pointer\n", __func__); |
|---|
| 801 | + return; |
|---|
| 802 | + } |
|---|
| 803 | + |
|---|
| 804 | + hw = &pf->hw; |
|---|
| 805 | + mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB; |
|---|
| 806 | + lldpmib = kzalloc(ICE_LLDPDU_SIZE, GFP_KERNEL); |
|---|
| 807 | + if (!lldpmib) { |
|---|
| 808 | + dev_dbg(dev, "%s Failed to allocate MIB memory\n", |
|---|
| 809 | + __func__); |
|---|
| 810 | + return; |
|---|
| 811 | + } |
|---|
| 812 | + |
|---|
| 813 | + /* Add ETS CFG TLV */ |
|---|
| 814 | + tlv = (struct ice_lldp_org_tlv *)lldpmib; |
|---|
| 815 | + typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) | |
|---|
| 816 | + ICE_IEEE_ETS_TLV_LEN); |
|---|
| 817 | + tlv->typelen = htons(typelen); |
|---|
| 818 | + ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) | |
|---|
| 819 | + ICE_IEEE_SUBTYPE_ETS_CFG); |
|---|
| 820 | + tlv->ouisubtype = htonl(ouisubtype); |
|---|
| 821 | + |
|---|
| 822 | + buf = tlv->tlvinfo; |
|---|
| 823 | + buf[0] = 0; |
|---|
| 824 | + |
|---|
| 825 | + /* ETS CFG all UPs map to TC 0. Next 4 (1 - 4) Octets = 0. |
|---|
| 826 | + * Octets 5 - 12 are BW values, set octet 5 to 100% BW. |
|---|
| 827 | + * Octets 13 - 20 are TSA values - leave as zeros |
|---|
| 828 | + */ |
|---|
| 829 | + buf[5] = 0x64; |
|---|
| 830 | + len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S; |
|---|
| 831 | + offset += len + 2; |
|---|
| 832 | + tlv = (struct ice_lldp_org_tlv *) |
|---|
| 833 | + ((char *)tlv + sizeof(tlv->typelen) + len); |
|---|
| 834 | + |
|---|
| 835 | + /* Add ETS REC TLV */ |
|---|
| 836 | + buf = tlv->tlvinfo; |
|---|
| 837 | + tlv->typelen = htons(typelen); |
|---|
| 838 | + |
|---|
| 839 | + ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) | |
|---|
| 840 | + ICE_IEEE_SUBTYPE_ETS_REC); |
|---|
| 841 | + tlv->ouisubtype = htonl(ouisubtype); |
|---|
| 842 | + |
|---|
| 843 | + /* First octet of buf is reserved |
|---|
| 844 | + * Octets 1 - 4 map UP to TC - all UPs map to zero |
|---|
| 845 | + * Octets 5 - 12 are BW values - set TC 0 to 100%. |
|---|
| 846 | + * Octets 13 - 20 are TSA values - leave as zeros |
|---|
| 847 | + */ |
|---|
| 848 | + buf[5] = 0x64; |
|---|
| 849 | + offset += len + 2; |
|---|
| 850 | + tlv = (struct ice_lldp_org_tlv *) |
|---|
| 851 | + ((char *)tlv + sizeof(tlv->typelen) + len); |
|---|
| 852 | + |
|---|
| 853 | + /* Add PFC CFG TLV */ |
|---|
| 854 | + typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) | |
|---|
| 855 | + ICE_IEEE_PFC_TLV_LEN); |
|---|
| 856 | + tlv->typelen = htons(typelen); |
|---|
| 857 | + |
|---|
| 858 | + ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) | |
|---|
| 859 | + ICE_IEEE_SUBTYPE_PFC_CFG); |
|---|
| 860 | + tlv->ouisubtype = htonl(ouisubtype); |
|---|
| 861 | + |
|---|
| 862 | + /* Octet 1: PFC capability set to 8; enable bits left zero - PFC disabled */ |
|---|
| 863 | + buf[0] = 0x08; |
|---|
| 864 | + len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S; |
|---|
| 865 | + offset += len + 2; |
|---|
| 866 | + |
|---|
| 867 | + if (ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, offset, NULL)) |
|---|
| 868 | + dev_dbg(dev, "%s Failed to set default LLDP MIB\n", __func__); |
|---|
| 869 | + |
|---|
| 870 | + kfree(lldpmib); |
|---|
| 871 | +} |
|---|
| 872 | + |
|---|
| 873 | +/** |
|---|
| 874 | + * ice_link_event - process the link event |
|---|
| 875 | + * @pf: PF that the link event is associated with |
|---|
| 876 | + * @pi: port_info for the port that the link event is associated with |
|---|
| 877 | + * @link_up: true if the physical link is up and false if it is down |
|---|
| 878 | + * @link_speed: current link speed received from the link event |
|---|
| 879 | + * |
|---|
| 880 | + * Returns 0 on success and negative on failure |
|---|
| 881 | + */ |
|---|
| 882 | +static int |
|---|
| 883 | +ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up, |
|---|
| 884 | + u16 link_speed) |
|---|
| 885 | +{ |
|---|
| 886 | + struct device *dev = ice_pf_to_dev(pf); |
|---|
| 887 | + struct ice_phy_info *phy_info; |
|---|
| 888 | + struct ice_vsi *vsi; |
|---|
| 889 | + u16 old_link_speed; |
|---|
| 890 | + bool old_link; |
|---|
| 891 | + int result; |
|---|
| 892 | + |
|---|
| 893 | + phy_info = &pi->phy; |
|---|
| 894 | + phy_info->link_info_old = phy_info->link_info; |
|---|
| 895 | + |
|---|
| 896 | + old_link = !!(phy_info->link_info_old.link_info & ICE_AQ_LINK_UP); |
|---|
| 897 | + old_link_speed = phy_info->link_info_old.link_speed; |
|---|
| 898 | + |
|---|
| 899 | + /* update the link info structures and re-enable link events, |
|---|
| 900 | + * don't bail on failure due to other book keeping needed |
|---|
| 901 | + */ |
|---|
| 902 | + result = ice_update_link_info(pi); |
|---|
| 903 | + if (result) |
|---|
| 904 | + dev_dbg(dev, "Failed to update link status and re-enable link events for port %d\n", |
|---|
| 905 | + pi->lport); |
|---|
| 906 | + |
|---|
| 907 | + /* Check if the link state is up after updating link info, and treat |
|---|
| 908 | + * this event as an UP event since the link is actually UP now. |
|---|
| 909 | + */ |
|---|
| 910 | + if (phy_info->link_info.link_info & ICE_AQ_LINK_UP) |
|---|
| 911 | + link_up = true; |
|---|
| 912 | + |
|---|
| 913 | + vsi = ice_get_main_vsi(pf); |
|---|
| 914 | + if (!vsi || !vsi->port_info) |
|---|
| 915 | + return -EINVAL; |
|---|
| 916 | + |
|---|
| 917 | + /* turn off PHY if media was removed */ |
|---|
| 918 | + if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) && |
|---|
| 919 | + !(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) { |
|---|
| 920 | + set_bit(ICE_FLAG_NO_MEDIA, pf->flags); |
|---|
| 921 | + |
|---|
| 922 | + result = ice_aq_set_link_restart_an(pi, false, NULL); |
|---|
| 923 | + if (result) { |
|---|
| 924 | + dev_dbg(dev, "Failed to set link down, VSI %d error %d\n", |
|---|
| 925 | + vsi->vsi_num, result); |
|---|
| 926 | + return result; |
|---|
| 927 | + } |
|---|
| 928 | + } |
|---|
| 929 | + |
|---|
| 930 | + /* if the old link up/down and speed is the same as the new */ |
|---|
| 931 | + if (link_up == old_link && link_speed == old_link_speed) |
|---|
| 932 | + return result; |
|---|
| 933 | + |
|---|
| 934 | + if (ice_is_dcb_active(pf)) { |
|---|
| 935 | + if (test_bit(ICE_FLAG_DCB_ENA, pf->flags)) |
|---|
| 936 | + ice_dcb_rebuild(pf); |
|---|
| 937 | + } else { |
|---|
| 938 | + if (link_up) |
|---|
| 939 | + ice_set_dflt_mib(pf); |
|---|
| 940 | + } |
|---|
| 941 | + ice_vsi_link_event(vsi, link_up); |
|---|
| 942 | + ice_print_link_msg(vsi, link_up); |
|---|
| 943 | + |
|---|
| 944 | + ice_vc_notify_link_state(pf); |
|---|
| 945 | + |
|---|
| 946 | + return result; |
|---|
| 947 | +} |
|---|
| 948 | + |
|---|
| 949 | +/** |
|---|
| 950 | + * ice_watchdog_subtask - periodic tasks not using event driven scheduling |
|---|
| 951 | + * @pf: board private structure |
|---|
| 952 | + */ |
|---|
| 953 | +static void ice_watchdog_subtask(struct ice_pf *pf) |
|---|
| 954 | +{ |
|---|
| 955 | + int i; |
|---|
| 956 | + |
|---|
| 957 | + /* if interface is down do nothing */ |
|---|
| 958 | + if (test_bit(__ICE_DOWN, pf->state) || |
|---|
| 959 | + test_bit(__ICE_CFG_BUSY, pf->state)) |
|---|
| 960 | + return; |
|---|
| 961 | + |
|---|
| 962 | + /* make sure we don't do these things too often */ |
|---|
| 963 | + if (time_before(jiffies, |
|---|
| 964 | + pf->serv_tmr_prev + pf->serv_tmr_period)) |
|---|
| 965 | + return; |
|---|
| 966 | + |
|---|
| 967 | + pf->serv_tmr_prev = jiffies; |
|---|
| 968 | + |
|---|
| 969 | + /* Update the stats for active netdevs so the network stack |
|---|
| 970 | + * can look at updated numbers whenever it cares to |
|---|
| 971 | + */ |
|---|
| 972 | + ice_update_pf_stats(pf); |
|---|
| 973 | + ice_for_each_vsi(pf, i) |
|---|
| 974 | + if (pf->vsi[i] && pf->vsi[i]->netdev) |
|---|
| 975 | + ice_update_vsi_stats(pf->vsi[i]); |
|---|
| 668 | 976 | } |
|---|
| 669 | 977 | |
|---|
| 670 | 978 | /** |
|---|
| .. | .. |
|---|
| 681 | 989 | ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL)); |
|---|
| 682 | 990 | |
|---|
| 683 | 991 | if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) { |
|---|
| 684 | | - dev_dbg(ice_hw_to_dev(pi->hw), |
|---|
| 685 | | - "Failed to set link event mask for port %d\n", |
|---|
| 992 | + dev_dbg(ice_hw_to_dev(pi->hw), "Failed to set link event mask for port %d\n", |
|---|
| 686 | 993 | pi->lport); |
|---|
| 687 | 994 | return -EIO; |
|---|
| 688 | 995 | } |
|---|
| 689 | 996 | |
|---|
| 690 | 997 | if (ice_aq_get_link_info(pi, true, NULL, NULL)) { |
|---|
| 691 | | - dev_dbg(ice_hw_to_dev(pi->hw), |
|---|
| 692 | | - "Failed to enable link events for port %d\n", |
|---|
| 998 | + dev_dbg(ice_hw_to_dev(pi->hw), "Failed to enable link events for port %d\n", |
|---|
| 693 | 999 | pi->lport); |
|---|
| 694 | 1000 | return -EIO; |
|---|
| 695 | | - } |
|---|
| 696 | | - |
|---|
| 697 | | - return 0; |
|---|
| 698 | | -} |
|---|
| 699 | | - |
|---|
| 700 | | -/** |
|---|
| 701 | | - * ice_vsi_link_event - update the vsi's netdev |
|---|
| 702 | | - * @vsi: the vsi on which the link event occurred |
|---|
| 703 | | - * @link_up: whether or not the vsi needs to be set up or down |
|---|
| 704 | | - */ |
|---|
| 705 | | -static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up) |
|---|
| 706 | | -{ |
|---|
| 707 | | - if (!vsi || test_bit(__ICE_DOWN, vsi->state)) |
|---|
| 708 | | - return; |
|---|
| 709 | | - |
|---|
| 710 | | - if (vsi->type == ICE_VSI_PF) { |
|---|
| 711 | | - if (!vsi->netdev) { |
|---|
| 712 | | - dev_dbg(&vsi->back->pdev->dev, |
|---|
| 713 | | - "vsi->netdev is not initialized!\n"); |
|---|
| 714 | | - return; |
|---|
| 715 | | - } |
|---|
| 716 | | - if (link_up) { |
|---|
| 717 | | - netif_carrier_on(vsi->netdev); |
|---|
| 718 | | - netif_tx_wake_all_queues(vsi->netdev); |
|---|
| 719 | | - } else { |
|---|
| 720 | | - netif_carrier_off(vsi->netdev); |
|---|
| 721 | | - netif_tx_stop_all_queues(vsi->netdev); |
|---|
| 722 | | - } |
|---|
| 723 | | - } |
|---|
| 724 | | -} |
|---|
| 725 | | - |
|---|
| 726 | | -/** |
|---|
| 727 | | - * ice_link_event - process the link event |
|---|
| 728 | | - * @pf: pf that the link event is associated with |
|---|
| 729 | | - * @pi: port_info for the port that the link event is associated with |
|---|
| 730 | | - * |
|---|
| 731 | | - * Returns -EIO if ice_get_link_status() fails |
|---|
| 732 | | - * Returns 0 on success |
|---|
| 733 | | - */ |
|---|
| 734 | | -static int |
|---|
| 735 | | -ice_link_event(struct ice_pf *pf, struct ice_port_info *pi) |
|---|
| 736 | | -{ |
|---|
| 737 | | - u8 new_link_speed, old_link_speed; |
|---|
| 738 | | - struct ice_phy_info *phy_info; |
|---|
| 739 | | - bool new_link_same_as_old; |
|---|
| 740 | | - bool new_link, old_link; |
|---|
| 741 | | - u8 lport; |
|---|
| 742 | | - u16 v; |
|---|
| 743 | | - |
|---|
| 744 | | - phy_info = &pi->phy; |
|---|
| 745 | | - phy_info->link_info_old = phy_info->link_info; |
|---|
| 746 | | - /* Force ice_get_link_status() to update link info */ |
|---|
| 747 | | - phy_info->get_link_info = true; |
|---|
| 748 | | - |
|---|
| 749 | | - old_link = (phy_info->link_info_old.link_info & ICE_AQ_LINK_UP); |
|---|
| 750 | | - old_link_speed = phy_info->link_info_old.link_speed; |
|---|
| 751 | | - |
|---|
| 752 | | - lport = pi->lport; |
|---|
| 753 | | - if (ice_get_link_status(pi, &new_link)) { |
|---|
| 754 | | - dev_dbg(&pf->pdev->dev, |
|---|
| 755 | | - "Could not get link status for port %d\n", lport); |
|---|
| 756 | | - return -EIO; |
|---|
| 757 | | - } |
|---|
| 758 | | - |
|---|
| 759 | | - new_link_speed = phy_info->link_info.link_speed; |
|---|
| 760 | | - |
|---|
| 761 | | - new_link_same_as_old = (new_link == old_link && |
|---|
| 762 | | - new_link_speed == old_link_speed); |
|---|
| 763 | | - |
|---|
| 764 | | - ice_for_each_vsi(pf, v) { |
|---|
| 765 | | - struct ice_vsi *vsi = pf->vsi[v]; |
|---|
| 766 | | - |
|---|
| 767 | | - if (!vsi || !vsi->port_info) |
|---|
| 768 | | - continue; |
|---|
| 769 | | - |
|---|
| 770 | | - if (new_link_same_as_old && |
|---|
| 771 | | - (test_bit(__ICE_DOWN, vsi->state) || |
|---|
| 772 | | - new_link == netif_carrier_ok(vsi->netdev))) |
|---|
| 773 | | - continue; |
|---|
| 774 | | - |
|---|
| 775 | | - if (vsi->port_info->lport == lport) { |
|---|
| 776 | | - ice_print_link_msg(vsi, new_link); |
|---|
| 777 | | - ice_vsi_link_event(vsi, new_link); |
|---|
| 778 | | - } |
|---|
| 779 | 1001 | } |
|---|
| 780 | 1002 | |
|---|
| 781 | 1003 | return 0; |
|---|
| .. | .. |
|---|
| 783 | 1005 | |
|---|
| 784 | 1006 | /** |
|---|
| 785 | 1007 | * ice_handle_link_event - handle link event via ARQ |
|---|
| 786 | | - * @pf: pf that the link event is associated with |
|---|
| 787 | | - * |
|---|
| 788 | | - * Return -EINVAL if port_info is null |
|---|
| 789 | | - * Return status on succes |
|---|
| 1008 | + * @pf: PF that the link event is associated with |
|---|
| 1009 | + * @event: event structure containing link status info |
|---|
| 790 | 1010 | */ |
|---|
| 791 | | -static int ice_handle_link_event(struct ice_pf *pf) |
|---|
| 1011 | +static int |
|---|
| 1012 | +ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event) |
|---|
| 792 | 1013 | { |
|---|
| 1014 | + struct ice_aqc_get_link_status_data *link_data; |
|---|
| 793 | 1015 | struct ice_port_info *port_info; |
|---|
| 794 | 1016 | int status; |
|---|
| 795 | 1017 | |
|---|
| 1018 | + link_data = (struct ice_aqc_get_link_status_data *)event->msg_buf; |
|---|
| 796 | 1019 | port_info = pf->hw.port_info; |
|---|
| 797 | 1020 | if (!port_info) |
|---|
| 798 | 1021 | return -EINVAL; |
|---|
| 799 | 1022 | |
|---|
| 800 | | - status = ice_link_event(pf, port_info); |
|---|
| 1023 | + status = ice_link_event(pf, port_info, |
|---|
| 1024 | + !!(link_data->link_info & ICE_AQ_LINK_UP), |
|---|
| 1025 | + le16_to_cpu(link_data->link_speed)); |
|---|
| 801 | 1026 | if (status) |
|---|
| 802 | | - dev_dbg(&pf->pdev->dev, |
|---|
| 803 | | - "Could not process link event, error %d\n", status); |
|---|
| 1027 | + dev_dbg(ice_pf_to_dev(pf), "Could not process link event, error %d\n", |
|---|
| 1028 | + status); |
|---|
| 804 | 1029 | |
|---|
| 805 | 1030 | return status; |
|---|
| 1031 | +} |
|---|
| 1032 | + |
|---|
| 1033 | +enum ice_aq_task_state { |
|---|
| 1034 | + ICE_AQ_TASK_WAITING = 0, |
|---|
| 1035 | + ICE_AQ_TASK_COMPLETE, |
|---|
| 1036 | + ICE_AQ_TASK_CANCELED, |
|---|
| 1037 | +}; |
|---|
| 1038 | + |
|---|
| 1039 | +struct ice_aq_task { |
|---|
| 1040 | + struct hlist_node entry; |
|---|
| 1041 | + |
|---|
| 1042 | + u16 opcode; |
|---|
| 1043 | + struct ice_rq_event_info *event; |
|---|
| 1044 | + enum ice_aq_task_state state; |
|---|
| 1045 | +}; |
|---|
| 1046 | + |
|---|
| 1047 | +/** |
|---|
| 1048 | + * ice_aq_wait_for_event - Wait for an AdminQ event from firmware |
|---|
| 1049 | + * @pf: pointer to the PF private structure |
|---|
| 1050 | + * @opcode: the opcode to wait for |
|---|
| 1051 | + * @timeout: how long to wait, in jiffies |
|---|
| 1052 | + * @event: storage for the event info |
|---|
| 1053 | + * |
|---|
| 1054 | + * Waits for a specific AdminQ completion event on the ARQ for a given PF. The |
|---|
| 1055 | + * current thread will be put to sleep until the specified event occurs or |
|---|
| 1056 | + * until the given timeout is reached. |
|---|
| 1057 | + * |
|---|
| 1058 | + * To obtain only the descriptor contents, pass an event without an allocated |
|---|
| 1059 | + * msg_buf. If the complete data buffer is desired, allocate the |
|---|
| 1060 | + * event->msg_buf with enough space ahead of time. |
|---|
| 1061 | + * |
|---|
| 1062 | + * Returns: zero on success, or a negative error code on failure. |
|---|
| 1063 | + */ |
|---|
int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
			  struct ice_rq_event_info *event)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_aq_task *task;
	unsigned long start;
	long ret;
	int err;

	/* The task must be heap-allocated: it is linked into pf->aq_wait_list
	 * and may be written by ice_aq_check_events() while this thread
	 * sleeps.
	 */
	task = kzalloc(sizeof(*task), GFP_KERNEL);
	if (!task)
		return -ENOMEM;

	INIT_HLIST_NODE(&task->entry);
	task->opcode = opcode;
	task->event = event;
	task->state = ICE_AQ_TASK_WAITING;

	/* Publish the task so the AdminQ receive path can find it */
	spin_lock_bh(&pf->aq_wait_lock);
	hlist_add_head(&task->entry, &pf->aq_wait_list);
	spin_unlock_bh(&pf->aq_wait_lock);

	start = jiffies;

	/* Sleep until another thread moves task->state out of WAITING
	 * (any nonzero state satisfies the condition), the timeout expires,
	 * or a signal interrupts the wait.
	 */
	ret = wait_event_interruptible_timeout(pf->aq_wait_queue, task->state,
					       timeout);
	switch (task->state) {
	case ICE_AQ_TASK_WAITING:
		/* Still WAITING: either interrupted (ret < 0, usually
		 * -ERESTARTSYS) or the wait timed out.
		 */
		err = ret < 0 ? ret : -ETIMEDOUT;
		break;
	case ICE_AQ_TASK_CANCELED:
		err = ret < 0 ? ret : -ECANCELED;
		break;
	case ICE_AQ_TASK_COMPLETE:
		err = ret < 0 ? ret : 0;
		break;
	default:
		WARN(1, "Unexpected AdminQ wait task state %u", task->state);
		err = -EINVAL;
		break;
	}

	dev_dbg(dev, "Waited %u msecs (max %u msecs) for firmware response to op 0x%04x\n",
		jiffies_to_msecs(jiffies - start),
		jiffies_to_msecs(timeout),
		opcode);

	/* Unlink under the lock before freeing so the receive/cancel paths
	 * can never observe a stale entry.
	 */
	spin_lock_bh(&pf->aq_wait_lock);
	hlist_del(&task->entry);
	spin_unlock_bh(&pf->aq_wait_lock);
	kfree(task);

	return err;
}
|---|
| 1118 | + |
|---|
/**
 * ice_aq_check_events - Check if any thread is waiting for an AdminQ event
 * @pf: pointer to the PF private structure
 * @opcode: the opcode of the event
 * @event: the event to check
 *
 * Loops over the current list of pending threads waiting for an AdminQ event.
 * For each matching task, copy the contents of the event into the task
 * structure and wake up the thread.
 *
 * If multiple threads wait for the same opcode, they will all be woken up.
 *
 * Note that event->msg_buf will only be duplicated if the event has a buffer
 * with enough space already allocated. Otherwise, only the descriptor and
 * message length will be copied.
 */
static void ice_aq_check_events(struct ice_pf *pf, u16 opcode,
				struct ice_rq_event_info *event)
{
	struct ice_rq_event_info *task_ev;
	struct ice_aq_task *task;
	bool found = false;

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_for_each_entry(task, &pf->aq_wait_list, entry) {
		/* Skip tasks that already left the WAITING state (zero) or
		 * that wait for a different opcode.
		 */
		if (task->state || task->opcode != opcode)
			continue;

		task_ev = task->event;
		memcpy(&task_ev->desc, &event->desc, sizeof(event->desc));
		task_ev->msg_len = event->msg_len;

		/* Only copy the data buffer if a destination was set */
		if (task_ev->msg_buf && task_ev->buf_len >= event->buf_len) {
			memcpy(task_ev->msg_buf, event->msg_buf,
			       event->buf_len);
			task_ev->buf_len = event->buf_len;
		}

		/* Setting a nonzero state satisfies the wait condition in
		 * ice_aq_wait_for_event()
		 */
		task->state = ICE_AQ_TASK_COMPLETE;
		found = true;
	}
	spin_unlock_bh(&pf->aq_wait_lock);

	if (found)
		wake_up(&pf->aq_wait_queue);
}
|---|
| 1168 | + |
|---|
| 1169 | +/** |
|---|
| 1170 | + * ice_aq_cancel_waiting_tasks - Immediately cancel all waiting tasks |
|---|
| 1171 | + * @pf: the PF private structure |
|---|
| 1172 | + * |
|---|
| 1173 | + * Set all waiting tasks to ICE_AQ_TASK_CANCELED, and wake up their threads. |
|---|
| 1174 | + * This will then cause ice_aq_wait_for_event to exit with -ECANCELED. |
|---|
| 1175 | + */ |
|---|
| 1176 | +static void ice_aq_cancel_waiting_tasks(struct ice_pf *pf) |
|---|
| 1177 | +{ |
|---|
| 1178 | + struct ice_aq_task *task; |
|---|
| 1179 | + |
|---|
| 1180 | + spin_lock_bh(&pf->aq_wait_lock); |
|---|
| 1181 | + hlist_for_each_entry(task, &pf->aq_wait_list, entry) |
|---|
| 1182 | + task->state = ICE_AQ_TASK_CANCELED; |
|---|
| 1183 | + spin_unlock_bh(&pf->aq_wait_lock); |
|---|
| 1184 | + |
|---|
| 1185 | + wake_up(&pf->aq_wait_queue); |
|---|
| 806 | 1186 | } |
|---|
| 807 | 1187 | |
|---|
| 808 | 1188 | /** |
|---|
| .. | .. |
|---|
| 812 | 1192 | */ |
|---|
| 813 | 1193 | static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type) |
|---|
| 814 | 1194 | { |
|---|
| 1195 | + struct device *dev = ice_pf_to_dev(pf); |
|---|
| 815 | 1196 | struct ice_rq_event_info event; |
|---|
| 816 | 1197 | struct ice_hw *hw = &pf->hw; |
|---|
| 817 | 1198 | struct ice_ctl_q_info *cq; |
|---|
| .. | .. |
|---|
| 828 | 1209 | cq = &hw->adminq; |
|---|
| 829 | 1210 | qtype = "Admin"; |
|---|
| 830 | 1211 | break; |
|---|
| 1212 | + case ICE_CTL_Q_MAILBOX: |
|---|
| 1213 | + cq = &hw->mailboxq; |
|---|
| 1214 | + qtype = "Mailbox"; |
|---|
| 1215 | + break; |
|---|
| 831 | 1216 | default: |
|---|
| 832 | | - dev_warn(&pf->pdev->dev, "Unknown control queue type 0x%x\n", |
|---|
| 833 | | - q_type); |
|---|
| 1217 | + dev_warn(dev, "Unknown control queue type 0x%x\n", q_type); |
|---|
| 834 | 1218 | return 0; |
|---|
| 835 | 1219 | } |
|---|
| 836 | 1220 | |
|---|
| .. | .. |
|---|
| 842 | 1226 | PF_FW_ARQLEN_ARQCRIT_M)) { |
|---|
| 843 | 1227 | oldval = val; |
|---|
| 844 | 1228 | if (val & PF_FW_ARQLEN_ARQVFE_M) |
|---|
| 845 | | - dev_dbg(&pf->pdev->dev, |
|---|
| 846 | | - "%s Receive Queue VF Error detected\n", qtype); |
|---|
| 1229 | + dev_dbg(dev, "%s Receive Queue VF Error detected\n", |
|---|
| 1230 | + qtype); |
|---|
| 847 | 1231 | if (val & PF_FW_ARQLEN_ARQOVFL_M) { |
|---|
| 848 | | - dev_dbg(&pf->pdev->dev, |
|---|
| 849 | | - "%s Receive Queue Overflow Error detected\n", |
|---|
| 1232 | + dev_dbg(dev, "%s Receive Queue Overflow Error detected\n", |
|---|
| 850 | 1233 | qtype); |
|---|
| 851 | 1234 | } |
|---|
| 852 | 1235 | if (val & PF_FW_ARQLEN_ARQCRIT_M) |
|---|
| 853 | | - dev_dbg(&pf->pdev->dev, |
|---|
| 854 | | - "%s Receive Queue Critical Error detected\n", |
|---|
| 1236 | + dev_dbg(dev, "%s Receive Queue Critical Error detected\n", |
|---|
| 855 | 1237 | qtype); |
|---|
| 856 | 1238 | val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M | |
|---|
| 857 | 1239 | PF_FW_ARQLEN_ARQCRIT_M); |
|---|
| .. | .. |
|---|
| 864 | 1246 | PF_FW_ATQLEN_ATQCRIT_M)) { |
|---|
| 865 | 1247 | oldval = val; |
|---|
| 866 | 1248 | if (val & PF_FW_ATQLEN_ATQVFE_M) |
|---|
| 867 | | - dev_dbg(&pf->pdev->dev, |
|---|
| 868 | | - "%s Send Queue VF Error detected\n", qtype); |
|---|
| 1249 | + dev_dbg(dev, "%s Send Queue VF Error detected\n", |
|---|
| 1250 | + qtype); |
|---|
| 869 | 1251 | if (val & PF_FW_ATQLEN_ATQOVFL_M) { |
|---|
| 870 | | - dev_dbg(&pf->pdev->dev, |
|---|
| 871 | | - "%s Send Queue Overflow Error detected\n", |
|---|
| 1252 | + dev_dbg(dev, "%s Send Queue Overflow Error detected\n", |
|---|
| 872 | 1253 | qtype); |
|---|
| 873 | 1254 | } |
|---|
| 874 | 1255 | if (val & PF_FW_ATQLEN_ATQCRIT_M) |
|---|
| 875 | | - dev_dbg(&pf->pdev->dev, |
|---|
| 876 | | - "%s Send Queue Critical Error detected\n", |
|---|
| 1256 | + dev_dbg(dev, "%s Send Queue Critical Error detected\n", |
|---|
| 877 | 1257 | qtype); |
|---|
| 878 | 1258 | val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M | |
|---|
| 879 | 1259 | PF_FW_ATQLEN_ATQCRIT_M); |
|---|
| .. | .. |
|---|
| 882 | 1262 | } |
|---|
| 883 | 1263 | |
|---|
| 884 | 1264 | event.buf_len = cq->rq_buf_size; |
|---|
| 885 | | - event.msg_buf = devm_kzalloc(&pf->pdev->dev, event.buf_len, |
|---|
| 886 | | - GFP_KERNEL); |
|---|
| 1265 | + event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); |
|---|
| 887 | 1266 | if (!event.msg_buf) |
|---|
| 888 | 1267 | return 0; |
|---|
| 889 | 1268 | |
|---|
| .. | .. |
|---|
| 895 | 1274 | if (ret == ICE_ERR_AQ_NO_WORK) |
|---|
| 896 | 1275 | break; |
|---|
| 897 | 1276 | if (ret) { |
|---|
| 898 | | - dev_err(&pf->pdev->dev, |
|---|
| 899 | | - "%s Receive Queue event error %d\n", qtype, |
|---|
| 900 | | - ret); |
|---|
| 1277 | + dev_err(dev, "%s Receive Queue event error %s\n", qtype, |
|---|
| 1278 | + ice_stat_str(ret)); |
|---|
| 901 | 1279 | break; |
|---|
| 902 | 1280 | } |
|---|
| 903 | 1281 | |
|---|
| 904 | 1282 | opcode = le16_to_cpu(event.desc.opcode); |
|---|
| 905 | 1283 | |
|---|
| 1284 | + /* Notify any thread that might be waiting for this event */ |
|---|
| 1285 | + ice_aq_check_events(pf, opcode, &event); |
|---|
| 1286 | + |
|---|
| 906 | 1287 | switch (opcode) { |
|---|
| 907 | 1288 | case ice_aqc_opc_get_link_status: |
|---|
| 908 | | - if (ice_handle_link_event(pf)) |
|---|
| 909 | | - dev_err(&pf->pdev->dev, |
|---|
| 910 | | - "Could not handle link event\n"); |
|---|
| 1289 | + if (ice_handle_link_event(pf, &event)) |
|---|
| 1290 | + dev_err(dev, "Could not handle link event\n"); |
|---|
| 1291 | + break; |
|---|
| 1292 | + case ice_aqc_opc_event_lan_overflow: |
|---|
| 1293 | + ice_vf_lan_overflow_event(pf, &event); |
|---|
| 1294 | + break; |
|---|
| 1295 | + case ice_mbx_opc_send_msg_to_pf: |
|---|
| 1296 | + ice_vc_process_vf_msg(pf, &event); |
|---|
| 1297 | + break; |
|---|
| 1298 | + case ice_aqc_opc_fw_logging: |
|---|
| 1299 | + ice_output_fw_log(hw, &event.desc, event.msg_buf); |
|---|
| 1300 | + break; |
|---|
| 1301 | + case ice_aqc_opc_lldp_set_mib_change: |
|---|
| 1302 | + ice_dcb_process_lldp_set_mib_change(pf, &event); |
|---|
| 911 | 1303 | break; |
|---|
| 912 | 1304 | default: |
|---|
| 913 | | - dev_dbg(&pf->pdev->dev, |
|---|
| 914 | | - "%s Receive Queue unknown event 0x%04x ignored\n", |
|---|
| 1305 | + dev_dbg(dev, "%s Receive Queue unknown event 0x%04x ignored\n", |
|---|
| 915 | 1306 | qtype, opcode); |
|---|
| 916 | 1307 | break; |
|---|
| 917 | 1308 | } |
|---|
| 918 | 1309 | } while (pending && (i++ < ICE_DFLT_IRQ_WORK)); |
|---|
| 919 | 1310 | |
|---|
| 920 | | - devm_kfree(&pf->pdev->dev, event.msg_buf); |
|---|
| 1311 | + kfree(event.msg_buf); |
|---|
| 921 | 1312 | |
|---|
| 922 | 1313 | return pending && (i == ICE_DFLT_IRQ_WORK); |
|---|
| 923 | 1314 | } |
|---|
| .. | .. |
|---|
| 965 | 1356 | } |
|---|
| 966 | 1357 | |
|---|
/**
 * ice_clean_mailboxq_subtask - clean the MailboxQ rings
 * @pf: board private structure
 *
 * Drains pending VF mailbox events from the service task. Runs only when the
 * interrupt handler has flagged mailbox work via __ICE_MAILBOXQ_EVENT_PENDING.
 */
static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	if (!test_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state))
		return;

	/* A nonzero return means the queue still has work; leave the pending
	 * bit set so the service task comes back to finish it.
	 */
	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX))
		return;

	clear_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state);

	/* Re-check after clearing the bit to close the race with an event
	 * that arrived in between.
	 */
	if (ice_ctrlq_pending(hw, &hw->mailboxq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX);

	ice_flush(hw);
}
|---|
| 1379 | + |
|---|
/**
 * ice_service_task_schedule - schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue.
 */
void ice_service_task_schedule(struct ice_pf *pf)
{
	/* The test_and_set_bit() of __ICE_SERVICE_SCHED both checks and
	 * claims the "scheduled" slot, so its position in the short-circuit
	 * chain matters: it must only run once the service-disabled check
	 * has passed.
	 */
	if (!test_bit(__ICE_SERVICE_DIS, pf->state) &&
	    !test_and_set_bit(__ICE_SERVICE_SCHED, pf->state) &&
	    !test_bit(__ICE_NEEDS_RESTART, pf->state))
		queue_work(ice_wq, &pf->serv_task);
}
|---|
| 979 | 1393 | |
|---|
| .. | .. |
|---|
| 991 | 1405 | } |
|---|
| 992 | 1406 | |
|---|
| 993 | 1407 | /** |
|---|
| 1408 | + * ice_service_task_stop - stop service task and cancel works |
|---|
| 1409 | + * @pf: board private structure |
|---|
| 1410 | + * |
|---|
| 1411 | + * Return 0 if the __ICE_SERVICE_DIS bit was not already set, |
|---|
| 1412 | + * 1 otherwise. |
|---|
| 1413 | + */ |
|---|
| 1414 | +static int ice_service_task_stop(struct ice_pf *pf) |
|---|
| 1415 | +{ |
|---|
| 1416 | + int ret; |
|---|
| 1417 | + |
|---|
| 1418 | + ret = test_and_set_bit(__ICE_SERVICE_DIS, pf->state); |
|---|
| 1419 | + |
|---|
| 1420 | + if (pf->serv_tmr.function) |
|---|
| 1421 | + del_timer_sync(&pf->serv_tmr); |
|---|
| 1422 | + if (pf->serv_task.func) |
|---|
| 1423 | + cancel_work_sync(&pf->serv_task); |
|---|
| 1424 | + |
|---|
| 1425 | + clear_bit(__ICE_SERVICE_SCHED, pf->state); |
|---|
| 1426 | + return ret; |
|---|
| 1427 | +} |
|---|
| 1428 | + |
|---|
/**
 * ice_service_task_restart - restart service task and schedule works
 * @pf: board private structure
 *
 * This function is needed for suspend and resume works (e.g WoL scenario)
 */
static void ice_service_task_restart(struct ice_pf *pf)
{
	/* Clear the disable bit first, otherwise the schedule call below
	 * would be a no-op.
	 */
	clear_bit(__ICE_SERVICE_DIS, pf->state);
	ice_service_task_schedule(pf);
}
|---|
| 1440 | + |
|---|
| 1441 | +/** |
|---|
| 994 | 1442 | * ice_service_timer - timer callback to schedule service task |
|---|
| 995 | 1443 | * @t: pointer to timer_list |
|---|
| 996 | 1444 | */ |
|---|
| .. | .. |
|---|
| 1000 | 1448 | |
|---|
| 1001 | 1449 | mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies)); |
|---|
| 1002 | 1450 | ice_service_task_schedule(pf); |
|---|
| 1451 | +} |
|---|
| 1452 | + |
|---|
/**
 * ice_handle_mdd_event - handle malicious driver detect event
 * @pf: pointer to the PF structure
 *
 * Called from service task. OICR interrupt handler indicates MDD event.
 * VF MDD logging is guarded by net_ratelimit. Additional PF and VF log
 * messages are wrapped by netif_msg_[rx|tx]_err. Since VF Rx MDD events
 * disable the queue, the PF can be configured to reset the VF using ethtool
 * private flag mdd-auto-reset-vf.
 */
static void ice_handle_mdd_event(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	unsigned int i;
	u32 reg;

	if (!test_and_clear_bit(__ICE_MDD_EVENT_PENDING, pf->state)) {
		/* Since the VF MDD event logging is rate limited, check if
		 * there are pending MDD events.
		 */
		ice_print_vfs_mdd_events(pf);
		return;
	}

	/* find what triggered an MDD event */
	reg = rd32(hw, GL_MDET_TX_PQM);
	if (reg & GL_MDET_TX_PQM_VALID_M) {
		/* Decode the PF/VF/event-type/queue fields of the register */
		u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
				GL_MDET_TX_PQM_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >>
				GL_MDET_TX_PQM_VF_NUM_S;
		u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
				GL_MDET_TX_PQM_MAL_TYPE_S;
		u16 queue = ((reg & GL_MDET_TX_PQM_QNUM_M) >>
				GL_MDET_TX_PQM_QNUM_S);

		if (netif_msg_tx_err(pf))
			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		/* Writing all-ones clears the latched event */
		wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
	}

	reg = rd32(hw, GL_MDET_TX_TCLAN);
	if (reg & GL_MDET_TX_TCLAN_VALID_M) {
		u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
				GL_MDET_TX_TCLAN_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >>
				GL_MDET_TX_TCLAN_VF_NUM_S;
		u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
				GL_MDET_TX_TCLAN_MAL_TYPE_S;
		u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >>
				GL_MDET_TX_TCLAN_QNUM_S);

		if (netif_msg_tx_err(pf))
			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff);
	}

	reg = rd32(hw, GL_MDET_RX);
	if (reg & GL_MDET_RX_VALID_M) {
		u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >>
				GL_MDET_RX_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >>
				GL_MDET_RX_VF_NUM_S;
		u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >>
				GL_MDET_RX_MAL_TYPE_S;
		u16 queue = ((reg & GL_MDET_RX_QNUM_M) >>
				GL_MDET_RX_QNUM_S);

		if (netif_msg_rx_err(pf))
			dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, GL_MDET_RX, 0xffffffff);
	}

	/* check to see if this PF caused an MDD event */
	reg = rd32(hw, PF_MDET_TX_PQM);
	if (reg & PF_MDET_TX_PQM_VALID_M) {
		wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
		if (netif_msg_tx_err(pf))
			dev_info(dev, "Malicious Driver Detection event TX_PQM detected on PF\n");
	}

	reg = rd32(hw, PF_MDET_TX_TCLAN);
	if (reg & PF_MDET_TX_TCLAN_VALID_M) {
		wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF);
		if (netif_msg_tx_err(pf))
			dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on PF\n");
	}

	reg = rd32(hw, PF_MDET_RX);
	if (reg & PF_MDET_RX_VALID_M) {
		wr32(hw, PF_MDET_RX, 0xFFFF);
		if (netif_msg_rx_err(pf))
			dev_info(dev, "Malicious Driver Detection event RX detected on PF\n");
	}

	/* Check to see if one of the VFs caused an MDD event, and then
	 * increment counters and set print pending
	 */
	ice_for_each_vf(pf, i) {
		struct ice_vf *vf = &pf->vf[i];

		reg = rd32(hw, VP_MDET_TX_PQM(i));
		if (reg & VP_MDET_TX_PQM_VALID_M) {
			wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF);
			vf->mdd_tx_events.count++;
			set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state);
			if (netif_msg_tx_err(pf))
				dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n",
					 i);
		}

		reg = rd32(hw, VP_MDET_TX_TCLAN(i));
		if (reg & VP_MDET_TX_TCLAN_VALID_M) {
			wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF);
			vf->mdd_tx_events.count++;
			set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state);
			if (netif_msg_tx_err(pf))
				dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n",
					 i);
		}

		reg = rd32(hw, VP_MDET_TX_TDPU(i));
		if (reg & VP_MDET_TX_TDPU_VALID_M) {
			wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF);
			vf->mdd_tx_events.count++;
			set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state);
			if (netif_msg_tx_err(pf))
				dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n",
					 i);
		}

		reg = rd32(hw, VP_MDET_RX(i));
		if (reg & VP_MDET_RX_VALID_M) {
			wr32(hw, VP_MDET_RX(i), 0xFFFF);
			vf->mdd_rx_events.count++;
			set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state);
			if (netif_msg_rx_err(pf))
				dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n",
					 i);

			/* Since the queue is disabled on VF Rx MDD events, the
			 * PF can be configured to reset the VF through ethtool
			 * private flag mdd-auto-reset-vf.
			 */
			if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)) {
				/* VF MDD event counters will be cleared by
				 * reset, so print the event prior to reset.
				 */
				ice_print_vf_rx_mdd_event(vf);
				/* cfg_lock serializes the reset against
				 * concurrent VF configuration changes
				 */
				mutex_lock(&pf->vf[i].cfg_lock);
				ice_reset_vf(&pf->vf[i], false);
				mutex_unlock(&pf->vf[i].cfg_lock);
			}
		}
	}

	ice_print_vfs_mdd_events(pf);
}
|---|
| 1615 | + |
|---|
/**
 * ice_force_phys_link_state - Force the physical link state
 * @vsi: VSI to force the physical link state to up/down
 * @link_up: true/false indicates to set the physical link to up/down
 *
 * Force the physical link state by getting the current PHY capabilities from
 * hardware and setting the PHY config based on the determined capabilities. If
 * link changes a link event will be triggered because both the Enable Automatic
 * Link Update and LESM Enable bits are set when setting the PHY capabilities.
 *
 * Returns 0 on success, negative on failure
 */
static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	struct ice_aqc_set_phy_cfg_data *cfg;
	struct ice_port_info *pi;
	struct device *dev;
	int retcode;

	if (!vsi || !vsi->port_info || !vsi->back)
		return -EINVAL;
	/* Only the PF VSI controls the physical port; others are a no-op */
	if (vsi->type != ICE_VSI_PF)
		return 0;

	dev = ice_pf_to_dev(vsi->back);

	pi = vsi->port_info;

	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return -ENOMEM;

	retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
				      NULL);
	if (retcode) {
		dev_err(dev, "Failed to get phy capabilities, VSI %d error %d\n",
			vsi->vsi_num, retcode);
		retcode = -EIO;
		goto out;
	}

	/* No change in link */
	if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
	    link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
		goto out;

	/* Use the current user PHY configuration. The current user PHY
	 * configuration is initialized during probe from PHY capabilities
	 * software mode, and updated on set PHY configuration.
	 */
	cfg = kmemdup(&pi->phy.curr_user_phy_cfg, sizeof(*cfg), GFP_KERNEL);
	if (!cfg) {
		retcode = -ENOMEM;
		goto out;
	}

	cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
	if (link_up)
		cfg->caps |= ICE_AQ_PHY_ENA_LINK;
	else
		cfg->caps &= ~ICE_AQ_PHY_ENA_LINK;

	retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL);
	if (retcode) {
		dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
			vsi->vsi_num, retcode);
		retcode = -EIO;
	}

	kfree(cfg);
out:
	kfree(pcaps);
	return retcode;
}
|---|
| 1691 | + |
|---|
| 1692 | +/** |
|---|
| 1693 | + * ice_init_nvm_phy_type - Initialize the NVM PHY type |
|---|
| 1694 | + * @pi: port info structure |
|---|
| 1695 | + * |
|---|
| 1696 | + * Initialize nvm_phy_type_[low|high] for link lenient mode support |
|---|
| 1697 | + */ |
|---|
| 1698 | +static int ice_init_nvm_phy_type(struct ice_port_info *pi) |
|---|
| 1699 | +{ |
|---|
| 1700 | + struct ice_aqc_get_phy_caps_data *pcaps; |
|---|
| 1701 | + struct ice_pf *pf = pi->hw->back; |
|---|
| 1702 | + enum ice_status status; |
|---|
| 1703 | + int err = 0; |
|---|
| 1704 | + |
|---|
| 1705 | + pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); |
|---|
| 1706 | + if (!pcaps) |
|---|
| 1707 | + return -ENOMEM; |
|---|
| 1708 | + |
|---|
| 1709 | + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA, pcaps, |
|---|
| 1710 | + NULL); |
|---|
| 1711 | + |
|---|
| 1712 | + if (status) { |
|---|
| 1713 | + dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n"); |
|---|
| 1714 | + err = -EIO; |
|---|
| 1715 | + goto out; |
|---|
| 1716 | + } |
|---|
| 1717 | + |
|---|
| 1718 | + pf->nvm_phy_type_hi = pcaps->phy_type_high; |
|---|
| 1719 | + pf->nvm_phy_type_lo = pcaps->phy_type_low; |
|---|
| 1720 | + |
|---|
| 1721 | +out: |
|---|
| 1722 | + kfree(pcaps); |
|---|
| 1723 | + return err; |
|---|
| 1724 | +} |
|---|
| 1725 | + |
|---|
| 1726 | +/** |
|---|
| 1727 | + * ice_init_link_dflt_override - Initialize link default override |
|---|
| 1728 | + * @pi: port info structure |
|---|
| 1729 | + * |
|---|
| 1730 | + * Initialize link default override and PHY total port shutdown during probe |
|---|
| 1731 | + */ |
|---|
| 1732 | +static void ice_init_link_dflt_override(struct ice_port_info *pi) |
|---|
| 1733 | +{ |
|---|
| 1734 | + struct ice_link_default_override_tlv *ldo; |
|---|
| 1735 | + struct ice_pf *pf = pi->hw->back; |
|---|
| 1736 | + |
|---|
| 1737 | + ldo = &pf->link_dflt_override; |
|---|
| 1738 | + if (ice_get_link_default_override(ldo, pi)) |
|---|
| 1739 | + return; |
|---|
| 1740 | + |
|---|
| 1741 | + if (!(ldo->options & ICE_LINK_OVERRIDE_PORT_DIS)) |
|---|
| 1742 | + return; |
|---|
| 1743 | + |
|---|
| 1744 | + /* Enable Total Port Shutdown (override/replace link-down-on-close |
|---|
| 1745 | + * ethtool private flag) for ports with Port Disable bit set. |
|---|
| 1746 | + */ |
|---|
| 1747 | + set_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags); |
|---|
| 1748 | + set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags); |
|---|
| 1749 | +} |
|---|
| 1750 | + |
|---|
/**
 * ice_init_phy_cfg_dflt_override - Initialize PHY cfg default override settings
 * @pi: port info structure
 *
 * If default override is enabled, initialized the user PHY cfg speed and FEC
 * settings using the default override mask from the NVM.
 *
 * The PHY should only be configured with the default override settings the
 * first time media is available. The __ICE_LINK_DEFAULT_OVERRIDE_PENDING state
 * is used to indicate that the user PHY cfg default override is initialized
 * and the PHY has not been configured with the default override settings. The
 * state is set here, and cleared in ice_configure_phy the first time the PHY is
 * configured.
 */
static void ice_init_phy_cfg_dflt_override(struct ice_port_info *pi)
{
	struct ice_link_default_override_tlv *ldo;
	struct ice_aqc_set_phy_cfg_data *cfg;
	struct ice_phy_info *phy = &pi->phy;
	struct ice_pf *pf = pi->hw->back;

	ldo = &pf->link_dflt_override;

	/* If link default override is enabled, use to mask NVM PHY capabilities
	 * for speed and FEC default configuration.
	 */
	cfg = &phy->curr_user_phy_cfg;

	if (ldo->phy_type_low || ldo->phy_type_high) {
		/* NOTE(review): only the override mask is converted with
		 * cpu_to_le64(), which implies nvm_phy_type_lo/hi are already
		 * stored little-endian — confirm against their declarations.
		 */
		cfg->phy_type_low = pf->nvm_phy_type_lo &
				    cpu_to_le64(ldo->phy_type_low);
		cfg->phy_type_high = pf->nvm_phy_type_hi &
				     cpu_to_le64(ldo->phy_type_high);
	}
	cfg->link_fec_opt = ldo->fec_options;
	phy->curr_user_fec_req = ICE_FEC_AUTO;

	/* Flag that the override still has to be applied to the PHY */
	set_bit(__ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state);
}
|---|
| 1790 | + |
|---|
/**
 * ice_init_phy_user_cfg - Initialize the PHY user configuration
 * @pi: port info structure
 *
 * Initialize the current user PHY configuration, speed, FEC, and FC requested
 * mode to default. The PHY defaults are from get PHY capabilities topology
 * with media so call when media is first available. An error is returned if
 * called when media is not available. The PHY initialization completed state is
 * set here.
 *
 * These configurations are used when setting PHY
 * configuration. The user PHY configuration is updated on set PHY
 * configuration. Returns 0 on success, negative on failure
 */
static int ice_init_phy_user_cfg(struct ice_port_info *pi)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	struct ice_phy_info *phy = &pi->phy;
	struct ice_pf *pf = pi->hw->back;
	enum ice_status status;
	struct ice_vsi *vsi;
	int err = 0;

	/* PHY defaults come from topology-with-media; bail if no media yet */
	if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
		return -EIO;

	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		return -EINVAL;

	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return -ENOMEM;

	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps,
				     NULL);
	if (status) {
		dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
		err = -EIO;
		goto err_out;
	}

	ice_copy_phy_caps_to_cfg(pi, pcaps, &pi->phy.curr_user_phy_cfg);

	/* check if lenient mode is supported and enabled */
	if (ice_fw_supports_link_override(&vsi->back->hw) &&
	    !(pcaps->module_compliance_enforcement &
	      ICE_AQC_MOD_ENFORCE_STRICT_MODE)) {
		set_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags);

		/* if link default override is enabled, initialize user PHY
		 * configuration with link default override values
		 */
		if (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN) {
			ice_init_phy_cfg_dflt_override(pi);
			goto out;
		}
	}

	/* if link default override is not enabled, initialize PHY using
	 * topology with media
	 */
	phy->curr_user_fec_req = ice_caps_to_fec_mode(pcaps->caps,
						      pcaps->link_fec_options);
	phy->curr_user_fc_req = ice_caps_to_fc_mode(pcaps->caps);

	/* Success path: "out" deliberately falls through to "err_out" so the
	 * pcaps buffer is freed and err (still 0) is returned.
	 */
out:
	phy->curr_user_speed_req = ICE_AQ_LINK_SPEED_M;
	set_bit(__ICE_PHY_INIT_COMPLETE, pf->state);
err_out:
	kfree(pcaps);
	return err;
}
|---|
| 1864 | + |
|---|
| 1865 | +/** |
|---|
| 1866 | + * ice_configure_phy - configure PHY |
|---|
| 1867 | + * @vsi: VSI of PHY |
|---|
| 1868 | + * |
|---|
| 1869 | + * Set the PHY configuration. If the current PHY configuration is the same as |
|---|
| 1870 | + * the curr_user_phy_cfg, then do nothing to avoid link flap. Otherwise |
|---|
| 1871 | + * configure the PHY based on the get PHY capabilities for topology with media. |
|---|
| 1872 | + */ |
|---|
| 1873 | +static int ice_configure_phy(struct ice_vsi *vsi) |
|---|
| 1874 | +{ |
|---|
| 1875 | + struct device *dev = ice_pf_to_dev(vsi->back); |
|---|
| 1876 | + struct ice_aqc_get_phy_caps_data *pcaps; |
|---|
| 1877 | + struct ice_aqc_set_phy_cfg_data *cfg; |
|---|
| 1878 | + struct ice_port_info *pi; |
|---|
| 1879 | + enum ice_status status; |
|---|
| 1880 | + int err = 0; |
|---|
| 1881 | + |
|---|
| 1882 | + pi = vsi->port_info; |
|---|
| 1883 | + if (!pi) |
|---|
| 1884 | + return -EINVAL; |
|---|
| 1885 | + |
|---|
| 1886 | + /* Ensure we have media as we cannot configure a medialess port */ |
|---|
| 1887 | + if (!(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) |
|---|
| 1888 | + return -EPERM; |
|---|
| 1889 | + |
|---|
| 1890 | + ice_print_topo_conflict(vsi); |
|---|
| 1891 | + |
|---|
| 1892 | + if (vsi->port_info->phy.link_info.topo_media_conflict == |
|---|
| 1893 | + ICE_AQ_LINK_TOPO_UNSUPP_MEDIA) |
|---|
| 1894 | + return -EPERM; |
|---|
| 1895 | + |
|---|
| 1896 | + if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) |
|---|
| 1897 | + return ice_force_phys_link_state(vsi, true); |
|---|
| 1898 | + |
|---|
| 1899 | + pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); |
|---|
| 1900 | + if (!pcaps) |
|---|
| 1901 | + return -ENOMEM; |
|---|
| 1902 | + |
|---|
| 1903 | + /* Get current PHY config */ |
|---|
| 1904 | + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps, |
|---|
| 1905 | + NULL); |
|---|
| 1906 | + if (status) { |
|---|
| 1907 | + dev_err(dev, "Failed to get PHY configuration, VSI %d error %s\n", |
|---|
| 1908 | + vsi->vsi_num, ice_stat_str(status)); |
|---|
| 1909 | + err = -EIO; |
|---|
| 1910 | + goto done; |
|---|
| 1911 | + } |
|---|
| 1912 | + |
|---|
| 1913 | + /* If PHY enable link is configured and configuration has not changed, |
|---|
| 1914 | + * there's nothing to do |
|---|
| 1915 | + */ |
|---|
| 1916 | + if (pcaps->caps & ICE_AQC_PHY_EN_LINK && |
|---|
| 1917 | + ice_phy_caps_equals_cfg(pcaps, &pi->phy.curr_user_phy_cfg)) |
|---|
| 1918 | + goto done; |
|---|
| 1919 | + |
|---|
| 1920 | + /* Use PHY topology as baseline for configuration */ |
|---|
| 1921 | + memset(pcaps, 0, sizeof(*pcaps)); |
|---|
| 1922 | + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps, |
|---|
| 1923 | + NULL); |
|---|
| 1924 | + if (status) { |
|---|
| 1925 | + dev_err(dev, "Failed to get PHY topology, VSI %d error %s\n", |
|---|
| 1926 | + vsi->vsi_num, ice_stat_str(status)); |
|---|
| 1927 | + err = -EIO; |
|---|
| 1928 | + goto done; |
|---|
| 1929 | + } |
|---|
| 1930 | + |
|---|
| 1931 | + cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); |
|---|
| 1932 | + if (!cfg) { |
|---|
| 1933 | + err = -ENOMEM; |
|---|
| 1934 | + goto done; |
|---|
| 1935 | + } |
|---|
| 1936 | + |
|---|
| 1937 | + ice_copy_phy_caps_to_cfg(pi, pcaps, cfg); |
|---|
| 1938 | + |
|---|
| 1939 | + /* Speed - If default override pending, use curr_user_phy_cfg set in |
|---|
| 1940 | + * ice_init_phy_cfg_dflt_override. |
|---|
| 1941 | + */ |
|---|
| 1942 | + if (test_and_clear_bit(__ICE_LINK_DEFAULT_OVERRIDE_PENDING, |
|---|
| 1943 | + vsi->back->state)) { |
|---|
| 1944 | + cfg->phy_type_low = pi->phy.curr_user_phy_cfg.phy_type_low; |
|---|
| 1945 | + cfg->phy_type_high = pi->phy.curr_user_phy_cfg.phy_type_high; |
|---|
| 1946 | + } else { |
|---|
| 1947 | + u64 phy_low = 0, phy_high = 0; |
|---|
| 1948 | + |
|---|
| 1949 | + ice_update_phy_type(&phy_low, &phy_high, |
|---|
| 1950 | + pi->phy.curr_user_speed_req); |
|---|
| 1951 | + cfg->phy_type_low = pcaps->phy_type_low & cpu_to_le64(phy_low); |
|---|
| 1952 | + cfg->phy_type_high = pcaps->phy_type_high & |
|---|
| 1953 | + cpu_to_le64(phy_high); |
|---|
| 1954 | + } |
|---|
| 1955 | + |
|---|
| 1956 | + /* Can't provide what was requested; use PHY capabilities */ |
|---|
| 1957 | + if (!cfg->phy_type_low && !cfg->phy_type_high) { |
|---|
| 1958 | + cfg->phy_type_low = pcaps->phy_type_low; |
|---|
| 1959 | + cfg->phy_type_high = pcaps->phy_type_high; |
|---|
| 1960 | + } |
|---|
| 1961 | + |
|---|
| 1962 | + /* FEC */ |
|---|
| 1963 | + ice_cfg_phy_fec(pi, cfg, pi->phy.curr_user_fec_req); |
|---|
| 1964 | + |
|---|
| 1965 | + /* Can't provide what was requested; use PHY capabilities */ |
|---|
| 1966 | + if (cfg->link_fec_opt != |
|---|
| 1967 | + (cfg->link_fec_opt & pcaps->link_fec_options)) { |
|---|
| 1968 | + cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC; |
|---|
| 1969 | + cfg->link_fec_opt = pcaps->link_fec_options; |
|---|
| 1970 | + } |
|---|
| 1971 | + |
|---|
| 1972 | + /* Flow Control - always supported; no need to check against |
|---|
| 1973 | + * capabilities |
|---|
| 1974 | + */ |
|---|
| 1975 | + ice_cfg_phy_fc(pi, cfg, pi->phy.curr_user_fc_req); |
|---|
| 1976 | + |
|---|
| 1977 | + /* Enable link and link update */ |
|---|
| 1978 | + cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK; |
|---|
| 1979 | + |
|---|
| 1980 | + status = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL); |
|---|
| 1981 | + if (status) { |
|---|
| 1982 | + dev_err(dev, "Failed to set phy config, VSI %d error %s\n", |
|---|
| 1983 | + vsi->vsi_num, ice_stat_str(status)); |
|---|
| 1984 | + err = -EIO; |
|---|
| 1985 | + } |
|---|
| 1986 | + |
|---|
| 1987 | + kfree(cfg); |
|---|
| 1988 | +done: |
|---|
| 1989 | + kfree(pcaps); |
|---|
| 1990 | + return err; |
|---|
| 1991 | +} |
|---|
| 1992 | + |
|---|
| 1993 | +/** |
|---|
| 1994 | + * ice_check_media_subtask - Check for media |
|---|
| 1995 | + * @pf: pointer to PF struct |
|---|
| 1996 | + * |
|---|
| 1997 | + * If media is available, then initialize the PHY user configuration if it |
|---|
| 1998 | + * has not been done yet, and configure the PHY if the interface is up. |
|---|
| 1999 | + */ |
|---|
| 2000 | +static void ice_check_media_subtask(struct ice_pf *pf) |
|---|
| 2001 | +{ |
|---|
| 2002 | + struct ice_port_info *pi; |
|---|
| 2003 | + struct ice_vsi *vsi; |
|---|
| 2004 | + int err; |
|---|
| 2005 | + |
|---|
| 2006 | + /* No need to check for media if it's already present */ |
|---|
| 2007 | + if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags)) |
|---|
| 2008 | + return; |
|---|
| 2009 | + |
|---|
| 2010 | + vsi = ice_get_main_vsi(pf); |
|---|
| 2011 | + if (!vsi) |
|---|
| 2012 | + return; |
|---|
| 2013 | + |
|---|
| 2014 | + /* Refresh link info and check if media is present */ |
|---|
| 2015 | + pi = vsi->port_info; |
|---|
| 2016 | + err = ice_update_link_info(pi); |
|---|
| 2017 | + if (err) |
|---|
| 2018 | + return; |
|---|
| 2019 | + |
|---|
| 2020 | + if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) { |
|---|
| 2021 | + if (!test_bit(__ICE_PHY_INIT_COMPLETE, pf->state)) |
|---|
| 2022 | + ice_init_phy_user_cfg(pi); |
|---|
| 2023 | + |
|---|
| 2024 | + /* PHY settings are reset on media insertion, reconfigure |
|---|
| 2025 | + * PHY to preserve settings. |
|---|
| 2026 | + */ |
|---|
| 2027 | + if (test_bit(__ICE_DOWN, vsi->state) && |
|---|
| 2028 | + test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) |
|---|
| 2029 | + return; |
|---|
| 2030 | + |
|---|
| 2031 | + err = ice_configure_phy(vsi); |
|---|
| 2032 | + if (!err) |
|---|
| 2033 | + clear_bit(ICE_FLAG_NO_MEDIA, pf->flags); |
|---|
| 2034 | + |
|---|
| 2035 | + /* A Link Status Event will be generated; the event handler |
|---|
| 2036 | + * will complete bringing the interface up |
|---|
| 2037 | + */ |
|---|
| 2038 | + } |
|---|
| 1003 | 2039 | } |
|---|
| 1004 | 2040 | |
|---|
| 1005 | 2041 | /** |
|---|
| .. | .. |
|---|
| 1016 | 2052 | /* process reset requests first */ |
|---|
| 1017 | 2053 | ice_reset_subtask(pf); |
|---|
| 1018 | 2054 | |
|---|
| 1019 | | - /* bail if a reset/recovery cycle is pending */ |
|---|
| 1020 | | - if (ice_is_reset_recovery_pending(pf->state) || |
|---|
| 1021 | | - test_bit(__ICE_SUSPENDED, pf->state)) { |
|---|
| 2055 | + /* bail if a reset/recovery cycle is pending or rebuild failed */ |
|---|
| 2056 | + if (ice_is_reset_in_progress(pf->state) || |
|---|
| 2057 | + test_bit(__ICE_SUSPENDED, pf->state) || |
|---|
| 2058 | + test_bit(__ICE_NEEDS_RESTART, pf->state)) { |
|---|
| 1022 | 2059 | ice_service_task_complete(pf); |
|---|
| 1023 | 2060 | return; |
|---|
| 1024 | 2061 | } |
|---|
| 1025 | 2062 | |
|---|
| 1026 | | - ice_sync_fltr_subtask(pf); |
|---|
| 1027 | | - ice_watchdog_subtask(pf); |
|---|
| 1028 | 2063 | ice_clean_adminq_subtask(pf); |
|---|
| 2064 | + ice_check_media_subtask(pf); |
|---|
| 2065 | + ice_check_for_hang_subtask(pf); |
|---|
| 2066 | + ice_sync_fltr_subtask(pf); |
|---|
| 2067 | + ice_handle_mdd_event(pf); |
|---|
| 2068 | + ice_watchdog_subtask(pf); |
|---|
| 1029 | 2069 | |
|---|
| 2070 | + if (ice_is_safe_mode(pf)) { |
|---|
| 2071 | + ice_service_task_complete(pf); |
|---|
| 2072 | + return; |
|---|
| 2073 | + } |
|---|
| 2074 | + |
|---|
| 2075 | + ice_process_vflr_event(pf); |
|---|
| 2076 | + ice_clean_mailboxq_subtask(pf); |
|---|
| 2077 | + ice_sync_arfs_fltrs(pf); |
|---|
| 1030 | 2078 | /* Clear __ICE_SERVICE_SCHED flag to allow scheduling next event */ |
|---|
| 1031 | 2079 | ice_service_task_complete(pf); |
|---|
| 1032 | 2080 | |
|---|
| .. | .. |
|---|
| 1035 | 2083 | * schedule the service task now. |
|---|
| 1036 | 2084 | */ |
|---|
| 1037 | 2085 | if (time_after(jiffies, (start_time + pf->serv_tmr_period)) || |
|---|
| 2086 | + test_bit(__ICE_MDD_EVENT_PENDING, pf->state) || |
|---|
| 2087 | + test_bit(__ICE_VFLR_EVENT_PENDING, pf->state) || |
|---|
| 2088 | + test_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state) || |
|---|
| 1038 | 2089 | test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state)) |
|---|
| 1039 | 2090 | mod_timer(&pf->serv_tmr, jiffies); |
|---|
| 1040 | 2091 | } |
|---|
| 1041 | 2092 | |
|---|
| 1042 | 2093 | /** |
|---|
| 1043 | 2094 | * ice_set_ctrlq_len - helper function to set controlq length |
|---|
| 1044 | | - * @hw: pointer to the hw instance |
|---|
| 2095 | + * @hw: pointer to the HW instance |
|---|
| 1045 | 2096 | */ |
|---|
| 1046 | 2097 | static void ice_set_ctrlq_len(struct ice_hw *hw) |
|---|
| 1047 | 2098 | { |
|---|
| .. | .. |
|---|
| 1049 | 2100 | hw->adminq.num_sq_entries = ICE_AQ_LEN; |
|---|
| 1050 | 2101 | hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN; |
|---|
| 1051 | 2102 | hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN; |
|---|
| 2103 | + hw->mailboxq.num_rq_entries = PF_MBX_ARQLEN_ARQLEN_M; |
|---|
| 2104 | + hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN; |
|---|
| 2105 | + hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN; |
|---|
| 2106 | + hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN; |
|---|
| 2107 | +} |
|---|
| 2108 | + |
|---|
| 2109 | +/** |
|---|
| 2110 | + * ice_schedule_reset - schedule a reset |
|---|
| 2111 | + * @pf: board private structure |
|---|
| 2112 | + * @reset: reset being requested |
|---|
| 2113 | + */ |
|---|
| 2114 | +int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset) |
|---|
| 2115 | +{ |
|---|
| 2116 | + struct device *dev = ice_pf_to_dev(pf); |
|---|
| 2117 | + |
|---|
| 2118 | + /* bail out if earlier reset has failed */ |
|---|
| 2119 | + if (test_bit(__ICE_RESET_FAILED, pf->state)) { |
|---|
| 2120 | + dev_dbg(dev, "earlier reset has failed\n"); |
|---|
| 2121 | + return -EIO; |
|---|
| 2122 | + } |
|---|
| 2123 | + /* bail if reset/recovery already in progress */ |
|---|
| 2124 | + if (ice_is_reset_in_progress(pf->state)) { |
|---|
| 2125 | + dev_dbg(dev, "Reset already in progress\n"); |
|---|
| 2126 | + return -EBUSY; |
|---|
| 2127 | + } |
|---|
| 2128 | + |
|---|
| 2129 | + switch (reset) { |
|---|
| 2130 | + case ICE_RESET_PFR: |
|---|
| 2131 | + set_bit(__ICE_PFR_REQ, pf->state); |
|---|
| 2132 | + break; |
|---|
| 2133 | + case ICE_RESET_CORER: |
|---|
| 2134 | + set_bit(__ICE_CORER_REQ, pf->state); |
|---|
| 2135 | + break; |
|---|
| 2136 | + case ICE_RESET_GLOBR: |
|---|
| 2137 | + set_bit(__ICE_GLOBR_REQ, pf->state); |
|---|
| 2138 | + break; |
|---|
| 2139 | + default: |
|---|
| 2140 | + return -EINVAL; |
|---|
| 2141 | + } |
|---|
| 2142 | + |
|---|
| 2143 | + ice_service_task_schedule(pf); |
|---|
| 2144 | + return 0; |
|---|
| 1052 | 2145 | } |
|---|
| 1053 | 2146 | |
|---|
| 1054 | 2147 | /** |
|---|
| .. | .. |
|---|
| 1059 | 2152 | * This is a callback function used by the irq_set_affinity_notifier function |
|---|
| 1060 | 2153 | * so that we may register to receive changes to the irq affinity masks. |
|---|
| 1061 | 2154 | */ |
|---|
| 1062 | | -static void ice_irq_affinity_notify(struct irq_affinity_notify *notify, |
|---|
| 1063 | | - const cpumask_t *mask) |
|---|
| 2155 | +static void |
|---|
| 2156 | +ice_irq_affinity_notify(struct irq_affinity_notify *notify, |
|---|
| 2157 | + const cpumask_t *mask) |
|---|
| 1064 | 2158 | { |
|---|
| 1065 | 2159 | struct ice_q_vector *q_vector = |
|---|
| 1066 | 2160 | container_of(notify, struct ice_q_vector, affinity_notify); |
|---|
| .. | .. |
|---|
| 1079 | 2173 | static void ice_irq_affinity_release(struct kref __always_unused *ref) {} |
|---|
| 1080 | 2174 | |
|---|
| 1081 | 2175 | /** |
|---|
| 1082 | | - * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI |
|---|
| 1083 | | - * @vsi: the VSI being un-configured |
|---|
| 1084 | | - */ |
|---|
| 1085 | | -static void ice_vsi_dis_irq(struct ice_vsi *vsi) |
|---|
| 1086 | | -{ |
|---|
| 1087 | | - struct ice_pf *pf = vsi->back; |
|---|
| 1088 | | - struct ice_hw *hw = &pf->hw; |
|---|
| 1089 | | - int base = vsi->base_vector; |
|---|
| 1090 | | - u32 val; |
|---|
| 1091 | | - int i; |
|---|
| 1092 | | - |
|---|
| 1093 | | - /* disable interrupt causation from each queue */ |
|---|
| 1094 | | - if (vsi->tx_rings) { |
|---|
| 1095 | | - ice_for_each_txq(vsi, i) { |
|---|
| 1096 | | - if (vsi->tx_rings[i]) { |
|---|
| 1097 | | - u16 reg; |
|---|
| 1098 | | - |
|---|
| 1099 | | - reg = vsi->tx_rings[i]->reg_idx; |
|---|
| 1100 | | - val = rd32(hw, QINT_TQCTL(reg)); |
|---|
| 1101 | | - val &= ~QINT_TQCTL_CAUSE_ENA_M; |
|---|
| 1102 | | - wr32(hw, QINT_TQCTL(reg), val); |
|---|
| 1103 | | - } |
|---|
| 1104 | | - } |
|---|
| 1105 | | - } |
|---|
| 1106 | | - |
|---|
| 1107 | | - if (vsi->rx_rings) { |
|---|
| 1108 | | - ice_for_each_rxq(vsi, i) { |
|---|
| 1109 | | - if (vsi->rx_rings[i]) { |
|---|
| 1110 | | - u16 reg; |
|---|
| 1111 | | - |
|---|
| 1112 | | - reg = vsi->rx_rings[i]->reg_idx; |
|---|
| 1113 | | - val = rd32(hw, QINT_RQCTL(reg)); |
|---|
| 1114 | | - val &= ~QINT_RQCTL_CAUSE_ENA_M; |
|---|
| 1115 | | - wr32(hw, QINT_RQCTL(reg), val); |
|---|
| 1116 | | - } |
|---|
| 1117 | | - } |
|---|
| 1118 | | - } |
|---|
| 1119 | | - |
|---|
| 1120 | | - /* disable each interrupt */ |
|---|
| 1121 | | - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) { |
|---|
| 1122 | | - for (i = vsi->base_vector; |
|---|
| 1123 | | - i < (vsi->num_q_vectors + vsi->base_vector); i++) |
|---|
| 1124 | | - wr32(hw, GLINT_DYN_CTL(i), 0); |
|---|
| 1125 | | - |
|---|
| 1126 | | - ice_flush(hw); |
|---|
| 1127 | | - for (i = 0; i < vsi->num_q_vectors; i++) |
|---|
| 1128 | | - synchronize_irq(pf->msix_entries[i + base].vector); |
|---|
| 1129 | | - } |
|---|
| 1130 | | -} |
|---|
| 1131 | | - |
|---|
| 1132 | | -/** |
|---|
| 1133 | 2176 | * ice_vsi_ena_irq - Enable IRQ for the given VSI |
|---|
| 1134 | 2177 | * @vsi: the VSI being configured |
|---|
| 1135 | 2178 | */ |
|---|
| 1136 | 2179 | static int ice_vsi_ena_irq(struct ice_vsi *vsi) |
|---|
| 1137 | 2180 | { |
|---|
| 1138 | | - struct ice_pf *pf = vsi->back; |
|---|
| 1139 | | - struct ice_hw *hw = &pf->hw; |
|---|
| 2181 | + struct ice_hw *hw = &vsi->back->hw; |
|---|
| 2182 | + int i; |
|---|
| 1140 | 2183 | |
|---|
| 1141 | | - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) { |
|---|
| 1142 | | - int i; |
|---|
| 1143 | | - |
|---|
| 1144 | | - for (i = 0; i < vsi->num_q_vectors; i++) |
|---|
| 1145 | | - ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]); |
|---|
| 1146 | | - } |
|---|
| 2184 | + ice_for_each_q_vector(vsi, i) |
|---|
| 2185 | + ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]); |
|---|
| 1147 | 2186 | |
|---|
| 1148 | 2187 | ice_flush(hw); |
|---|
| 1149 | 2188 | return 0; |
|---|
| 1150 | | -} |
|---|
| 1151 | | - |
|---|
| 1152 | | -/** |
|---|
| 1153 | | - * ice_vsi_delete - delete a VSI from the switch |
|---|
| 1154 | | - * @vsi: pointer to VSI being removed |
|---|
| 1155 | | - */ |
|---|
| 1156 | | -static void ice_vsi_delete(struct ice_vsi *vsi) |
|---|
| 1157 | | -{ |
|---|
| 1158 | | - struct ice_pf *pf = vsi->back; |
|---|
| 1159 | | - struct ice_vsi_ctx ctxt; |
|---|
| 1160 | | - enum ice_status status; |
|---|
| 1161 | | - |
|---|
| 1162 | | - ctxt.vsi_num = vsi->vsi_num; |
|---|
| 1163 | | - |
|---|
| 1164 | | - memcpy(&ctxt.info, &vsi->info, sizeof(struct ice_aqc_vsi_props)); |
|---|
| 1165 | | - |
|---|
| 1166 | | - status = ice_aq_free_vsi(&pf->hw, &ctxt, false, NULL); |
|---|
| 1167 | | - if (status) |
|---|
| 1168 | | - dev_err(&pf->pdev->dev, "Failed to delete VSI %i in FW\n", |
|---|
| 1169 | | - vsi->vsi_num); |
|---|
| 1170 | 2189 | } |
|---|
| 1171 | 2190 | |
|---|
| 1172 | 2191 | /** |
|---|
| .. | .. |
|---|
| 1179 | 2198 | int q_vectors = vsi->num_q_vectors; |
|---|
| 1180 | 2199 | struct ice_pf *pf = vsi->back; |
|---|
| 1181 | 2200 | int base = vsi->base_vector; |
|---|
| 2201 | + struct device *dev; |
|---|
| 1182 | 2202 | int rx_int_idx = 0; |
|---|
| 1183 | 2203 | int tx_int_idx = 0; |
|---|
| 1184 | 2204 | int vector, err; |
|---|
| 1185 | 2205 | int irq_num; |
|---|
| 1186 | 2206 | |
|---|
| 2207 | + dev = ice_pf_to_dev(pf); |
|---|
| 1187 | 2208 | for (vector = 0; vector < q_vectors; vector++) { |
|---|
| 1188 | 2209 | struct ice_q_vector *q_vector = vsi->q_vectors[vector]; |
|---|
| 1189 | 2210 | |
|---|
| .. | .. |
|---|
| 1203 | 2224 | /* skip this unused q_vector */ |
|---|
| 1204 | 2225 | continue; |
|---|
| 1205 | 2226 | } |
|---|
| 1206 | | - err = devm_request_irq(&pf->pdev->dev, |
|---|
| 1207 | | - pf->msix_entries[base + vector].vector, |
|---|
| 1208 | | - vsi->irq_handler, 0, q_vector->name, |
|---|
| 1209 | | - q_vector); |
|---|
| 2227 | + err = devm_request_irq(dev, irq_num, vsi->irq_handler, 0, |
|---|
| 2228 | + q_vector->name, q_vector); |
|---|
| 1210 | 2229 | if (err) { |
|---|
| 1211 | | - netdev_err(vsi->netdev, |
|---|
| 1212 | | - "MSIX request_irq failed, error: %d\n", err); |
|---|
| 2230 | + netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n", |
|---|
| 2231 | + err); |
|---|
| 1213 | 2232 | goto free_q_irqs; |
|---|
| 1214 | 2233 | } |
|---|
| 1215 | 2234 | |
|---|
| 1216 | 2235 | /* register for affinity change notifications */ |
|---|
| 1217 | | - q_vector->affinity_notify.notify = ice_irq_affinity_notify; |
|---|
| 1218 | | - q_vector->affinity_notify.release = ice_irq_affinity_release; |
|---|
| 1219 | | - irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify); |
|---|
| 2236 | + if (!IS_ENABLED(CONFIG_RFS_ACCEL)) { |
|---|
| 2237 | + struct irq_affinity_notify *affinity_notify; |
|---|
| 2238 | + |
|---|
| 2239 | + affinity_notify = &q_vector->affinity_notify; |
|---|
| 2240 | + affinity_notify->notify = ice_irq_affinity_notify; |
|---|
| 2241 | + affinity_notify->release = ice_irq_affinity_release; |
|---|
| 2242 | + irq_set_affinity_notifier(irq_num, affinity_notify); |
|---|
| 2243 | + } |
|---|
| 1220 | 2244 | |
|---|
| 1221 | 2245 | /* assign the mask for this irq */ |
|---|
| 1222 | 2246 | irq_set_affinity_hint(irq_num, &q_vector->affinity_mask); |
|---|
| .. | .. |
|---|
| 1228 | 2252 | free_q_irqs: |
|---|
| 1229 | 2253 | while (vector) { |
|---|
| 1230 | 2254 | vector--; |
|---|
| 1231 | | - irq_num = pf->msix_entries[base + vector].vector, |
|---|
| 1232 | | - irq_set_affinity_notifier(irq_num, NULL); |
|---|
| 2255 | + irq_num = pf->msix_entries[base + vector].vector; |
|---|
| 2256 | + if (!IS_ENABLED(CONFIG_RFS_ACCEL)) |
|---|
| 2257 | + irq_set_affinity_notifier(irq_num, NULL); |
|---|
| 1233 | 2258 | irq_set_affinity_hint(irq_num, NULL); |
|---|
| 1234 | | - devm_free_irq(&pf->pdev->dev, irq_num, &vsi->q_vectors[vector]); |
|---|
| 2259 | + devm_free_irq(dev, irq_num, &vsi->q_vectors[vector]); |
|---|
| 1235 | 2260 | } |
|---|
| 1236 | 2261 | return err; |
|---|
| 1237 | 2262 | } |
|---|
| 1238 | 2263 | |
|---|
| 1239 | 2264 | /** |
|---|
| 1240 | | - * ice_vsi_set_rss_params - Setup RSS capabilities per VSI type |
|---|
| 1241 | | - * @vsi: the VSI being configured |
|---|
| 1242 | | - */ |
|---|
| 1243 | | -static void ice_vsi_set_rss_params(struct ice_vsi *vsi) |
|---|
| 1244 | | -{ |
|---|
| 1245 | | - struct ice_hw_common_caps *cap; |
|---|
| 1246 | | - struct ice_pf *pf = vsi->back; |
|---|
| 1247 | | - |
|---|
| 1248 | | - if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { |
|---|
| 1249 | | - vsi->rss_size = 1; |
|---|
| 1250 | | - return; |
|---|
| 1251 | | - } |
|---|
| 1252 | | - |
|---|
| 1253 | | - cap = &pf->hw.func_caps.common_cap; |
|---|
| 1254 | | - switch (vsi->type) { |
|---|
| 1255 | | - case ICE_VSI_PF: |
|---|
| 1256 | | - /* PF VSI will inherit RSS instance of PF */ |
|---|
| 1257 | | - vsi->rss_table_size = cap->rss_table_size; |
|---|
| 1258 | | - vsi->rss_size = min_t(int, num_online_cpus(), |
|---|
| 1259 | | - BIT(cap->rss_table_entry_width)); |
|---|
| 1260 | | - vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF; |
|---|
| 1261 | | - break; |
|---|
| 1262 | | - default: |
|---|
| 1263 | | - dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type); |
|---|
| 1264 | | - break; |
|---|
| 1265 | | - } |
|---|
| 1266 | | -} |
|---|
| 1267 | | - |
|---|
| 1268 | | -/** |
|---|
| 1269 | | - * ice_vsi_setup_q_map - Setup a VSI queue map |
|---|
| 1270 | | - * @vsi: the VSI being configured |
|---|
| 1271 | | - * @ctxt: VSI context structure |
|---|
| 1272 | | - */ |
|---|
| 1273 | | -static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) |
|---|
| 1274 | | -{ |
|---|
| 1275 | | - u16 offset = 0, qmap = 0, numq_tc; |
|---|
| 1276 | | - u16 pow = 0, max_rss = 0, qcount; |
|---|
| 1277 | | - u16 qcount_tx = vsi->alloc_txq; |
|---|
| 1278 | | - u16 qcount_rx = vsi->alloc_rxq; |
|---|
| 1279 | | - bool ena_tc0 = false; |
|---|
| 1280 | | - int i; |
|---|
| 1281 | | - |
|---|
| 1282 | | - /* at least TC0 should be enabled by default */ |
|---|
| 1283 | | - if (vsi->tc_cfg.numtc) { |
|---|
| 1284 | | - if (!(vsi->tc_cfg.ena_tc & BIT(0))) |
|---|
| 1285 | | - ena_tc0 = true; |
|---|
| 1286 | | - } else { |
|---|
| 1287 | | - ena_tc0 = true; |
|---|
| 1288 | | - } |
|---|
| 1289 | | - |
|---|
| 1290 | | - if (ena_tc0) { |
|---|
| 1291 | | - vsi->tc_cfg.numtc++; |
|---|
| 1292 | | - vsi->tc_cfg.ena_tc |= 1; |
|---|
| 1293 | | - } |
|---|
| 1294 | | - |
|---|
| 1295 | | - numq_tc = qcount_rx / vsi->tc_cfg.numtc; |
|---|
| 1296 | | - |
|---|
| 1297 | | - /* TC mapping is a function of the number of Rx queues assigned to the |
|---|
| 1298 | | - * VSI for each traffic class and the offset of these queues. |
|---|
| 1299 | | - * The first 10 bits are for queue offset for TC0, next 4 bits for no:of |
|---|
| 1300 | | - * queues allocated to TC0. No:of queues is a power-of-2. |
|---|
| 1301 | | - * |
|---|
| 1302 | | - * If TC is not enabled, the queue offset is set to 0, and allocate one |
|---|
| 1303 | | - * queue, this way, traffic for the given TC will be sent to the default |
|---|
| 1304 | | - * queue. |
|---|
| 1305 | | - * |
|---|
| 1306 | | - * Setup number and offset of Rx queues for all TCs for the VSI |
|---|
| 1307 | | - */ |
|---|
| 1308 | | - |
|---|
| 1309 | | - /* qcount will change if RSS is enabled */ |
|---|
| 1310 | | - if (test_bit(ICE_FLAG_RSS_ENA, vsi->back->flags)) { |
|---|
| 1311 | | - if (vsi->type == ICE_VSI_PF) |
|---|
| 1312 | | - max_rss = ICE_MAX_LG_RSS_QS; |
|---|
| 1313 | | - else |
|---|
| 1314 | | - max_rss = ICE_MAX_SMALL_RSS_QS; |
|---|
| 1315 | | - |
|---|
| 1316 | | - qcount = min_t(int, numq_tc, max_rss); |
|---|
| 1317 | | - qcount = min_t(int, qcount, vsi->rss_size); |
|---|
| 1318 | | - } else { |
|---|
| 1319 | | - qcount = numq_tc; |
|---|
| 1320 | | - } |
|---|
| 1321 | | - |
|---|
| 1322 | | - /* find the (rounded up) power-of-2 of qcount */ |
|---|
| 1323 | | - pow = order_base_2(qcount); |
|---|
| 1324 | | - |
|---|
| 1325 | | - for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) { |
|---|
| 1326 | | - if (!(vsi->tc_cfg.ena_tc & BIT(i))) { |
|---|
| 1327 | | - /* TC is not enabled */ |
|---|
| 1328 | | - vsi->tc_cfg.tc_info[i].qoffset = 0; |
|---|
| 1329 | | - vsi->tc_cfg.tc_info[i].qcount = 1; |
|---|
| 1330 | | - ctxt->info.tc_mapping[i] = 0; |
|---|
| 1331 | | - continue; |
|---|
| 1332 | | - } |
|---|
| 1333 | | - |
|---|
| 1334 | | - /* TC is enabled */ |
|---|
| 1335 | | - vsi->tc_cfg.tc_info[i].qoffset = offset; |
|---|
| 1336 | | - vsi->tc_cfg.tc_info[i].qcount = qcount; |
|---|
| 1337 | | - |
|---|
| 1338 | | - qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) & |
|---|
| 1339 | | - ICE_AQ_VSI_TC_Q_OFFSET_M) | |
|---|
| 1340 | | - ((pow << ICE_AQ_VSI_TC_Q_NUM_S) & |
|---|
| 1341 | | - ICE_AQ_VSI_TC_Q_NUM_M); |
|---|
| 1342 | | - offset += qcount; |
|---|
| 1343 | | - ctxt->info.tc_mapping[i] = cpu_to_le16(qmap); |
|---|
| 1344 | | - } |
|---|
| 1345 | | - |
|---|
| 1346 | | - vsi->num_txq = qcount_tx; |
|---|
| 1347 | | - vsi->num_rxq = offset; |
|---|
| 1348 | | - |
|---|
| 1349 | | - /* Rx queue mapping */ |
|---|
| 1350 | | - ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG); |
|---|
| 1351 | | - /* q_mapping buffer holds the info for the first queue allocated for |
|---|
| 1352 | | - * this VSI in the PF space and also the number of queues associated |
|---|
| 1353 | | - * with this VSI. |
|---|
| 1354 | | - */ |
|---|
| 1355 | | - ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]); |
|---|
| 1356 | | - ctxt->info.q_mapping[1] = cpu_to_le16(vsi->num_rxq); |
|---|
| 1357 | | -} |
|---|
| 1358 | | - |
|---|
| 1359 | | -/** |
|---|
| 1360 | | - * ice_set_dflt_vsi_ctx - Set default VSI context before adding a VSI |
|---|
| 1361 | | - * @ctxt: the VSI context being set |
|---|
| 2265 | + * ice_xdp_alloc_setup_rings - Allocate and setup Tx rings for XDP |
|---|
| 2266 | + * @vsi: VSI to setup Tx rings used by XDP |
|---|
| 1362 | 2267 | * |
|---|
| 1363 | | - * This initializes a default VSI context for all sections except the Queues. |
|---|
| 2268 | + * Return 0 on success and negative value on error |
|---|
| 1364 | 2269 | */ |
|---|
| 1365 | | -static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt) |
|---|
| 2270 | +static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi) |
|---|
| 1366 | 2271 | { |
|---|
| 1367 | | - u32 table = 0; |
|---|
| 1368 | | - |
|---|
| 1369 | | - memset(&ctxt->info, 0, sizeof(ctxt->info)); |
|---|
| 1370 | | - /* VSI's should be allocated from shared pool */ |
|---|
| 1371 | | - ctxt->alloc_from_pool = true; |
|---|
| 1372 | | - /* Src pruning enabled by default */ |
|---|
| 1373 | | - ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE; |
|---|
| 1374 | | - /* Traffic from VSI can be sent to LAN */ |
|---|
| 1375 | | - ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA; |
|---|
| 1376 | | - |
|---|
| 1377 | | - /* By default bits 3 and 4 in vlan_flags are 0's which results in legacy |
|---|
| 1378 | | - * behavior (show VLAN, DEI, and UP) in descriptor. Also, allow all |
|---|
| 1379 | | - * packets untagged/tagged. |
|---|
| 1380 | | - */ |
|---|
| 1381 | | - ctxt->info.vlan_flags = ((ICE_AQ_VSI_VLAN_MODE_ALL & |
|---|
| 1382 | | - ICE_AQ_VSI_VLAN_MODE_M) >> |
|---|
| 1383 | | - ICE_AQ_VSI_VLAN_MODE_S); |
|---|
| 1384 | | - |
|---|
| 1385 | | - /* Have 1:1 UP mapping for both ingress/egress tables */ |
|---|
| 1386 | | - table |= ICE_UP_TABLE_TRANSLATE(0, 0); |
|---|
| 1387 | | - table |= ICE_UP_TABLE_TRANSLATE(1, 1); |
|---|
| 1388 | | - table |= ICE_UP_TABLE_TRANSLATE(2, 2); |
|---|
| 1389 | | - table |= ICE_UP_TABLE_TRANSLATE(3, 3); |
|---|
| 1390 | | - table |= ICE_UP_TABLE_TRANSLATE(4, 4); |
|---|
| 1391 | | - table |= ICE_UP_TABLE_TRANSLATE(5, 5); |
|---|
| 1392 | | - table |= ICE_UP_TABLE_TRANSLATE(6, 6); |
|---|
| 1393 | | - table |= ICE_UP_TABLE_TRANSLATE(7, 7); |
|---|
| 1394 | | - ctxt->info.ingress_table = cpu_to_le32(table); |
|---|
| 1395 | | - ctxt->info.egress_table = cpu_to_le32(table); |
|---|
| 1396 | | - /* Have 1:1 UP mapping for outer to inner UP table */ |
|---|
| 1397 | | - ctxt->info.outer_up_table = cpu_to_le32(table); |
|---|
| 1398 | | - /* No Outer tag support outer_tag_flags remains to zero */ |
|---|
| 1399 | | -} |
|---|
| 1400 | | - |
|---|
| 1401 | | -/** |
|---|
| 1402 | | - * ice_set_rss_vsi_ctx - Set RSS VSI context before adding a VSI |
|---|
| 1403 | | - * @ctxt: the VSI context being set |
|---|
| 1404 | | - * @vsi: the VSI being configured |
|---|
| 1405 | | - */ |
|---|
| 1406 | | -static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi) |
|---|
| 1407 | | -{ |
|---|
| 1408 | | - u8 lut_type, hash_type; |
|---|
| 1409 | | - |
|---|
| 1410 | | - switch (vsi->type) { |
|---|
| 1411 | | - case ICE_VSI_PF: |
|---|
| 1412 | | - /* PF VSI will inherit RSS instance of PF */ |
|---|
| 1413 | | - lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF; |
|---|
| 1414 | | - hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ; |
|---|
| 1415 | | - break; |
|---|
| 1416 | | - default: |
|---|
| 1417 | | - dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n", |
|---|
| 1418 | | - vsi->type); |
|---|
| 1419 | | - return; |
|---|
| 1420 | | - } |
|---|
| 1421 | | - |
|---|
| 1422 | | - ctxt->info.q_opt_rss = ((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) & |
|---|
| 1423 | | - ICE_AQ_VSI_Q_OPT_RSS_LUT_M) | |
|---|
| 1424 | | - ((hash_type << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) & |
|---|
| 1425 | | - ICE_AQ_VSI_Q_OPT_RSS_HASH_M); |
|---|
| 1426 | | -} |
|---|
| 1427 | | - |
|---|
| 1428 | | -/** |
|---|
| 1429 | | - * ice_vsi_add - Create a new VSI or fetch preallocated VSI |
|---|
| 1430 | | - * @vsi: the VSI being configured |
|---|
| 1431 | | - * |
|---|
| 1432 | | - * This initializes a VSI context depending on the VSI type to be added and |
|---|
| 1433 | | - * passes it down to the add_vsi aq command to create a new VSI. |
|---|
| 1434 | | - */ |
|---|
| 1435 | | -static int ice_vsi_add(struct ice_vsi *vsi) |
|---|
| 1436 | | -{ |
|---|
| 1437 | | - struct ice_vsi_ctx ctxt = { 0 }; |
|---|
| 1438 | | - struct ice_pf *pf = vsi->back; |
|---|
| 1439 | | - struct ice_hw *hw = &pf->hw; |
|---|
| 1440 | | - int ret = 0; |
|---|
| 1441 | | - |
|---|
| 1442 | | - switch (vsi->type) { |
|---|
| 1443 | | - case ICE_VSI_PF: |
|---|
| 1444 | | - ctxt.flags = ICE_AQ_VSI_TYPE_PF; |
|---|
| 1445 | | - break; |
|---|
| 1446 | | - default: |
|---|
| 1447 | | - return -ENODEV; |
|---|
| 1448 | | - } |
|---|
| 1449 | | - |
|---|
| 1450 | | - ice_set_dflt_vsi_ctx(&ctxt); |
|---|
| 1451 | | - /* if the switch is in VEB mode, allow VSI loopback */ |
|---|
| 1452 | | - if (vsi->vsw->bridge_mode == BRIDGE_MODE_VEB) |
|---|
| 1453 | | - ctxt.info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB; |
|---|
| 1454 | | - |
|---|
| 1455 | | - /* Set LUT type and HASH type if RSS is enabled */ |
|---|
| 1456 | | - if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) |
|---|
| 1457 | | - ice_set_rss_vsi_ctx(&ctxt, vsi); |
|---|
| 1458 | | - |
|---|
| 1459 | | - ctxt.info.sw_id = vsi->port_info->sw_id; |
|---|
| 1460 | | - ice_vsi_setup_q_map(vsi, &ctxt); |
|---|
| 1461 | | - |
|---|
| 1462 | | - ret = ice_aq_add_vsi(hw, &ctxt, NULL); |
|---|
| 1463 | | - if (ret) { |
|---|
| 1464 | | - dev_err(&vsi->back->pdev->dev, |
|---|
| 1465 | | - "Add VSI AQ call failed, err %d\n", ret); |
|---|
| 1466 | | - return -EIO; |
|---|
| 1467 | | - } |
|---|
| 1468 | | - vsi->info = ctxt.info; |
|---|
| 1469 | | - vsi->vsi_num = ctxt.vsi_num; |
|---|
| 1470 | | - |
|---|
| 1471 | | - return ret; |
|---|
| 1472 | | -} |
|---|
| 1473 | | - |
|---|
| 1474 | | -/** |
|---|
| 1475 | | - * ice_vsi_release_msix - Clear the queue to Interrupt mapping in HW |
|---|
| 1476 | | - * @vsi: the VSI being cleaned up |
|---|
| 1477 | | - */ |
|---|
| 1478 | | -static void ice_vsi_release_msix(struct ice_vsi *vsi) |
|---|
| 1479 | | -{ |
|---|
| 1480 | | - struct ice_pf *pf = vsi->back; |
|---|
| 1481 | | - u16 vector = vsi->base_vector; |
|---|
| 1482 | | - struct ice_hw *hw = &pf->hw; |
|---|
| 1483 | | - u32 txq = 0; |
|---|
| 1484 | | - u32 rxq = 0; |
|---|
| 1485 | | - int i, q; |
|---|
| 1486 | | - |
|---|
| 1487 | | - for (i = 0; i < vsi->num_q_vectors; i++, vector++) { |
|---|
| 1488 | | - struct ice_q_vector *q_vector = vsi->q_vectors[i]; |
|---|
| 1489 | | - |
|---|
| 1490 | | - wr32(hw, GLINT_ITR(ICE_RX_ITR, vector), 0); |
|---|
| 1491 | | - wr32(hw, GLINT_ITR(ICE_TX_ITR, vector), 0); |
|---|
| 1492 | | - for (q = 0; q < q_vector->num_ring_tx; q++) { |
|---|
| 1493 | | - wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0); |
|---|
| 1494 | | - txq++; |
|---|
| 1495 | | - } |
|---|
| 1496 | | - |
|---|
| 1497 | | - for (q = 0; q < q_vector->num_ring_rx; q++) { |
|---|
| 1498 | | - wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), 0); |
|---|
| 1499 | | - rxq++; |
|---|
| 1500 | | - } |
|---|
| 1501 | | - } |
|---|
| 1502 | | - |
|---|
| 1503 | | - ice_flush(hw); |
|---|
| 1504 | | -} |
|---|
| 1505 | | - |
|---|
| 1506 | | -/** |
|---|
| 1507 | | - * ice_vsi_clear_rings - Deallocates the Tx and Rx rings for VSI |
|---|
| 1508 | | - * @vsi: the VSI having rings deallocated |
|---|
| 1509 | | - */ |
|---|
| 1510 | | -static void ice_vsi_clear_rings(struct ice_vsi *vsi) |
|---|
| 1511 | | -{ |
|---|
| 2272 | + struct device *dev = ice_pf_to_dev(vsi->back); |
|---|
| 1512 | 2273 | int i; |
|---|
| 1513 | 2274 | |
|---|
| 1514 | | - if (vsi->tx_rings) { |
|---|
| 1515 | | - for (i = 0; i < vsi->alloc_txq; i++) { |
|---|
| 1516 | | - if (vsi->tx_rings[i]) { |
|---|
| 1517 | | - kfree_rcu(vsi->tx_rings[i], rcu); |
|---|
| 1518 | | - vsi->tx_rings[i] = NULL; |
|---|
| 1519 | | - } |
|---|
| 1520 | | - } |
|---|
| 1521 | | - } |
|---|
| 1522 | | - if (vsi->rx_rings) { |
|---|
| 1523 | | - for (i = 0; i < vsi->alloc_rxq; i++) { |
|---|
| 1524 | | - if (vsi->rx_rings[i]) { |
|---|
| 1525 | | - kfree_rcu(vsi->rx_rings[i], rcu); |
|---|
| 1526 | | - vsi->rx_rings[i] = NULL; |
|---|
| 1527 | | - } |
|---|
| 1528 | | - } |
|---|
| 1529 | | - } |
|---|
| 1530 | | -} |
|---|
| 2275 | + for (i = 0; i < vsi->num_xdp_txq; i++) { |
|---|
| 2276 | + u16 xdp_q_idx = vsi->alloc_txq + i; |
|---|
| 2277 | + struct ice_ring *xdp_ring; |
|---|
| 1531 | 2278 | |
|---|
| 1532 | | -/** |
|---|
| 1533 | | - * ice_vsi_alloc_rings - Allocates Tx and Rx rings for the VSI |
|---|
| 1534 | | - * @vsi: VSI which is having rings allocated |
|---|
| 1535 | | - */ |
|---|
| 1536 | | -static int ice_vsi_alloc_rings(struct ice_vsi *vsi) |
|---|
| 1537 | | -{ |
|---|
| 1538 | | - struct ice_pf *pf = vsi->back; |
|---|
| 1539 | | - int i; |
|---|
| 2279 | + xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL); |
|---|
| 1540 | 2280 | |
|---|
| 1541 | | - /* Allocate tx_rings */ |
|---|
| 1542 | | - for (i = 0; i < vsi->alloc_txq; i++) { |
|---|
| 1543 | | - struct ice_ring *ring; |
|---|
| 2281 | + if (!xdp_ring) |
|---|
| 2282 | + goto free_xdp_rings; |
|---|
| 1544 | 2283 | |
|---|
| 1545 | | - /* allocate with kzalloc(), free with kfree_rcu() */ |
|---|
| 1546 | | - ring = kzalloc(sizeof(*ring), GFP_KERNEL); |
|---|
| 1547 | | - |
|---|
| 1548 | | - if (!ring) |
|---|
| 1549 | | - goto err_out; |
|---|
| 1550 | | - |
|---|
| 1551 | | - ring->q_index = i; |
|---|
| 1552 | | - ring->reg_idx = vsi->txq_map[i]; |
|---|
| 1553 | | - ring->ring_active = false; |
|---|
| 1554 | | - ring->vsi = vsi; |
|---|
| 1555 | | - ring->netdev = vsi->netdev; |
|---|
| 1556 | | - ring->dev = &pf->pdev->dev; |
|---|
| 1557 | | - ring->count = vsi->num_desc; |
|---|
| 1558 | | - |
|---|
| 1559 | | - vsi->tx_rings[i] = ring; |
|---|
| 1560 | | - } |
|---|
| 1561 | | - |
|---|
| 1562 | | - /* Allocate rx_rings */ |
|---|
| 1563 | | - for (i = 0; i < vsi->alloc_rxq; i++) { |
|---|
| 1564 | | - struct ice_ring *ring; |
|---|
| 1565 | | - |
|---|
| 1566 | | - /* allocate with kzalloc(), free with kfree_rcu() */ |
|---|
| 1567 | | - ring = kzalloc(sizeof(*ring), GFP_KERNEL); |
|---|
| 1568 | | - if (!ring) |
|---|
| 1569 | | - goto err_out; |
|---|
| 1570 | | - |
|---|
| 1571 | | - ring->q_index = i; |
|---|
| 1572 | | - ring->reg_idx = vsi->rxq_map[i]; |
|---|
| 1573 | | - ring->ring_active = false; |
|---|
| 1574 | | - ring->vsi = vsi; |
|---|
| 1575 | | - ring->netdev = vsi->netdev; |
|---|
| 1576 | | - ring->dev = &pf->pdev->dev; |
|---|
| 1577 | | - ring->count = vsi->num_desc; |
|---|
| 1578 | | - vsi->rx_rings[i] = ring; |
|---|
| 2284 | + xdp_ring->q_index = xdp_q_idx; |
|---|
| 2285 | + xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx]; |
|---|
| 2286 | + xdp_ring->ring_active = false; |
|---|
| 2287 | + xdp_ring->vsi = vsi; |
|---|
| 2288 | + xdp_ring->netdev = NULL; |
|---|
| 2289 | + xdp_ring->dev = dev; |
|---|
| 2290 | + xdp_ring->count = vsi->num_tx_desc; |
|---|
| 2291 | + WRITE_ONCE(vsi->xdp_rings[i], xdp_ring); |
|---|
| 2292 | + if (ice_setup_tx_ring(xdp_ring)) |
|---|
| 2293 | + goto free_xdp_rings; |
|---|
| 2294 | + ice_set_ring_xdp(xdp_ring); |
|---|
| 2295 | + xdp_ring->xsk_pool = ice_xsk_pool(xdp_ring); |
|---|
| 1579 | 2296 | } |
|---|
| 1580 | 2297 | |
|---|
| 1581 | 2298 | return 0; |
|---|
| 1582 | 2299 | |
|---|
| 1583 | | -err_out: |
|---|
| 1584 | | - ice_vsi_clear_rings(vsi); |
|---|
| 2300 | +free_xdp_rings: |
|---|
| 2301 | + for (; i >= 0; i--) |
|---|
| 2302 | + if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc) |
|---|
| 2303 | + ice_free_tx_ring(vsi->xdp_rings[i]); |
|---|
| 1585 | 2304 | return -ENOMEM; |
|---|
| 1586 | 2305 | } |
|---|
| 1587 | 2306 | |
|---|
| 1588 | 2307 | /** |
|---|
| 1589 | | - * ice_vsi_free_irq - Free the irq association with the OS |
|---|
| 1590 | | - * @vsi: the VSI being configured |
|---|
| 2308 | + * ice_vsi_assign_bpf_prog - set or clear bpf prog pointer on VSI |
|---|
| 2309 | + * @vsi: VSI to set the bpf prog on |
|---|
| 2310 | + * @prog: the bpf prog pointer |
|---|
| 1591 | 2311 | */ |
|---|
| 1592 | | -static void ice_vsi_free_irq(struct ice_vsi *vsi) |
|---|
| 2312 | +static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog) |
|---|
| 1593 | 2313 | { |
|---|
| 1594 | | - struct ice_pf *pf = vsi->back; |
|---|
| 1595 | | - int base = vsi->base_vector; |
|---|
| 2314 | + struct bpf_prog *old_prog; |
|---|
| 2315 | + int i; |
|---|
| 1596 | 2316 | |
|---|
| 1597 | | - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) { |
|---|
| 1598 | | - int i; |
|---|
| 2317 | + old_prog = xchg(&vsi->xdp_prog, prog); |
|---|
| 2318 | + if (old_prog) |
|---|
| 2319 | + bpf_prog_put(old_prog); |
|---|
| 1599 | 2320 | |
|---|
| 1600 | | - if (!vsi->q_vectors || !vsi->irqs_ready) |
|---|
| 1601 | | - return; |
|---|
| 1602 | | - |
|---|
| 1603 | | - vsi->irqs_ready = false; |
|---|
| 1604 | | - for (i = 0; i < vsi->num_q_vectors; i++) { |
|---|
| 1605 | | - u16 vector = i + base; |
|---|
| 1606 | | - int irq_num; |
|---|
| 1607 | | - |
|---|
| 1608 | | - irq_num = pf->msix_entries[vector].vector; |
|---|
| 1609 | | - |
|---|
| 1610 | | - /* free only the irqs that were actually requested */ |
|---|
| 1611 | | - if (!vsi->q_vectors[i] || |
|---|
| 1612 | | - !(vsi->q_vectors[i]->num_ring_tx || |
|---|
| 1613 | | - vsi->q_vectors[i]->num_ring_rx)) |
|---|
| 1614 | | - continue; |
|---|
| 1615 | | - |
|---|
| 1616 | | - /* clear the affinity notifier in the IRQ descriptor */ |
|---|
| 1617 | | - irq_set_affinity_notifier(irq_num, NULL); |
|---|
| 1618 | | - |
|---|
| 1619 | | - /* clear the affinity_mask in the IRQ descriptor */ |
|---|
| 1620 | | - irq_set_affinity_hint(irq_num, NULL); |
|---|
| 1621 | | - synchronize_irq(irq_num); |
|---|
| 1622 | | - devm_free_irq(&pf->pdev->dev, irq_num, |
|---|
| 1623 | | - vsi->q_vectors[i]); |
|---|
| 1624 | | - } |
|---|
| 1625 | | - ice_vsi_release_msix(vsi); |
|---|
| 1626 | | - } |
|---|
| 2321 | + ice_for_each_rxq(vsi, i) |
|---|
| 2322 | + WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog); |
|---|
| 1627 | 2323 | } |
|---|
| 1628 | 2324 | |
|---|
| 1629 | 2325 | /** |
|---|
| 1630 | | - * ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW |
|---|
| 1631 | | - * @vsi: the VSI being configured |
|---|
| 2326 | + * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP |
|---|
| 2327 | + * @vsi: VSI to bring up Tx rings used by XDP |
|---|
| 2328 | + * @prog: bpf program that will be assigned to VSI |
|---|
| 2329 | + * |
|---|
| 2330 | + * Return 0 on success and negative value on error |
|---|
| 1632 | 2331 | */ |
|---|
| 1633 | | -static void ice_vsi_cfg_msix(struct ice_vsi *vsi) |
|---|
| 2332 | +int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog) |
|---|
| 1634 | 2333 | { |
|---|
| 2334 | + u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; |
|---|
| 2335 | + int xdp_rings_rem = vsi->num_xdp_txq; |
|---|
| 1635 | 2336 | struct ice_pf *pf = vsi->back; |
|---|
| 1636 | | - u16 vector = vsi->base_vector; |
|---|
| 1637 | | - struct ice_hw *hw = &pf->hw; |
|---|
| 1638 | | - u32 txq = 0, rxq = 0; |
|---|
| 1639 | | - int i, q, itr; |
|---|
| 1640 | | - u8 itr_gran; |
|---|
| 2337 | + struct ice_qs_cfg xdp_qs_cfg = { |
|---|
| 2338 | + .qs_mutex = &pf->avail_q_mutex, |
|---|
| 2339 | + .pf_map = pf->avail_txqs, |
|---|
| 2340 | + .pf_map_size = pf->max_pf_txqs, |
|---|
| 2341 | + .q_count = vsi->num_xdp_txq, |
|---|
| 2342 | + .scatter_count = ICE_MAX_SCATTER_TXQS, |
|---|
| 2343 | + .vsi_map = vsi->txq_map, |
|---|
| 2344 | + .vsi_map_offset = vsi->alloc_txq, |
|---|
| 2345 | + .mapping_mode = ICE_VSI_MAP_CONTIG |
|---|
| 2346 | + }; |
|---|
| 2347 | + enum ice_status status; |
|---|
| 2348 | + struct device *dev; |
|---|
| 2349 | + int i, v_idx; |
|---|
| 1641 | 2350 | |
|---|
| 1642 | | - for (i = 0; i < vsi->num_q_vectors; i++, vector++) { |
|---|
| 1643 | | - struct ice_q_vector *q_vector = vsi->q_vectors[i]; |
|---|
| 2351 | + dev = ice_pf_to_dev(pf); |
|---|
| 2352 | + vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq, |
|---|
| 2353 | + sizeof(*vsi->xdp_rings), GFP_KERNEL); |
|---|
| 2354 | + if (!vsi->xdp_rings) |
|---|
| 2355 | + return -ENOMEM; |
|---|
| 1644 | 2356 | |
|---|
| 1645 | | - itr_gran = hw->itr_gran_200; |
|---|
| 2357 | + vsi->xdp_mapping_mode = xdp_qs_cfg.mapping_mode; |
|---|
| 2358 | + if (__ice_vsi_get_qs(&xdp_qs_cfg)) |
|---|
| 2359 | + goto err_map_xdp; |
|---|
| 1646 | 2360 | |
|---|
| 1647 | | - if (q_vector->num_ring_rx) { |
|---|
| 1648 | | - q_vector->rx.itr = |
|---|
| 1649 | | - ITR_TO_REG(vsi->rx_rings[rxq]->rx_itr_setting, |
|---|
| 1650 | | - itr_gran); |
|---|
| 1651 | | - q_vector->rx.latency_range = ICE_LOW_LATENCY; |
|---|
| 2361 | + if (ice_xdp_alloc_setup_rings(vsi)) |
|---|
| 2362 | + goto clear_xdp_rings; |
|---|
| 2363 | + |
|---|
| 2364 | + /* follow the logic from ice_vsi_map_rings_to_vectors */ |
|---|
| 2365 | + ice_for_each_q_vector(vsi, v_idx) { |
|---|
| 2366 | + struct ice_q_vector *q_vector = vsi->q_vectors[v_idx]; |
|---|
| 2367 | + int xdp_rings_per_v, q_id, q_base; |
|---|
| 2368 | + |
|---|
| 2369 | + xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem, |
|---|
| 2370 | + vsi->num_q_vectors - v_idx); |
|---|
| 2371 | + q_base = vsi->num_xdp_txq - xdp_rings_rem; |
|---|
| 2372 | + |
|---|
| 2373 | + for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) { |
|---|
| 2374 | + struct ice_ring *xdp_ring = vsi->xdp_rings[q_id]; |
|---|
| 2375 | + |
|---|
| 2376 | + xdp_ring->q_vector = q_vector; |
|---|
| 2377 | + xdp_ring->next = q_vector->tx.ring; |
|---|
| 2378 | + q_vector->tx.ring = xdp_ring; |
|---|
| 2379 | + } |
|---|
| 2380 | + xdp_rings_rem -= xdp_rings_per_v; |
|---|
| 2381 | + } |
|---|
| 2382 | + |
|---|
| 2383 | + /* omit the scheduler update if in reset path; XDP queues will be |
|---|
| 2384 | + * taken into account at the end of ice_vsi_rebuild, where |
|---|
| 2385 | + * ice_cfg_vsi_lan is being called |
|---|
| 2386 | + */ |
|---|
| 2387 | + if (ice_is_reset_in_progress(pf->state)) |
|---|
| 2388 | + return 0; |
|---|
| 2389 | + |
|---|
| 2390 | + /* tell the Tx scheduler that right now we have |
|---|
| 2391 | + * additional queues |
|---|
| 2392 | + */ |
|---|
| 2393 | + for (i = 0; i < vsi->tc_cfg.numtc; i++) |
|---|
| 2394 | + max_txqs[i] = vsi->num_txq + vsi->num_xdp_txq; |
|---|
| 2395 | + |
|---|
| 2396 | + status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, |
|---|
| 2397 | + max_txqs); |
|---|
| 2398 | + if (status) { |
|---|
| 2399 | + dev_err(dev, "Failed VSI LAN queue config for XDP, error: %s\n", |
|---|
| 2400 | + ice_stat_str(status)); |
|---|
| 2401 | + goto clear_xdp_rings; |
|---|
| 2402 | + } |
|---|
| 2403 | + |
|---|
| 2404 | + /* assign the prog only when it's not already present on VSI; |
|---|
| 2405 | + * this flow is a subject of both ethtool -L and ndo_bpf flows; |
|---|
| 2406 | + * VSI rebuild that happens under ethtool -L can expose us to |
|---|
| 2407 | + * the bpf_prog refcount issues as we would be swapping same |
|---|
| 2408 | + * bpf_prog pointers from vsi->xdp_prog and calling bpf_prog_put |
|---|
| 2409 | + * on it as it would be treated as an 'old_prog'; for ndo_bpf |
|---|
| 2410 | + * this is not harmful as dev_xdp_install bumps the refcount |
|---|
| 2411 | + * before calling the op exposed by the driver; |
|---|
| 2412 | + */ |
|---|
| 2413 | + if (!ice_is_xdp_ena_vsi(vsi)) |
|---|
| 2414 | + ice_vsi_assign_bpf_prog(vsi, prog); |
|---|
| 2415 | + |
|---|
| 2416 | + return 0; |
|---|
| 2417 | +clear_xdp_rings: |
|---|
| 2418 | + for (i = 0; i < vsi->num_xdp_txq; i++) |
|---|
| 2419 | + if (vsi->xdp_rings[i]) { |
|---|
| 2420 | + kfree_rcu(vsi->xdp_rings[i], rcu); |
|---|
| 2421 | + vsi->xdp_rings[i] = NULL; |
|---|
| 1652 | 2422 | } |
|---|
| 1653 | 2423 | |
|---|
| 1654 | | - if (q_vector->num_ring_tx) { |
|---|
| 1655 | | - q_vector->tx.itr = |
|---|
| 1656 | | - ITR_TO_REG(vsi->tx_rings[txq]->tx_itr_setting, |
|---|
| 1657 | | - itr_gran); |
|---|
| 1658 | | - q_vector->tx.latency_range = ICE_LOW_LATENCY; |
|---|
| 1659 | | - } |
|---|
| 1660 | | - wr32(hw, GLINT_ITR(ICE_RX_ITR, vector), q_vector->rx.itr); |
|---|
| 1661 | | - wr32(hw, GLINT_ITR(ICE_TX_ITR, vector), q_vector->tx.itr); |
|---|
| 2424 | +err_map_xdp: |
|---|
| 2425 | + mutex_lock(&pf->avail_q_mutex); |
|---|
| 2426 | + for (i = 0; i < vsi->num_xdp_txq; i++) { |
|---|
| 2427 | + clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs); |
|---|
| 2428 | + vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX; |
|---|
| 2429 | + } |
|---|
| 2430 | + mutex_unlock(&pf->avail_q_mutex); |
|---|
| 1662 | 2431 | |
|---|
| 1663 | | - /* Both Transmit Queue Interrupt Cause Control register |
|---|
| 1664 | | - * and Receive Queue Interrupt Cause control register |
|---|
| 1665 | | - * expects MSIX_INDX field to be the vector index |
|---|
| 1666 | | - * within the function space and not the absolute |
|---|
| 1667 | | - * vector index across PF or across device. |
|---|
| 1668 | | - * For SR-IOV VF VSIs queue vector index always starts |
|---|
| 1669 | | - * with 1 since first vector index(0) is used for OICR |
|---|
| 1670 | | - * in VF space. Since VMDq and other PF VSIs are withtin |
|---|
| 1671 | | - * the PF function space, use the vector index thats |
|---|
| 1672 | | - * tracked for this PF. |
|---|
| 1673 | | - */ |
|---|
| 1674 | | - for (q = 0; q < q_vector->num_ring_tx; q++) { |
|---|
| 1675 | | - u32 val; |
|---|
| 2432 | + devm_kfree(dev, vsi->xdp_rings); |
|---|
| 2433 | + return -ENOMEM; |
|---|
| 2434 | +} |
|---|
| 1676 | 2435 | |
|---|
| 1677 | | - itr = ICE_TX_ITR; |
|---|
| 1678 | | - val = QINT_TQCTL_CAUSE_ENA_M | |
|---|
| 1679 | | - (itr << QINT_TQCTL_ITR_INDX_S) | |
|---|
| 1680 | | - (vector << QINT_TQCTL_MSIX_INDX_S); |
|---|
| 1681 | | - wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val); |
|---|
| 1682 | | - txq++; |
|---|
| 2436 | +/** |
|---|
| 2437 | + * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings |
|---|
| 2438 | + * @vsi: VSI to remove XDP rings |
|---|
| 2439 | + * |
|---|
| 2440 | + * Detach XDP rings from irq vectors, clean up the PF bitmap and free |
|---|
| 2441 | + * resources |
|---|
| 2442 | + */ |
|---|
| 2443 | +int ice_destroy_xdp_rings(struct ice_vsi *vsi) |
|---|
| 2444 | +{ |
|---|
| 2445 | + u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; |
|---|
| 2446 | + struct ice_pf *pf = vsi->back; |
|---|
| 2447 | + int i, v_idx; |
|---|
| 2448 | + |
|---|
| 2449 | + /* q_vectors are freed in reset path so there's no point in detaching |
|---|
| 2450 | + * rings; in case of rebuild being triggered not from reset bits |
|---|
| 2451 | + * in pf->state won't be set, so additionally check first q_vector |
|---|
| 2452 | + * against NULL |
|---|
| 2453 | + */ |
|---|
| 2454 | + if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0]) |
|---|
| 2455 | + goto free_qmap; |
|---|
| 2456 | + |
|---|
| 2457 | + ice_for_each_q_vector(vsi, v_idx) { |
|---|
| 2458 | + struct ice_q_vector *q_vector = vsi->q_vectors[v_idx]; |
|---|
| 2459 | + struct ice_ring *ring; |
|---|
| 2460 | + |
|---|
| 2461 | + ice_for_each_ring(ring, q_vector->tx) |
|---|
| 2462 | + if (!ring->tx_buf || !ice_ring_is_xdp(ring)) |
|---|
| 2463 | + break; |
|---|
| 2464 | + |
|---|
| 2465 | + /* restore the value of last node prior to XDP setup */ |
|---|
| 2466 | + q_vector->tx.ring = ring; |
|---|
| 2467 | + } |
|---|
| 2468 | + |
|---|
| 2469 | +free_qmap: |
|---|
| 2470 | + mutex_lock(&pf->avail_q_mutex); |
|---|
| 2471 | + for (i = 0; i < vsi->num_xdp_txq; i++) { |
|---|
| 2472 | + clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs); |
|---|
| 2473 | + vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX; |
|---|
| 2474 | + } |
|---|
| 2475 | + mutex_unlock(&pf->avail_q_mutex); |
|---|
| 2476 | + |
|---|
| 2477 | + for (i = 0; i < vsi->num_xdp_txq; i++) |
|---|
| 2478 | + if (vsi->xdp_rings[i]) { |
|---|
| 2479 | + if (vsi->xdp_rings[i]->desc) { |
|---|
| 2480 | + synchronize_rcu(); |
|---|
| 2481 | + ice_free_tx_ring(vsi->xdp_rings[i]); |
|---|
| 2482 | + } |
|---|
| 2483 | + kfree_rcu(vsi->xdp_rings[i], rcu); |
|---|
| 2484 | + vsi->xdp_rings[i] = NULL; |
|---|
| 1683 | 2485 | } |
|---|
| 1684 | 2486 | |
|---|
| 1685 | | - for (q = 0; q < q_vector->num_ring_rx; q++) { |
|---|
| 1686 | | - u32 val; |
|---|
| 2487 | + devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings); |
|---|
| 2488 | + vsi->xdp_rings = NULL; |
|---|
| 1687 | 2489 | |
|---|
| 1688 | | - itr = ICE_RX_ITR; |
|---|
| 1689 | | - val = QINT_RQCTL_CAUSE_ENA_M | |
|---|
| 1690 | | - (itr << QINT_RQCTL_ITR_INDX_S) | |
|---|
| 1691 | | - (vector << QINT_RQCTL_MSIX_INDX_S); |
|---|
| 1692 | | - wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val); |
|---|
| 1693 | | - rxq++; |
|---|
| 2490 | + if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0]) |
|---|
| 2491 | + return 0; |
|---|
| 2492 | + |
|---|
| 2493 | + ice_vsi_assign_bpf_prog(vsi, NULL); |
|---|
| 2494 | + |
|---|
| 2495 | + /* notify Tx scheduler that we destroyed XDP queues and bring |
|---|
| 2496 | + * back the old number of child nodes |
|---|
| 2497 | + */ |
|---|
| 2498 | + for (i = 0; i < vsi->tc_cfg.numtc; i++) |
|---|
| 2499 | + max_txqs[i] = vsi->num_txq; |
|---|
| 2500 | + |
|---|
| 2501 | + /* change number of XDP Tx queues to 0 */ |
|---|
| 2502 | + vsi->num_xdp_txq = 0; |
|---|
| 2503 | + |
|---|
| 2504 | + return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, |
|---|
| 2505 | + max_txqs); |
|---|
| 2506 | +} |
|---|
| 2507 | + |
|---|
| 2508 | +/** |
|---|
| 2509 | + * ice_xdp_setup_prog - Add or remove XDP eBPF program |
|---|
| 2510 | + * @vsi: VSI to setup XDP for |
|---|
| 2511 | + * @prog: XDP program |
|---|
| 2512 | + * @extack: netlink extended ack |
|---|
| 2513 | + */ |
|---|
| 2514 | +static int |
|---|
| 2515 | +ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog, |
|---|
| 2516 | + struct netlink_ext_ack *extack) |
|---|
| 2517 | +{ |
|---|
| 2518 | + int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD; |
|---|
| 2519 | + bool if_running = netif_running(vsi->netdev); |
|---|
| 2520 | + int ret = 0, xdp_ring_err = 0; |
|---|
| 2521 | + |
|---|
| 2522 | + if (frame_size > vsi->rx_buf_len) { |
|---|
| 2523 | + NL_SET_ERR_MSG_MOD(extack, "MTU too large for loading XDP"); |
|---|
| 2524 | + return -EOPNOTSUPP; |
|---|
| 2525 | + } |
|---|
| 2526 | + |
|---|
| 2527 | + /* need to stop netdev while setting up the program for Rx rings */ |
|---|
| 2528 | + if (if_running && !test_and_set_bit(__ICE_DOWN, vsi->state)) { |
|---|
| 2529 | + ret = ice_down(vsi); |
|---|
| 2530 | + if (ret) { |
|---|
| 2531 | + NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed"); |
|---|
| 2532 | + return ret; |
|---|
| 1694 | 2533 | } |
|---|
| 1695 | 2534 | } |
|---|
| 1696 | 2535 | |
|---|
| 1697 | | - ice_flush(hw); |
|---|
| 2536 | + if (!ice_is_xdp_ena_vsi(vsi) && prog) { |
|---|
| 2537 | + vsi->num_xdp_txq = vsi->alloc_rxq; |
|---|
| 2538 | + xdp_ring_err = ice_prepare_xdp_rings(vsi, prog); |
|---|
| 2539 | + if (xdp_ring_err) |
|---|
| 2540 | + NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed"); |
|---|
| 2541 | + } else if (ice_is_xdp_ena_vsi(vsi) && !prog) { |
|---|
| 2542 | + xdp_ring_err = ice_destroy_xdp_rings(vsi); |
|---|
| 2543 | + if (xdp_ring_err) |
|---|
| 2544 | + NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed"); |
|---|
| 2545 | + } else { |
|---|
| 2546 | + /* safe to call even when prog == vsi->xdp_prog as |
|---|
| 2547 | + * dev_xdp_install in net/core/dev.c incremented prog's |
|---|
| 2548 | + * refcount so corresponding bpf_prog_put won't cause |
|---|
| 2549 | + * underflow |
|---|
| 2550 | + */ |
|---|
| 2551 | + ice_vsi_assign_bpf_prog(vsi, prog); |
|---|
| 2552 | + } |
|---|
| 2553 | + |
|---|
| 2554 | + if (if_running) |
|---|
| 2555 | + ret = ice_up(vsi); |
|---|
| 2556 | + |
|---|
| 2557 | + if (!ret && prog && vsi->xsk_pools) { |
|---|
| 2558 | + int i; |
|---|
| 2559 | + |
|---|
| 2560 | + ice_for_each_rxq(vsi, i) { |
|---|
| 2561 | + struct ice_ring *rx_ring = vsi->rx_rings[i]; |
|---|
| 2562 | + |
|---|
| 2563 | + if (rx_ring->xsk_pool) |
|---|
| 2564 | + napi_schedule(&rx_ring->q_vector->napi); |
|---|
| 2565 | + } |
|---|
| 2566 | + } |
|---|
| 2567 | + |
|---|
| 2568 | + return (ret || xdp_ring_err) ? -ENOMEM : 0; |
|---|
| 2569 | +} |
|---|
| 2570 | + |
|---|
| 2571 | +/** |
|---|
| 2572 | + * ice_xdp_safe_mode - XDP handler for safe mode |
|---|
| 2573 | + * @dev: netdevice |
|---|
| 2574 | + * @xdp: XDP command |
|---|
| 2575 | + */ |
|---|
| 2576 | +static int ice_xdp_safe_mode(struct net_device __always_unused *dev, |
|---|
| 2577 | + struct netdev_bpf *xdp) |
|---|
| 2578 | +{ |
|---|
| 2579 | + NL_SET_ERR_MSG_MOD(xdp->extack, |
|---|
| 2580 | + "Please provide working DDP firmware package in order to use XDP\n" |
|---|
| 2581 | + "Refer to Documentation/networking/device_drivers/ethernet/intel/ice.rst"); |
|---|
| 2582 | + return -EOPNOTSUPP; |
|---|
| 2583 | +} |
|---|
| 2584 | + |
|---|
| 2585 | +/** |
|---|
| 2586 | + * ice_xdp - implements XDP handler |
|---|
| 2587 | + * @dev: netdevice |
|---|
| 2588 | + * @xdp: XDP command |
|---|
| 2589 | + */ |
|---|
| 2590 | +static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp) |
|---|
| 2591 | +{ |
|---|
| 2592 | + struct ice_netdev_priv *np = netdev_priv(dev); |
|---|
| 2593 | + struct ice_vsi *vsi = np->vsi; |
|---|
| 2594 | + |
|---|
| 2595 | + if (vsi->type != ICE_VSI_PF) { |
|---|
| 2596 | + NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF VSI"); |
|---|
| 2597 | + return -EINVAL; |
|---|
| 2598 | + } |
|---|
| 2599 | + |
|---|
| 2600 | + switch (xdp->command) { |
|---|
| 2601 | + case XDP_SETUP_PROG: |
|---|
| 2602 | + return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack); |
|---|
| 2603 | + case XDP_SETUP_XSK_POOL: |
|---|
| 2604 | + return ice_xsk_pool_setup(vsi, xdp->xsk.pool, |
|---|
| 2605 | + xdp->xsk.queue_id); |
|---|
| 2606 | + default: |
|---|
| 2607 | + return -EINVAL; |
|---|
| 2608 | + } |
|---|
| 1698 | 2609 | } |
|---|
| 1699 | 2610 | |
|---|
| 1700 | 2611 | /** |
|---|
| .. | .. |
|---|
| 1706 | 2617 | struct ice_hw *hw = &pf->hw; |
|---|
| 1707 | 2618 | u32 val; |
|---|
| 1708 | 2619 | |
|---|
| 2620 | + /* Disable anti-spoof detection interrupt to prevent spurious event |
|---|
| 2621 | + * interrupts during a function reset. Anti-spoof functionally is |
|---|
| 2622 | + * still supported. |
|---|
| 2623 | + */ |
|---|
| 2624 | + val = rd32(hw, GL_MDCK_TX_TDPU); |
|---|
| 2625 | + val |= GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M; |
|---|
| 2626 | + wr32(hw, GL_MDCK_TX_TDPU, val); |
|---|
| 2627 | + |
|---|
| 1709 | 2628 | /* clear things first */ |
|---|
| 1710 | 2629 | wr32(hw, PFINT_OICR_ENA, 0); /* disable all */ |
|---|
| 1711 | 2630 | rd32(hw, PFINT_OICR); /* read to clear */ |
|---|
| .. | .. |
|---|
| 1714 | 2633 | PFINT_OICR_MAL_DETECT_M | |
|---|
| 1715 | 2634 | PFINT_OICR_GRST_M | |
|---|
| 1716 | 2635 | PFINT_OICR_PCI_EXCEPTION_M | |
|---|
| 2636 | + PFINT_OICR_VFLR_M | |
|---|
| 1717 | 2637 | PFINT_OICR_HMC_ERR_M | |
|---|
| 1718 | 2638 | PFINT_OICR_PE_CRITERR_M); |
|---|
| 1719 | 2639 | |
|---|
| .. | .. |
|---|
| 1734 | 2654 | struct ice_pf *pf = (struct ice_pf *)data; |
|---|
| 1735 | 2655 | struct ice_hw *hw = &pf->hw; |
|---|
| 1736 | 2656 | irqreturn_t ret = IRQ_NONE; |
|---|
| 2657 | + struct device *dev; |
|---|
| 1737 | 2658 | u32 oicr, ena_mask; |
|---|
| 1738 | 2659 | |
|---|
| 2660 | + dev = ice_pf_to_dev(pf); |
|---|
| 1739 | 2661 | set_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state); |
|---|
| 2662 | + set_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state); |
|---|
| 1740 | 2663 | |
|---|
| 1741 | 2664 | oicr = rd32(hw, PFINT_OICR); |
|---|
| 1742 | 2665 | ena_mask = rd32(hw, PFINT_OICR_ENA); |
|---|
| 1743 | 2666 | |
|---|
| 2667 | + if (oicr & PFINT_OICR_SWINT_M) { |
|---|
| 2668 | + ena_mask &= ~PFINT_OICR_SWINT_M; |
|---|
| 2669 | + pf->sw_int_count++; |
|---|
| 2670 | + } |
|---|
| 2671 | + |
|---|
| 2672 | + if (oicr & PFINT_OICR_MAL_DETECT_M) { |
|---|
| 2673 | + ena_mask &= ~PFINT_OICR_MAL_DETECT_M; |
|---|
| 2674 | + set_bit(__ICE_MDD_EVENT_PENDING, pf->state); |
|---|
| 2675 | + } |
|---|
| 2676 | + if (oicr & PFINT_OICR_VFLR_M) { |
|---|
| 2677 | + /* disable any further VFLR event notifications */ |
|---|
| 2678 | + if (test_bit(__ICE_VF_RESETS_DISABLED, pf->state)) { |
|---|
| 2679 | + u32 reg = rd32(hw, PFINT_OICR_ENA); |
|---|
| 2680 | + |
|---|
| 2681 | + reg &= ~PFINT_OICR_VFLR_M; |
|---|
| 2682 | + wr32(hw, PFINT_OICR_ENA, reg); |
|---|
| 2683 | + } else { |
|---|
| 2684 | + ena_mask &= ~PFINT_OICR_VFLR_M; |
|---|
| 2685 | + set_bit(__ICE_VFLR_EVENT_PENDING, pf->state); |
|---|
| 2686 | + } |
|---|
| 2687 | + } |
|---|
| 2688 | + |
|---|
| 1744 | 2689 | if (oicr & PFINT_OICR_GRST_M) { |
|---|
| 1745 | 2690 | u32 reset; |
|---|
| 2691 | + |
|---|
| 1746 | 2692 | /* we have a reset warning */ |
|---|
| 1747 | 2693 | ena_mask &= ~PFINT_OICR_GRST_M; |
|---|
| 1748 | 2694 | reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >> |
|---|
| .. | .. |
|---|
| 1752 | 2698 | pf->corer_count++; |
|---|
| 1753 | 2699 | else if (reset == ICE_RESET_GLOBR) |
|---|
| 1754 | 2700 | pf->globr_count++; |
|---|
| 1755 | | - else |
|---|
| 2701 | + else if (reset == ICE_RESET_EMPR) |
|---|
| 1756 | 2702 | pf->empr_count++; |
|---|
| 2703 | + else |
|---|
| 2704 | + dev_dbg(dev, "Invalid reset type %d\n", reset); |
|---|
| 1757 | 2705 | |
|---|
| 1758 | 2706 | /* If a reset cycle isn't already in progress, we set a bit in |
|---|
| 1759 | 2707 | * pf->state so that the service task can start a reset/rebuild. |
|---|
| 1760 | 2708 | * We also make note of which reset happened so that peer |
|---|
| 1761 | 2709 | * devices/drivers can be informed. |
|---|
| 1762 | 2710 | */ |
|---|
| 1763 | | - if (!test_and_set_bit(__ICE_RESET_RECOVERY_PENDING, |
|---|
| 1764 | | - pf->state)) { |
|---|
| 2711 | + if (!test_and_set_bit(__ICE_RESET_OICR_RECV, pf->state)) { |
|---|
| 1765 | 2712 | if (reset == ICE_RESET_CORER) |
|---|
| 1766 | 2713 | set_bit(__ICE_CORER_RECV, pf->state); |
|---|
| 1767 | 2714 | else if (reset == ICE_RESET_GLOBR) |
|---|
| .. | .. |
|---|
| 1775 | 2722 | * is received and set back to false after the driver |
|---|
| 1776 | 2723 | * has determined that the hardware is out of reset. |
|---|
| 1777 | 2724 | * |
|---|
| 1778 | | - * __ICE_RESET_RECOVERY_PENDING in pf->state indicates |
|---|
| 2725 | + * __ICE_RESET_OICR_RECV in pf->state indicates |
|---|
| 1779 | 2726 | * that a post reset rebuild is required before the |
|---|
| 1780 | 2727 | * driver is operational again. This is set above. |
|---|
| 1781 | 2728 | * |
|---|
| .. | .. |
|---|
| 1788 | 2735 | |
|---|
| 1789 | 2736 | if (oicr & PFINT_OICR_HMC_ERR_M) { |
|---|
| 1790 | 2737 | ena_mask &= ~PFINT_OICR_HMC_ERR_M; |
|---|
| 1791 | | - dev_dbg(&pf->pdev->dev, |
|---|
| 1792 | | - "HMC Error interrupt - info 0x%x, data 0x%x\n", |
|---|
| 2738 | + dev_dbg(dev, "HMC Error interrupt - info 0x%x, data 0x%x\n", |
|---|
| 1793 | 2739 | rd32(hw, PFHMC_ERRORINFO), |
|---|
| 1794 | 2740 | rd32(hw, PFHMC_ERRORDATA)); |
|---|
| 1795 | 2741 | } |
|---|
| 1796 | 2742 | |
|---|
| 1797 | | - /* Report and mask off any remaining unexpected interrupts */ |
|---|
| 2743 | + /* Report any remaining unexpected interrupts */ |
|---|
| 1798 | 2744 | oicr &= ena_mask; |
|---|
| 1799 | 2745 | if (oicr) { |
|---|
| 1800 | | - dev_dbg(&pf->pdev->dev, "unhandled interrupt oicr=0x%08x\n", |
|---|
| 1801 | | - oicr); |
|---|
| 2746 | + dev_dbg(dev, "unhandled interrupt oicr=0x%08x\n", oicr); |
|---|
| 1802 | 2747 | /* If a critical error is pending there is no choice but to |
|---|
| 1803 | 2748 | * reset the device. |
|---|
| 1804 | 2749 | */ |
|---|
| .. | .. |
|---|
| 1808 | 2753 | set_bit(__ICE_PFR_REQ, pf->state); |
|---|
| 1809 | 2754 | ice_service_task_schedule(pf); |
|---|
| 1810 | 2755 | } |
|---|
| 1811 | | - ena_mask &= ~oicr; |
|---|
| 1812 | 2756 | } |
|---|
| 1813 | 2757 | ret = IRQ_HANDLED; |
|---|
| 1814 | 2758 | |
|---|
| 1815 | | - /* re-enable interrupt causes that are not handled during this pass */ |
|---|
| 1816 | | - wr32(hw, PFINT_OICR_ENA, ena_mask); |
|---|
| 1817 | | - if (!test_bit(__ICE_DOWN, pf->state)) { |
|---|
| 1818 | | - ice_service_task_schedule(pf); |
|---|
| 1819 | | - ice_irq_dynamic_ena(hw, NULL, NULL); |
|---|
| 1820 | | - } |
|---|
| 2759 | + ice_service_task_schedule(pf); |
|---|
| 2760 | + ice_irq_dynamic_ena(hw, NULL, NULL); |
|---|
| 1821 | 2761 | |
|---|
| 1822 | 2762 | return ret; |
|---|
| 1823 | 2763 | } |
|---|
| 1824 | 2764 | |
|---|
| 1825 | 2765 | /** |
|---|
| 1826 | | - * ice_vsi_map_rings_to_vectors - Map VSI rings to interrupt vectors |
|---|
| 1827 | | - * @vsi: the VSI being configured |
|---|
| 1828 | | - * |
|---|
| 1829 | | - * This function maps descriptor rings to the queue-specific vectors allotted |
|---|
| 1830 | | - * through the MSI-X enabling code. On a constrained vector budget, we map Tx |
|---|
| 1831 | | - * and Rx rings to the vector as "efficiently" as possible. |
|---|
| 2766 | + * ice_dis_ctrlq_interrupts - disable control queue interrupts |
|---|
| 2767 | + * @hw: pointer to HW structure |
|---|
| 1832 | 2768 | */ |
|---|
| 1833 | | -static void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi) |
|---|
| 2769 | +static void ice_dis_ctrlq_interrupts(struct ice_hw *hw) |
|---|
| 1834 | 2770 | { |
|---|
| 1835 | | - int q_vectors = vsi->num_q_vectors; |
|---|
| 1836 | | - int tx_rings_rem, rx_rings_rem; |
|---|
| 1837 | | - int v_id; |
|---|
| 2771 | + /* disable Admin queue Interrupt causes */ |
|---|
| 2772 | + wr32(hw, PFINT_FW_CTL, |
|---|
| 2773 | + rd32(hw, PFINT_FW_CTL) & ~PFINT_FW_CTL_CAUSE_ENA_M); |
|---|
| 1838 | 2774 | |
|---|
| 1839 | | - /* initially assigning remaining rings count to VSIs num queue value */ |
|---|
| 1840 | | - tx_rings_rem = vsi->num_txq; |
|---|
| 1841 | | - rx_rings_rem = vsi->num_rxq; |
|---|
| 2775 | + /* disable Mailbox queue Interrupt causes */ |
|---|
| 2776 | + wr32(hw, PFINT_MBX_CTL, |
|---|
| 2777 | + rd32(hw, PFINT_MBX_CTL) & ~PFINT_MBX_CTL_CAUSE_ENA_M); |
|---|
| 1842 | 2778 | |
|---|
| 1843 | | - for (v_id = 0; v_id < q_vectors; v_id++) { |
|---|
| 1844 | | - struct ice_q_vector *q_vector = vsi->q_vectors[v_id]; |
|---|
| 1845 | | - int tx_rings_per_v, rx_rings_per_v, q_id, q_base; |
|---|
| 2779 | + /* disable Control queue Interrupt causes */ |
|---|
| 2780 | + wr32(hw, PFINT_OICR_CTL, |
|---|
| 2781 | + rd32(hw, PFINT_OICR_CTL) & ~PFINT_OICR_CTL_CAUSE_ENA_M); |
|---|
| 1846 | 2782 | |
|---|
| 1847 | | - /* Tx rings mapping to vector */ |
|---|
| 1848 | | - tx_rings_per_v = DIV_ROUND_UP(tx_rings_rem, q_vectors - v_id); |
|---|
| 1849 | | - q_vector->num_ring_tx = tx_rings_per_v; |
|---|
| 1850 | | - q_vector->tx.ring = NULL; |
|---|
| 1851 | | - q_base = vsi->num_txq - tx_rings_rem; |
|---|
| 1852 | | - |
|---|
| 1853 | | - for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) { |
|---|
| 1854 | | - struct ice_ring *tx_ring = vsi->tx_rings[q_id]; |
|---|
| 1855 | | - |
|---|
| 1856 | | - tx_ring->q_vector = q_vector; |
|---|
| 1857 | | - tx_ring->next = q_vector->tx.ring; |
|---|
| 1858 | | - q_vector->tx.ring = tx_ring; |
|---|
| 1859 | | - } |
|---|
| 1860 | | - tx_rings_rem -= tx_rings_per_v; |
|---|
| 1861 | | - |
|---|
| 1862 | | - /* Rx rings mapping to vector */ |
|---|
| 1863 | | - rx_rings_per_v = DIV_ROUND_UP(rx_rings_rem, q_vectors - v_id); |
|---|
| 1864 | | - q_vector->num_ring_rx = rx_rings_per_v; |
|---|
| 1865 | | - q_vector->rx.ring = NULL; |
|---|
| 1866 | | - q_base = vsi->num_rxq - rx_rings_rem; |
|---|
| 1867 | | - |
|---|
| 1868 | | - for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) { |
|---|
| 1869 | | - struct ice_ring *rx_ring = vsi->rx_rings[q_id]; |
|---|
| 1870 | | - |
|---|
| 1871 | | - rx_ring->q_vector = q_vector; |
|---|
| 1872 | | - rx_ring->next = q_vector->rx.ring; |
|---|
| 1873 | | - q_vector->rx.ring = rx_ring; |
|---|
| 1874 | | - } |
|---|
| 1875 | | - rx_rings_rem -= rx_rings_per_v; |
|---|
| 1876 | | - } |
|---|
| 1877 | | -} |
|---|
| 1878 | | - |
|---|
| 1879 | | -/** |
|---|
| 1880 | | - * ice_vsi_set_num_qs - Set num queues, descriptors and vectors for a VSI |
|---|
| 1881 | | - * @vsi: the VSI being configured |
|---|
| 1882 | | - * |
|---|
| 1883 | | - * Return 0 on success and a negative value on error |
|---|
| 1884 | | - */ |
|---|
| 1885 | | -static void ice_vsi_set_num_qs(struct ice_vsi *vsi) |
|---|
| 1886 | | -{ |
|---|
| 1887 | | - struct ice_pf *pf = vsi->back; |
|---|
| 1888 | | - |
|---|
| 1889 | | - switch (vsi->type) { |
|---|
| 1890 | | - case ICE_VSI_PF: |
|---|
| 1891 | | - vsi->alloc_txq = pf->num_lan_tx; |
|---|
| 1892 | | - vsi->alloc_rxq = pf->num_lan_rx; |
|---|
| 1893 | | - vsi->num_desc = ALIGN(ICE_DFLT_NUM_DESC, ICE_REQ_DESC_MULTIPLE); |
|---|
| 1894 | | - vsi->num_q_vectors = max_t(int, pf->num_lan_rx, pf->num_lan_tx); |
|---|
| 1895 | | - break; |
|---|
| 1896 | | - default: |
|---|
| 1897 | | - dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n", |
|---|
| 1898 | | - vsi->type); |
|---|
| 1899 | | - break; |
|---|
| 1900 | | - } |
|---|
| 1901 | | -} |
|---|
| 1902 | | - |
|---|
| 1903 | | -/** |
|---|
| 1904 | | - * ice_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi |
|---|
| 1905 | | - * @vsi: VSI pointer |
|---|
| 1906 | | - * @alloc_qvectors: a bool to specify if q_vectors need to be allocated. |
|---|
| 1907 | | - * |
|---|
| 1908 | | - * On error: returns error code (negative) |
|---|
| 1909 | | - * On success: returns 0 |
|---|
| 1910 | | - */ |
|---|
| 1911 | | -static int ice_vsi_alloc_arrays(struct ice_vsi *vsi, bool alloc_qvectors) |
|---|
| 1912 | | -{ |
|---|
| 1913 | | - struct ice_pf *pf = vsi->back; |
|---|
| 1914 | | - |
|---|
| 1915 | | - /* allocate memory for both Tx and Rx ring pointers */ |
|---|
| 1916 | | - vsi->tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq, |
|---|
| 1917 | | - sizeof(struct ice_ring *), GFP_KERNEL); |
|---|
| 1918 | | - if (!vsi->tx_rings) |
|---|
| 1919 | | - goto err_txrings; |
|---|
| 1920 | | - |
|---|
| 1921 | | - vsi->rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq, |
|---|
| 1922 | | - sizeof(struct ice_ring *), GFP_KERNEL); |
|---|
| 1923 | | - if (!vsi->rx_rings) |
|---|
| 1924 | | - goto err_rxrings; |
|---|
| 1925 | | - |
|---|
| 1926 | | - if (alloc_qvectors) { |
|---|
| 1927 | | - /* allocate memory for q_vector pointers */ |
|---|
| 1928 | | - vsi->q_vectors = devm_kcalloc(&pf->pdev->dev, |
|---|
| 1929 | | - vsi->num_q_vectors, |
|---|
| 1930 | | - sizeof(struct ice_q_vector *), |
|---|
| 1931 | | - GFP_KERNEL); |
|---|
| 1932 | | - if (!vsi->q_vectors) |
|---|
| 1933 | | - goto err_vectors; |
|---|
| 1934 | | - } |
|---|
| 1935 | | - |
|---|
| 1936 | | - return 0; |
|---|
| 1937 | | - |
|---|
| 1938 | | -err_vectors: |
|---|
| 1939 | | - devm_kfree(&pf->pdev->dev, vsi->rx_rings); |
|---|
| 1940 | | -err_rxrings: |
|---|
| 1941 | | - devm_kfree(&pf->pdev->dev, vsi->tx_rings); |
|---|
| 1942 | | -err_txrings: |
|---|
| 1943 | | - return -ENOMEM; |
|---|
| 1944 | | -} |
|---|
| 1945 | | - |
|---|
| 1946 | | -/** |
|---|
| 1947 | | - * ice_msix_clean_rings - MSIX mode Interrupt Handler |
|---|
| 1948 | | - * @irq: interrupt number |
|---|
| 1949 | | - * @data: pointer to a q_vector |
|---|
| 1950 | | - */ |
|---|
| 1951 | | -static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data) |
|---|
| 1952 | | -{ |
|---|
| 1953 | | - struct ice_q_vector *q_vector = (struct ice_q_vector *)data; |
|---|
| 1954 | | - |
|---|
| 1955 | | - if (!q_vector->tx.ring && !q_vector->rx.ring) |
|---|
| 1956 | | - return IRQ_HANDLED; |
|---|
| 1957 | | - |
|---|
| 1958 | | - napi_schedule(&q_vector->napi); |
|---|
| 1959 | | - |
|---|
| 1960 | | - return IRQ_HANDLED; |
|---|
| 1961 | | -} |
|---|
| 1962 | | - |
|---|
| 1963 | | -/** |
|---|
| 1964 | | - * ice_vsi_alloc - Allocates the next available struct vsi in the PF |
|---|
| 1965 | | - * @pf: board private structure |
|---|
| 1966 | | - * @type: type of VSI |
|---|
| 1967 | | - * |
|---|
| 1968 | | - * returns a pointer to a VSI on success, NULL on failure. |
|---|
| 1969 | | - */ |
|---|
| 1970 | | -static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type) |
|---|
| 1971 | | -{ |
|---|
| 1972 | | - struct ice_vsi *vsi = NULL; |
|---|
| 1973 | | - |
|---|
| 1974 | | - /* Need to protect the allocation of the VSIs at the PF level */ |
|---|
| 1975 | | - mutex_lock(&pf->sw_mutex); |
|---|
| 1976 | | - |
|---|
| 1977 | | - /* If we have already allocated our maximum number of VSIs, |
|---|
| 1978 | | - * pf->next_vsi will be ICE_NO_VSI. If not, pf->next_vsi index |
|---|
| 1979 | | - * is available to be populated |
|---|
| 1980 | | - */ |
|---|
| 1981 | | - if (pf->next_vsi == ICE_NO_VSI) { |
|---|
| 1982 | | - dev_dbg(&pf->pdev->dev, "out of VSI slots!\n"); |
|---|
| 1983 | | - goto unlock_pf; |
|---|
| 1984 | | - } |
|---|
| 1985 | | - |
|---|
| 1986 | | - vsi = devm_kzalloc(&pf->pdev->dev, sizeof(*vsi), GFP_KERNEL); |
|---|
| 1987 | | - if (!vsi) |
|---|
| 1988 | | - goto unlock_pf; |
|---|
| 1989 | | - |
|---|
| 1990 | | - vsi->type = type; |
|---|
| 1991 | | - vsi->back = pf; |
|---|
| 1992 | | - set_bit(__ICE_DOWN, vsi->state); |
|---|
| 1993 | | - vsi->idx = pf->next_vsi; |
|---|
| 1994 | | - vsi->work_lmt = ICE_DFLT_IRQ_WORK; |
|---|
| 1995 | | - |
|---|
| 1996 | | - ice_vsi_set_num_qs(vsi); |
|---|
| 1997 | | - |
|---|
| 1998 | | - switch (vsi->type) { |
|---|
| 1999 | | - case ICE_VSI_PF: |
|---|
| 2000 | | - if (ice_vsi_alloc_arrays(vsi, true)) |
|---|
| 2001 | | - goto err_rings; |
|---|
| 2002 | | - |
|---|
| 2003 | | - /* Setup default MSIX irq handler for VSI */ |
|---|
| 2004 | | - vsi->irq_handler = ice_msix_clean_rings; |
|---|
| 2005 | | - break; |
|---|
| 2006 | | - default: |
|---|
| 2007 | | - dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type); |
|---|
| 2008 | | - goto unlock_pf; |
|---|
| 2009 | | - } |
|---|
| 2010 | | - |
|---|
| 2011 | | - /* fill VSI slot in the PF struct */ |
|---|
| 2012 | | - pf->vsi[pf->next_vsi] = vsi; |
|---|
| 2013 | | - |
|---|
| 2014 | | - /* prepare pf->next_vsi for next use */ |
|---|
| 2015 | | - pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi, |
|---|
| 2016 | | - pf->next_vsi); |
|---|
| 2017 | | - goto unlock_pf; |
|---|
| 2018 | | - |
|---|
| 2019 | | -err_rings: |
|---|
| 2020 | | - devm_kfree(&pf->pdev->dev, vsi); |
|---|
| 2021 | | - vsi = NULL; |
|---|
| 2022 | | -unlock_pf: |
|---|
| 2023 | | - mutex_unlock(&pf->sw_mutex); |
|---|
| 2024 | | - return vsi; |
|---|
| 2783 | + ice_flush(hw); |
|---|
| 2025 | 2784 | } |
|---|
| 2026 | 2785 | |
|---|
| 2027 | 2786 | /** |
|---|
| .. | .. |
|---|
| 2030 | 2789 | */ |
|---|
| 2031 | 2790 | static void ice_free_irq_msix_misc(struct ice_pf *pf) |
|---|
| 2032 | 2791 | { |
|---|
| 2033 | | - /* disable OICR interrupt */ |
|---|
| 2034 | | - wr32(&pf->hw, PFINT_OICR_ENA, 0); |
|---|
| 2035 | | - ice_flush(&pf->hw); |
|---|
| 2792 | + struct ice_hw *hw = &pf->hw; |
|---|
| 2036 | 2793 | |
|---|
| 2037 | | - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags) && pf->msix_entries) { |
|---|
| 2794 | + ice_dis_ctrlq_interrupts(hw); |
|---|
| 2795 | + |
|---|
| 2796 | + /* disable OICR interrupt */ |
|---|
| 2797 | + wr32(hw, PFINT_OICR_ENA, 0); |
|---|
| 2798 | + ice_flush(hw); |
|---|
| 2799 | + |
|---|
| 2800 | + if (pf->msix_entries) { |
|---|
| 2038 | 2801 | synchronize_irq(pf->msix_entries[pf->oicr_idx].vector); |
|---|
| 2039 | | - devm_free_irq(&pf->pdev->dev, |
|---|
| 2802 | + devm_free_irq(ice_pf_to_dev(pf), |
|---|
| 2040 | 2803 | pf->msix_entries[pf->oicr_idx].vector, pf); |
|---|
| 2041 | 2804 | } |
|---|
| 2042 | 2805 | |
|---|
| 2806 | + pf->num_avail_sw_msix += 1; |
|---|
| 2043 | 2807 | ice_free_res(pf->irq_tracker, pf->oicr_idx, ICE_RES_MISC_VEC_ID); |
|---|
| 2808 | +} |
|---|
| 2809 | + |
|---|
| 2810 | +/** |
|---|
| 2811 | + * ice_ena_ctrlq_interrupts - enable control queue interrupts |
|---|
| 2812 | + * @hw: pointer to HW structure |
|---|
| 2813 | + * @reg_idx: HW vector index to associate the control queue interrupts with |
|---|
| 2814 | + */ |
|---|
| 2815 | +static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx) |
|---|
| 2816 | +{ |
|---|
| 2817 | + u32 val; |
|---|
| 2818 | + |
|---|
| 2819 | + val = ((reg_idx & PFINT_OICR_CTL_MSIX_INDX_M) | |
|---|
| 2820 | + PFINT_OICR_CTL_CAUSE_ENA_M); |
|---|
| 2821 | + wr32(hw, PFINT_OICR_CTL, val); |
|---|
| 2822 | + |
|---|
| 2823 | + /* enable Admin queue Interrupt causes */ |
|---|
| 2824 | + val = ((reg_idx & PFINT_FW_CTL_MSIX_INDX_M) | |
|---|
| 2825 | + PFINT_FW_CTL_CAUSE_ENA_M); |
|---|
| 2826 | + wr32(hw, PFINT_FW_CTL, val); |
|---|
| 2827 | + |
|---|
| 2828 | + /* enable Mailbox queue Interrupt causes */ |
|---|
| 2829 | + val = ((reg_idx & PFINT_MBX_CTL_MSIX_INDX_M) | |
|---|
| 2830 | + PFINT_MBX_CTL_CAUSE_ENA_M); |
|---|
| 2831 | + wr32(hw, PFINT_MBX_CTL, val); |
|---|
| 2832 | + |
|---|
| 2833 | + ice_flush(hw); |
|---|
| 2044 | 2834 | } |
|---|
| 2045 | 2835 | |
|---|
| 2046 | 2836 | /** |
|---|
| .. | .. |
|---|
| 2048 | 2838 | * @pf: board private structure |
|---|
| 2049 | 2839 | * |
|---|
| 2050 | 2840 | * This sets up the handler for MSIX 0, which is used to manage the |
|---|
| 2051 | | - * non-queue interrupts, e.g. AdminQ and errors. This is not used |
|---|
| 2841 | + * non-queue interrupts, e.g. AdminQ and errors. This is not used |
|---|
| 2052 | 2842 | * when in MSI or Legacy interrupt mode. |
|---|
| 2053 | 2843 | */ |
|---|
| 2054 | 2844 | static int ice_req_irq_msix_misc(struct ice_pf *pf) |
|---|
| 2055 | 2845 | { |
|---|
| 2846 | + struct device *dev = ice_pf_to_dev(pf); |
|---|
| 2056 | 2847 | struct ice_hw *hw = &pf->hw; |
|---|
| 2057 | 2848 | int oicr_idx, err = 0; |
|---|
| 2058 | | - u8 itr_gran; |
|---|
| 2059 | | - u32 val; |
|---|
| 2060 | 2849 | |
|---|
| 2061 | 2850 | if (!pf->int_name[0]) |
|---|
| 2062 | 2851 | snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc", |
|---|
| 2063 | | - dev_driver_string(&pf->pdev->dev), |
|---|
| 2064 | | - dev_name(&pf->pdev->dev)); |
|---|
| 2852 | + dev_driver_string(dev), dev_name(dev)); |
|---|
| 2065 | 2853 | |
|---|
| 2066 | 2854 | /* Do not request IRQ but do enable OICR interrupt since settings are |
|---|
| 2067 | 2855 | * lost during reset. Note that this function is called only during |
|---|
| 2068 | 2856 | * rebuild path and not while reset is in progress. |
|---|
| 2069 | 2857 | */ |
|---|
| 2070 | | - if (ice_is_reset_recovery_pending(pf->state)) |
|---|
| 2858 | + if (ice_is_reset_in_progress(pf->state)) |
|---|
| 2071 | 2859 | goto skip_req_irq; |
|---|
| 2072 | 2860 | |
|---|
| 2073 | 2861 | /* reserve one vector in irq_tracker for misc interrupts */ |
|---|
| .. | .. |
|---|
| 2075 | 2863 | if (oicr_idx < 0) |
|---|
| 2076 | 2864 | return oicr_idx; |
|---|
| 2077 | 2865 | |
|---|
| 2078 | | - pf->oicr_idx = oicr_idx; |
|---|
| 2866 | + pf->num_avail_sw_msix -= 1; |
|---|
| 2867 | + pf->oicr_idx = (u16)oicr_idx; |
|---|
| 2079 | 2868 | |
|---|
| 2080 | | - err = devm_request_irq(&pf->pdev->dev, |
|---|
| 2081 | | - pf->msix_entries[pf->oicr_idx].vector, |
|---|
| 2869 | + err = devm_request_irq(dev, pf->msix_entries[pf->oicr_idx].vector, |
|---|
| 2082 | 2870 | ice_misc_intr, 0, pf->int_name, pf); |
|---|
| 2083 | 2871 | if (err) { |
|---|
| 2084 | | - dev_err(&pf->pdev->dev, |
|---|
| 2085 | | - "devm_request_irq for %s failed: %d\n", |
|---|
| 2872 | + dev_err(dev, "devm_request_irq for %s failed: %d\n", |
|---|
| 2086 | 2873 | pf->int_name, err); |
|---|
| 2087 | 2874 | ice_free_res(pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID); |
|---|
| 2875 | + pf->num_avail_sw_msix += 1; |
|---|
| 2088 | 2876 | return err; |
|---|
| 2089 | 2877 | } |
|---|
| 2090 | 2878 | |
|---|
| 2091 | 2879 | skip_req_irq: |
|---|
| 2092 | 2880 | ice_ena_misc_vector(pf); |
|---|
| 2093 | 2881 | |
|---|
| 2094 | | - val = ((pf->oicr_idx & PFINT_OICR_CTL_MSIX_INDX_M) | |
|---|
| 2095 | | - PFINT_OICR_CTL_CAUSE_ENA_M); |
|---|
| 2096 | | - wr32(hw, PFINT_OICR_CTL, val); |
|---|
| 2097 | | - |
|---|
| 2098 | | - /* This enables Admin queue Interrupt causes */ |
|---|
| 2099 | | - val = ((pf->oicr_idx & PFINT_FW_CTL_MSIX_INDX_M) | |
|---|
| 2100 | | - PFINT_FW_CTL_CAUSE_ENA_M); |
|---|
| 2101 | | - wr32(hw, PFINT_FW_CTL, val); |
|---|
| 2102 | | - |
|---|
| 2103 | | - itr_gran = hw->itr_gran_200; |
|---|
| 2104 | | - |
|---|
| 2882 | + ice_ena_ctrlq_interrupts(hw, pf->oicr_idx); |
|---|
| 2105 | 2883 | wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_idx), |
|---|
| 2106 | | - ITR_TO_REG(ICE_ITR_8K, itr_gran)); |
|---|
| 2884 | + ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S); |
|---|
| 2107 | 2885 | |
|---|
| 2108 | 2886 | ice_flush(hw); |
|---|
| 2109 | 2887 | ice_irq_dynamic_ena(hw, NULL, NULL); |
|---|
| .. | .. |
|---|
| 2112 | 2890 | } |
|---|
| 2113 | 2891 | |
|---|
| 2114 | 2892 | /** |
|---|
| 2115 | | - * ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI |
|---|
| 2116 | | - * @vsi: the VSI getting queues |
|---|
| 2893 | + * ice_napi_add - register NAPI handler for the VSI |
|---|
| 2894 | + * @vsi: VSI for which NAPI handler is to be registered |
|---|
| 2117 | 2895 | * |
|---|
| 2118 | | - * Return 0 on success and a negative value on error |
|---|
| 2896 | + * This function is only called in the driver's load path. Registering the NAPI |
|---|
| 2897 | + * handler is done in ice_vsi_alloc_q_vector() for all other cases (i.e. resume, |
|---|
| 2898 | + * reset/rebuild, etc.) |
|---|
| 2119 | 2899 | */ |
|---|
| 2120 | | -static int ice_vsi_get_qs_contig(struct ice_vsi *vsi) |
|---|
| 2121 | | -{ |
|---|
| 2122 | | - struct ice_pf *pf = vsi->back; |
|---|
| 2123 | | - int offset, ret = 0; |
|---|
| 2124 | | - |
|---|
| 2125 | | - mutex_lock(&pf->avail_q_mutex); |
|---|
| 2126 | | - /* look for contiguous block of queues for tx */ |
|---|
| 2127 | | - offset = bitmap_find_next_zero_area(pf->avail_txqs, ICE_MAX_TXQS, |
|---|
| 2128 | | - 0, vsi->alloc_txq, 0); |
|---|
| 2129 | | - if (offset < ICE_MAX_TXQS) { |
|---|
| 2130 | | - int i; |
|---|
| 2131 | | - |
|---|
| 2132 | | - bitmap_set(pf->avail_txqs, offset, vsi->alloc_txq); |
|---|
| 2133 | | - for (i = 0; i < vsi->alloc_txq; i++) |
|---|
| 2134 | | - vsi->txq_map[i] = i + offset; |
|---|
| 2135 | | - } else { |
|---|
| 2136 | | - ret = -ENOMEM; |
|---|
| 2137 | | - vsi->tx_mapping_mode = ICE_VSI_MAP_SCATTER; |
|---|
| 2138 | | - } |
|---|
| 2139 | | - |
|---|
| 2140 | | - /* look for contiguous block of queues for rx */ |
|---|
| 2141 | | - offset = bitmap_find_next_zero_area(pf->avail_rxqs, ICE_MAX_RXQS, |
|---|
| 2142 | | - 0, vsi->alloc_rxq, 0); |
|---|
| 2143 | | - if (offset < ICE_MAX_RXQS) { |
|---|
| 2144 | | - int i; |
|---|
| 2145 | | - |
|---|
| 2146 | | - bitmap_set(pf->avail_rxqs, offset, vsi->alloc_rxq); |
|---|
| 2147 | | - for (i = 0; i < vsi->alloc_rxq; i++) |
|---|
| 2148 | | - vsi->rxq_map[i] = i + offset; |
|---|
| 2149 | | - } else { |
|---|
| 2150 | | - ret = -ENOMEM; |
|---|
| 2151 | | - vsi->rx_mapping_mode = ICE_VSI_MAP_SCATTER; |
|---|
| 2152 | | - } |
|---|
| 2153 | | - mutex_unlock(&pf->avail_q_mutex); |
|---|
| 2154 | | - |
|---|
| 2155 | | - return ret; |
|---|
| 2156 | | -} |
|---|
| 2157 | | - |
|---|
| 2158 | | -/** |
|---|
| 2159 | | - * ice_vsi_get_qs_scatter - Assign a scattered queues to VSI |
|---|
| 2160 | | - * @vsi: the VSI getting queues |
|---|
| 2161 | | - * |
|---|
| 2162 | | - * Return 0 on success and a negative value on error |
|---|
| 2163 | | - */ |
|---|
| 2164 | | -static int ice_vsi_get_qs_scatter(struct ice_vsi *vsi) |
|---|
| 2165 | | -{ |
|---|
| 2166 | | - struct ice_pf *pf = vsi->back; |
|---|
| 2167 | | - int i, index = 0; |
|---|
| 2168 | | - |
|---|
| 2169 | | - mutex_lock(&pf->avail_q_mutex); |
|---|
| 2170 | | - |
|---|
| 2171 | | - if (vsi->tx_mapping_mode == ICE_VSI_MAP_SCATTER) { |
|---|
| 2172 | | - for (i = 0; i < vsi->alloc_txq; i++) { |
|---|
| 2173 | | - index = find_next_zero_bit(pf->avail_txqs, |
|---|
| 2174 | | - ICE_MAX_TXQS, index); |
|---|
| 2175 | | - if (index < ICE_MAX_TXQS) { |
|---|
| 2176 | | - set_bit(index, pf->avail_txqs); |
|---|
| 2177 | | - vsi->txq_map[i] = index; |
|---|
| 2178 | | - } else { |
|---|
| 2179 | | - goto err_scatter_tx; |
|---|
| 2180 | | - } |
|---|
| 2181 | | - } |
|---|
| 2182 | | - } |
|---|
| 2183 | | - |
|---|
| 2184 | | - if (vsi->rx_mapping_mode == ICE_VSI_MAP_SCATTER) { |
|---|
| 2185 | | - for (i = 0; i < vsi->alloc_rxq; i++) { |
|---|
| 2186 | | - index = find_next_zero_bit(pf->avail_rxqs, |
|---|
| 2187 | | - ICE_MAX_RXQS, index); |
|---|
| 2188 | | - if (index < ICE_MAX_RXQS) { |
|---|
| 2189 | | - set_bit(index, pf->avail_rxqs); |
|---|
| 2190 | | - vsi->rxq_map[i] = index; |
|---|
| 2191 | | - } else { |
|---|
| 2192 | | - goto err_scatter_rx; |
|---|
| 2193 | | - } |
|---|
| 2194 | | - } |
|---|
| 2195 | | - } |
|---|
| 2196 | | - |
|---|
| 2197 | | - mutex_unlock(&pf->avail_q_mutex); |
|---|
| 2198 | | - return 0; |
|---|
| 2199 | | - |
|---|
| 2200 | | -err_scatter_rx: |
|---|
| 2201 | | - /* unflag any queues we have grabbed (i is failed position) */ |
|---|
| 2202 | | - for (index = 0; index < i; index++) { |
|---|
| 2203 | | - clear_bit(vsi->rxq_map[index], pf->avail_rxqs); |
|---|
| 2204 | | - vsi->rxq_map[index] = 0; |
|---|
| 2205 | | - } |
|---|
| 2206 | | - i = vsi->alloc_txq; |
|---|
| 2207 | | -err_scatter_tx: |
|---|
| 2208 | | - /* i is either position of failed attempt or vsi->alloc_txq */ |
|---|
| 2209 | | - for (index = 0; index < i; index++) { |
|---|
| 2210 | | - clear_bit(vsi->txq_map[index], pf->avail_txqs); |
|---|
| 2211 | | - vsi->txq_map[index] = 0; |
|---|
| 2212 | | - } |
|---|
| 2213 | | - |
|---|
| 2214 | | - mutex_unlock(&pf->avail_q_mutex); |
|---|
| 2215 | | - return -ENOMEM; |
|---|
| 2216 | | -} |
|---|
| 2217 | | - |
|---|
| 2218 | | -/** |
|---|
| 2219 | | - * ice_vsi_get_qs - Assign queues from PF to VSI |
|---|
| 2220 | | - * @vsi: the VSI to assign queues to |
|---|
| 2221 | | - * |
|---|
| 2222 | | - * Returns 0 on success and a negative value on error |
|---|
| 2223 | | - */ |
|---|
| 2224 | | -static int ice_vsi_get_qs(struct ice_vsi *vsi) |
|---|
| 2225 | | -{ |
|---|
| 2226 | | - int ret = 0; |
|---|
| 2227 | | - |
|---|
| 2228 | | - vsi->tx_mapping_mode = ICE_VSI_MAP_CONTIG; |
|---|
| 2229 | | - vsi->rx_mapping_mode = ICE_VSI_MAP_CONTIG; |
|---|
| 2230 | | - |
|---|
| 2231 | | - /* NOTE: ice_vsi_get_qs_contig() will set the rx/tx mapping |
|---|
| 2232 | | - * modes individually to scatter if assigning contiguous queues |
|---|
| 2233 | | - * to rx or tx fails |
|---|
| 2234 | | - */ |
|---|
| 2235 | | - ret = ice_vsi_get_qs_contig(vsi); |
|---|
| 2236 | | - if (ret < 0) { |
|---|
| 2237 | | - if (vsi->tx_mapping_mode == ICE_VSI_MAP_SCATTER) |
|---|
| 2238 | | - vsi->alloc_txq = max_t(u16, vsi->alloc_txq, |
|---|
| 2239 | | - ICE_MAX_SCATTER_TXQS); |
|---|
| 2240 | | - if (vsi->rx_mapping_mode == ICE_VSI_MAP_SCATTER) |
|---|
| 2241 | | - vsi->alloc_rxq = max_t(u16, vsi->alloc_rxq, |
|---|
| 2242 | | - ICE_MAX_SCATTER_RXQS); |
|---|
| 2243 | | - ret = ice_vsi_get_qs_scatter(vsi); |
|---|
| 2244 | | - } |
|---|
| 2245 | | - |
|---|
| 2246 | | - return ret; |
|---|
| 2247 | | -} |
|---|
| 2248 | | - |
|---|
| 2249 | | -/** |
|---|
| 2250 | | - * ice_vsi_put_qs - Release queues from VSI to PF |
|---|
| 2251 | | - * @vsi: the VSI thats going to release queues |
|---|
| 2252 | | - */ |
|---|
| 2253 | | -static void ice_vsi_put_qs(struct ice_vsi *vsi) |
|---|
| 2254 | | -{ |
|---|
| 2255 | | - struct ice_pf *pf = vsi->back; |
|---|
| 2256 | | - int i; |
|---|
| 2257 | | - |
|---|
| 2258 | | - mutex_lock(&pf->avail_q_mutex); |
|---|
| 2259 | | - |
|---|
| 2260 | | - for (i = 0; i < vsi->alloc_txq; i++) { |
|---|
| 2261 | | - clear_bit(vsi->txq_map[i], pf->avail_txqs); |
|---|
| 2262 | | - vsi->txq_map[i] = ICE_INVAL_Q_INDEX; |
|---|
| 2263 | | - } |
|---|
| 2264 | | - |
|---|
| 2265 | | - for (i = 0; i < vsi->alloc_rxq; i++) { |
|---|
| 2266 | | - clear_bit(vsi->rxq_map[i], pf->avail_rxqs); |
|---|
| 2267 | | - vsi->rxq_map[i] = ICE_INVAL_Q_INDEX; |
|---|
| 2268 | | - } |
|---|
| 2269 | | - |
|---|
| 2270 | | - mutex_unlock(&pf->avail_q_mutex); |
|---|
| 2271 | | -} |
|---|
| 2272 | | - |
|---|
| 2273 | | -/** |
|---|
| 2274 | | - * ice_free_q_vector - Free memory allocated for a specific interrupt vector |
|---|
| 2275 | | - * @vsi: VSI having the memory freed |
|---|
| 2276 | | - * @v_idx: index of the vector to be freed |
|---|
| 2277 | | - */ |
|---|
| 2278 | | -static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx) |
|---|
| 2279 | | -{ |
|---|
| 2280 | | - struct ice_q_vector *q_vector; |
|---|
| 2281 | | - struct ice_ring *ring; |
|---|
| 2282 | | - |
|---|
| 2283 | | - if (!vsi->q_vectors[v_idx]) { |
|---|
| 2284 | | - dev_dbg(&vsi->back->pdev->dev, "Queue vector at index %d not found\n", |
|---|
| 2285 | | - v_idx); |
|---|
| 2286 | | - return; |
|---|
| 2287 | | - } |
|---|
| 2288 | | - q_vector = vsi->q_vectors[v_idx]; |
|---|
| 2289 | | - |
|---|
| 2290 | | - ice_for_each_ring(ring, q_vector->tx) |
|---|
| 2291 | | - ring->q_vector = NULL; |
|---|
| 2292 | | - ice_for_each_ring(ring, q_vector->rx) |
|---|
| 2293 | | - ring->q_vector = NULL; |
|---|
| 2294 | | - |
|---|
| 2295 | | - /* only VSI with an associated netdev is set up with NAPI */ |
|---|
| 2296 | | - if (vsi->netdev) |
|---|
| 2297 | | - netif_napi_del(&q_vector->napi); |
|---|
| 2298 | | - |
|---|
| 2299 | | - devm_kfree(&vsi->back->pdev->dev, q_vector); |
|---|
| 2300 | | - vsi->q_vectors[v_idx] = NULL; |
|---|
| 2301 | | -} |
|---|
| 2302 | | - |
|---|
| 2303 | | -/** |
|---|
| 2304 | | - * ice_vsi_free_q_vectors - Free memory allocated for interrupt vectors |
|---|
| 2305 | | - * @vsi: the VSI having memory freed |
|---|
| 2306 | | - */ |
|---|
| 2307 | | -static void ice_vsi_free_q_vectors(struct ice_vsi *vsi) |
|---|
| 2900 | +static void ice_napi_add(struct ice_vsi *vsi) |
|---|
| 2308 | 2901 | { |
|---|
| 2309 | 2902 | int v_idx; |
|---|
| 2310 | 2903 | |
|---|
| 2311 | | - for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) |
|---|
| 2312 | | - ice_free_q_vector(vsi, v_idx); |
|---|
| 2904 | + if (!vsi->netdev) |
|---|
| 2905 | + return; |
|---|
| 2906 | + |
|---|
| 2907 | + ice_for_each_q_vector(vsi, v_idx) |
|---|
| 2908 | + netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi, |
|---|
| 2909 | + ice_napi_poll, NAPI_POLL_WEIGHT); |
|---|
| 2313 | 2910 | } |
|---|
| 2314 | 2911 | |
|---|
| 2315 | 2912 | /** |
|---|
| 2316 | | - * ice_cfg_netdev - Setup the netdev flags |
|---|
| 2317 | | - * @vsi: the VSI being configured |
|---|
| 2318 | | - * |
|---|
| 2319 | | - * Returns 0 on success, negative value on failure |
|---|
| 2913 | + * ice_set_ops - set netdev and ethtools ops for the given netdev |
|---|
| 2914 | + * @netdev: netdev instance |
|---|
| 2320 | 2915 | */ |
|---|
| 2321 | | -static int ice_cfg_netdev(struct ice_vsi *vsi) |
|---|
| 2916 | +static void ice_set_ops(struct net_device *netdev) |
|---|
| 2322 | 2917 | { |
|---|
| 2918 | + struct ice_pf *pf = ice_netdev_to_pf(netdev); |
|---|
| 2919 | + |
|---|
| 2920 | + if (ice_is_safe_mode(pf)) { |
|---|
| 2921 | + netdev->netdev_ops = &ice_netdev_safe_mode_ops; |
|---|
| 2922 | + ice_set_ethtool_safe_mode_ops(netdev); |
|---|
| 2923 | + return; |
|---|
| 2924 | + } |
|---|
| 2925 | + |
|---|
| 2926 | + netdev->netdev_ops = &ice_netdev_ops; |
|---|
| 2927 | + netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic; |
|---|
| 2928 | + ice_set_ethtool_ops(netdev); |
|---|
| 2929 | +} |
|---|
| 2930 | + |
|---|
| 2931 | +/** |
|---|
| 2932 | + * ice_set_netdev_features - set features for the given netdev |
|---|
| 2933 | + * @netdev: netdev instance |
|---|
| 2934 | + */ |
|---|
| 2935 | +static void ice_set_netdev_features(struct net_device *netdev) |
|---|
| 2936 | +{ |
|---|
| 2937 | + struct ice_pf *pf = ice_netdev_to_pf(netdev); |
|---|
| 2323 | 2938 | netdev_features_t csumo_features; |
|---|
| 2324 | 2939 | netdev_features_t vlano_features; |
|---|
| 2325 | 2940 | netdev_features_t dflt_features; |
|---|
| 2326 | 2941 | netdev_features_t tso_features; |
|---|
| 2327 | | - struct ice_netdev_priv *np; |
|---|
| 2328 | | - struct net_device *netdev; |
|---|
| 2329 | | - u8 mac_addr[ETH_ALEN]; |
|---|
| 2330 | 2942 | |
|---|
| 2331 | | - netdev = alloc_etherdev_mqs(sizeof(struct ice_netdev_priv), |
|---|
| 2332 | | - vsi->alloc_txq, vsi->alloc_rxq); |
|---|
| 2333 | | - if (!netdev) |
|---|
| 2334 | | - return -ENOMEM; |
|---|
| 2335 | | - |
|---|
| 2336 | | - vsi->netdev = netdev; |
|---|
| 2337 | | - np = netdev_priv(netdev); |
|---|
| 2338 | | - np->vsi = vsi; |
|---|
| 2943 | + if (ice_is_safe_mode(pf)) { |
|---|
| 2944 | + /* safe mode */ |
|---|
| 2945 | + netdev->features = NETIF_F_SG | NETIF_F_HIGHDMA; |
|---|
| 2946 | + netdev->hw_features = netdev->features; |
|---|
| 2947 | + return; |
|---|
| 2948 | + } |
|---|
| 2339 | 2949 | |
|---|
| 2340 | 2950 | dflt_features = NETIF_F_SG | |
|---|
| 2341 | 2951 | NETIF_F_HIGHDMA | |
|---|
| 2952 | + NETIF_F_NTUPLE | |
|---|
| 2342 | 2953 | NETIF_F_RXHASH; |
|---|
| 2343 | 2954 | |
|---|
| 2344 | 2955 | csumo_features = NETIF_F_RXCSUM | |
|---|
| 2345 | 2956 | NETIF_F_IP_CSUM | |
|---|
| 2957 | + NETIF_F_SCTP_CRC | |
|---|
| 2346 | 2958 | NETIF_F_IPV6_CSUM; |
|---|
| 2347 | 2959 | |
|---|
| 2348 | 2960 | vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER | |
|---|
| 2349 | 2961 | NETIF_F_HW_VLAN_CTAG_TX | |
|---|
| 2350 | 2962 | NETIF_F_HW_VLAN_CTAG_RX; |
|---|
| 2351 | 2963 | |
|---|
| 2352 | | - tso_features = NETIF_F_TSO; |
|---|
| 2964 | + tso_features = NETIF_F_TSO | |
|---|
| 2965 | + NETIF_F_TSO_ECN | |
|---|
| 2966 | + NETIF_F_TSO6 | |
|---|
| 2967 | + NETIF_F_GSO_GRE | |
|---|
| 2968 | + NETIF_F_GSO_UDP_TUNNEL | |
|---|
| 2969 | + NETIF_F_GSO_GRE_CSUM | |
|---|
| 2970 | + NETIF_F_GSO_UDP_TUNNEL_CSUM | |
|---|
| 2971 | + NETIF_F_GSO_PARTIAL | |
|---|
| 2972 | + NETIF_F_GSO_IPXIP4 | |
|---|
| 2973 | + NETIF_F_GSO_IPXIP6 | |
|---|
| 2974 | + NETIF_F_GSO_UDP_L4; |
|---|
| 2353 | 2975 | |
|---|
| 2976 | + netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM | |
|---|
| 2977 | + NETIF_F_GSO_GRE_CSUM; |
|---|
| 2354 | 2978 | /* set features that user can change */ |
|---|
| 2355 | 2979 | netdev->hw_features = dflt_features | csumo_features | |
|---|
| 2356 | 2980 | vlano_features | tso_features; |
|---|
| 2981 | + |
|---|
| 2982 | + /* add support for HW_CSUM on packets with MPLS header */ |
|---|
| 2983 | + netdev->mpls_features = NETIF_F_HW_CSUM; |
|---|
| 2357 | 2984 | |
|---|
| 2358 | 2985 | /* enable features */ |
|---|
| 2359 | 2986 | netdev->features |= netdev->hw_features; |
|---|
| .. | .. |
|---|
| 2362 | 2989 | tso_features; |
|---|
| 2363 | 2990 | netdev->vlan_features |= dflt_features | csumo_features | |
|---|
| 2364 | 2991 | tso_features; |
|---|
| 2992 | +} |
|---|
| 2993 | + |
|---|
| 2994 | +/** |
|---|
| 2995 | + * ice_cfg_netdev - Allocate, configure and register a netdev |
|---|
| 2996 | + * @vsi: the VSI associated with the new netdev |
|---|
| 2997 | + * |
|---|
| 2998 | + * Returns 0 on success, negative value on failure |
|---|
| 2999 | + */ |
|---|
| 3000 | +static int ice_cfg_netdev(struct ice_vsi *vsi) |
|---|
| 3001 | +{ |
|---|
| 3002 | + struct ice_pf *pf = vsi->back; |
|---|
| 3003 | + struct ice_netdev_priv *np; |
|---|
| 3004 | + struct net_device *netdev; |
|---|
| 3005 | + u8 mac_addr[ETH_ALEN]; |
|---|
| 3006 | + int err; |
|---|
| 3007 | + |
|---|
| 3008 | + err = ice_devlink_create_port(vsi); |
|---|
| 3009 | + if (err) |
|---|
| 3010 | + return err; |
|---|
| 3011 | + |
|---|
| 3012 | + netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq, |
|---|
| 3013 | + vsi->alloc_rxq); |
|---|
| 3014 | + if (!netdev) { |
|---|
| 3015 | + err = -ENOMEM; |
|---|
| 3016 | + goto err_destroy_devlink_port; |
|---|
| 3017 | + } |
|---|
| 3018 | + |
|---|
| 3019 | + vsi->netdev = netdev; |
|---|
| 3020 | + np = netdev_priv(netdev); |
|---|
| 3021 | + np->vsi = vsi; |
|---|
| 3022 | + |
|---|
| 3023 | + ice_set_netdev_features(netdev); |
|---|
| 3024 | + |
|---|
| 3025 | + ice_set_ops(netdev); |
|---|
| 2365 | 3026 | |
|---|
| 2366 | 3027 | if (vsi->type == ICE_VSI_PF) { |
|---|
| 2367 | | - SET_NETDEV_DEV(netdev, &vsi->back->pdev->dev); |
|---|
| 3028 | + SET_NETDEV_DEV(netdev, ice_pf_to_dev(pf)); |
|---|
| 2368 | 3029 | ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr); |
|---|
| 2369 | | - |
|---|
| 2370 | 3030 | ether_addr_copy(netdev->dev_addr, mac_addr); |
|---|
| 2371 | 3031 | ether_addr_copy(netdev->perm_addr, mac_addr); |
|---|
| 2372 | 3032 | } |
|---|
| 2373 | 3033 | |
|---|
| 2374 | 3034 | netdev->priv_flags |= IFF_UNICAST_FLT; |
|---|
| 2375 | 3035 | |
|---|
| 2376 | | - /* assign netdev_ops */ |
|---|
| 2377 | | - netdev->netdev_ops = &ice_netdev_ops; |
|---|
| 3036 | + /* Setup netdev TC information */ |
|---|
| 3037 | + ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc); |
|---|
| 2378 | 3038 | |
|---|
| 2379 | 3039 | /* setup watchdog timeout value to be 5 second */ |
|---|
| 2380 | 3040 | netdev->watchdog_timeo = 5 * HZ; |
|---|
| 2381 | 3041 | |
|---|
| 2382 | | - ice_set_ethtool_ops(netdev); |
|---|
| 2383 | | - |
|---|
| 2384 | 3042 | netdev->min_mtu = ETH_MIN_MTU; |
|---|
| 2385 | 3043 | netdev->max_mtu = ICE_MAX_MTU; |
|---|
| 2386 | 3044 | |
|---|
| 2387 | | - return 0; |
|---|
| 2388 | | -} |
|---|
| 3045 | + err = register_netdev(vsi->netdev); |
|---|
| 3046 | + if (err) |
|---|
| 3047 | + goto err_free_netdev; |
|---|
| 2389 | 3048 | |
|---|
| 2390 | | -/** |
|---|
| 2391 | | - * ice_vsi_free_arrays - clean up vsi resources |
|---|
| 2392 | | - * @vsi: pointer to VSI being cleared |
|---|
| 2393 | | - * @free_qvectors: bool to specify if q_vectors should be deallocated |
|---|
| 2394 | | - */ |
|---|
| 2395 | | -static void ice_vsi_free_arrays(struct ice_vsi *vsi, bool free_qvectors) |
|---|
| 2396 | | -{ |
|---|
| 2397 | | - struct ice_pf *pf = vsi->back; |
|---|
| 3049 | + devlink_port_type_eth_set(&vsi->devlink_port, vsi->netdev); |
|---|
| 2398 | 3050 | |
|---|
| 2399 | | - /* free the ring and vector containers */ |
|---|
| 2400 | | - if (free_qvectors && vsi->q_vectors) { |
|---|
| 2401 | | - devm_kfree(&pf->pdev->dev, vsi->q_vectors); |
|---|
| 2402 | | - vsi->q_vectors = NULL; |
|---|
| 2403 | | - } |
|---|
| 2404 | | - if (vsi->tx_rings) { |
|---|
| 2405 | | - devm_kfree(&pf->pdev->dev, vsi->tx_rings); |
|---|
| 2406 | | - vsi->tx_rings = NULL; |
|---|
| 2407 | | - } |
|---|
| 2408 | | - if (vsi->rx_rings) { |
|---|
| 2409 | | - devm_kfree(&pf->pdev->dev, vsi->rx_rings); |
|---|
| 2410 | | - vsi->rx_rings = NULL; |
|---|
| 2411 | | - } |
|---|
| 2412 | | -} |
|---|
| 3051 | + netif_carrier_off(vsi->netdev); |
|---|
| 2413 | 3052 | |
|---|
| 2414 | | -/** |
|---|
| 2415 | | - * ice_vsi_clear - clean up and deallocate the provided vsi |
|---|
| 2416 | | - * @vsi: pointer to VSI being cleared |
|---|
| 2417 | | - * |
|---|
| 2418 | | - * This deallocates the vsi's queue resources, removes it from the PF's |
|---|
| 2419 | | - * VSI array if necessary, and deallocates the VSI |
|---|
| 2420 | | - * |
|---|
| 2421 | | - * Returns 0 on success, negative on failure |
|---|
| 2422 | | - */ |
|---|
| 2423 | | -static int ice_vsi_clear(struct ice_vsi *vsi) |
|---|
| 2424 | | -{ |
|---|
| 2425 | | - struct ice_pf *pf = NULL; |
|---|
| 2426 | | - |
|---|
| 2427 | | - if (!vsi) |
|---|
| 2428 | | - return 0; |
|---|
| 2429 | | - |
|---|
| 2430 | | - if (!vsi->back) |
|---|
| 2431 | | - return -EINVAL; |
|---|
| 2432 | | - |
|---|
| 2433 | | - pf = vsi->back; |
|---|
| 2434 | | - |
|---|
| 2435 | | - if (!pf->vsi[vsi->idx] || pf->vsi[vsi->idx] != vsi) { |
|---|
| 2436 | | - dev_dbg(&pf->pdev->dev, "vsi does not exist at pf->vsi[%d]\n", |
|---|
| 2437 | | - vsi->idx); |
|---|
| 2438 | | - return -EINVAL; |
|---|
| 2439 | | - } |
|---|
| 2440 | | - |
|---|
| 2441 | | - mutex_lock(&pf->sw_mutex); |
|---|
| 2442 | | - /* updates the PF for this cleared vsi */ |
|---|
| 2443 | | - |
|---|
| 2444 | | - pf->vsi[vsi->idx] = NULL; |
|---|
| 2445 | | - if (vsi->idx < pf->next_vsi) |
|---|
| 2446 | | - pf->next_vsi = vsi->idx; |
|---|
| 2447 | | - |
|---|
| 2448 | | - ice_vsi_free_arrays(vsi, true); |
|---|
| 2449 | | - mutex_unlock(&pf->sw_mutex); |
|---|
| 2450 | | - devm_kfree(&pf->pdev->dev, vsi); |
|---|
| 2451 | | - |
|---|
| 2452 | | - return 0; |
|---|
| 2453 | | -} |
|---|
| 2454 | | - |
|---|
| 2455 | | -/** |
|---|
| 2456 | | - * ice_vsi_alloc_q_vector - Allocate memory for a single interrupt vector |
|---|
| 2457 | | - * @vsi: the VSI being configured |
|---|
| 2458 | | - * @v_idx: index of the vector in the vsi struct |
|---|
| 2459 | | - * |
|---|
| 2460 | | - * We allocate one q_vector. If allocation fails we return -ENOMEM. |
|---|
| 2461 | | - */ |
|---|
| 2462 | | -static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx) |
|---|
| 2463 | | -{ |
|---|
| 2464 | | - struct ice_pf *pf = vsi->back; |
|---|
| 2465 | | - struct ice_q_vector *q_vector; |
|---|
| 2466 | | - |
|---|
| 2467 | | - /* allocate q_vector */ |
|---|
| 2468 | | - q_vector = devm_kzalloc(&pf->pdev->dev, sizeof(*q_vector), GFP_KERNEL); |
|---|
| 2469 | | - if (!q_vector) |
|---|
| 2470 | | - return -ENOMEM; |
|---|
| 2471 | | - |
|---|
| 2472 | | - q_vector->vsi = vsi; |
|---|
| 2473 | | - q_vector->v_idx = v_idx; |
|---|
| 2474 | | - /* only set affinity_mask if the CPU is online */ |
|---|
| 2475 | | - if (cpu_online(v_idx)) |
|---|
| 2476 | | - cpumask_set_cpu(v_idx, &q_vector->affinity_mask); |
|---|
| 2477 | | - |
|---|
| 2478 | | - if (vsi->netdev) |
|---|
| 2479 | | - netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll, |
|---|
| 2480 | | - NAPI_POLL_WEIGHT); |
|---|
| 2481 | | - /* tie q_vector and vsi together */ |
|---|
| 2482 | | - vsi->q_vectors[v_idx] = q_vector; |
|---|
| 2483 | | - |
|---|
| 2484 | | - return 0; |
|---|
| 2485 | | -} |
|---|
| 2486 | | - |
|---|
| 2487 | | -/** |
|---|
| 2488 | | - * ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors |
|---|
| 2489 | | - * @vsi: the VSI being configured |
|---|
| 2490 | | - * |
|---|
| 2491 | | - * We allocate one q_vector per queue interrupt. If allocation fails we |
|---|
| 2492 | | - * return -ENOMEM. |
|---|
| 2493 | | - */ |
|---|
| 2494 | | -static int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi) |
|---|
| 2495 | | -{ |
|---|
| 2496 | | - struct ice_pf *pf = vsi->back; |
|---|
| 2497 | | - int v_idx = 0, num_q_vectors; |
|---|
| 2498 | | - int err; |
|---|
| 2499 | | - |
|---|
| 2500 | | - if (vsi->q_vectors[0]) { |
|---|
| 2501 | | - dev_dbg(&pf->pdev->dev, "VSI %d has existing q_vectors\n", |
|---|
| 2502 | | - vsi->vsi_num); |
|---|
| 2503 | | - return -EEXIST; |
|---|
| 2504 | | - } |
|---|
| 2505 | | - |
|---|
| 2506 | | - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) { |
|---|
| 2507 | | - num_q_vectors = vsi->num_q_vectors; |
|---|
| 2508 | | - } else { |
|---|
| 2509 | | - err = -EINVAL; |
|---|
| 2510 | | - goto err_out; |
|---|
| 2511 | | - } |
|---|
| 2512 | | - |
|---|
| 2513 | | - for (v_idx = 0; v_idx < num_q_vectors; v_idx++) { |
|---|
| 2514 | | - err = ice_vsi_alloc_q_vector(vsi, v_idx); |
|---|
| 2515 | | - if (err) |
|---|
| 2516 | | - goto err_out; |
|---|
| 2517 | | - } |
|---|
| 3053 | + /* make sure transmit queues start off as stopped */ |
|---|
| 3054 | + netif_tx_stop_all_queues(vsi->netdev); |
|---|
| 2518 | 3055 | |
|---|
| 2519 | 3056 | return 0; |
|---|
| 2520 | 3057 | |
|---|
| 2521 | | -err_out: |
|---|
| 2522 | | - while (v_idx--) |
|---|
| 2523 | | - ice_free_q_vector(vsi, v_idx); |
|---|
| 2524 | | - |
|---|
| 2525 | | - dev_err(&pf->pdev->dev, |
|---|
| 2526 | | - "Failed to allocate %d q_vector for VSI %d, ret=%d\n", |
|---|
| 2527 | | - vsi->num_q_vectors, vsi->vsi_num, err); |
|---|
| 2528 | | - vsi->num_q_vectors = 0; |
|---|
| 3058 | +err_free_netdev: |
|---|
| 3059 | + free_netdev(vsi->netdev); |
|---|
| 3060 | + vsi->netdev = NULL; |
|---|
| 3061 | +err_destroy_devlink_port: |
|---|
| 3062 | + ice_devlink_destroy_port(vsi); |
|---|
| 2529 | 3063 | return err; |
|---|
| 2530 | | -} |
|---|
| 2531 | | - |
|---|
| 2532 | | -/** |
|---|
| 2533 | | - * ice_vsi_setup_vector_base - Set up the base vector for the given VSI |
|---|
| 2534 | | - * @vsi: ptr to the VSI |
|---|
| 2535 | | - * |
|---|
| 2536 | | - * This should only be called after ice_vsi_alloc() which allocates the |
|---|
| 2537 | | - * corresponding SW VSI structure and initializes num_queue_pairs for the |
|---|
| 2538 | | - * newly allocated VSI. |
|---|
| 2539 | | - * |
|---|
| 2540 | | - * Returns 0 on success or negative on failure |
|---|
| 2541 | | - */ |
|---|
| 2542 | | -static int ice_vsi_setup_vector_base(struct ice_vsi *vsi) |
|---|
| 2543 | | -{ |
|---|
| 2544 | | - struct ice_pf *pf = vsi->back; |
|---|
| 2545 | | - int num_q_vectors = 0; |
|---|
| 2546 | | - |
|---|
| 2547 | | - if (vsi->base_vector) { |
|---|
| 2548 | | - dev_dbg(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n", |
|---|
| 2549 | | - vsi->vsi_num, vsi->base_vector); |
|---|
| 2550 | | - return -EEXIST; |
|---|
| 2551 | | - } |
|---|
| 2552 | | - |
|---|
| 2553 | | - if (!test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) |
|---|
| 2554 | | - return -ENOENT; |
|---|
| 2555 | | - |
|---|
| 2556 | | - switch (vsi->type) { |
|---|
| 2557 | | - case ICE_VSI_PF: |
|---|
| 2558 | | - num_q_vectors = vsi->num_q_vectors; |
|---|
| 2559 | | - break; |
|---|
| 2560 | | - default: |
|---|
| 2561 | | - dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n", |
|---|
| 2562 | | - vsi->type); |
|---|
| 2563 | | - break; |
|---|
| 2564 | | - } |
|---|
| 2565 | | - |
|---|
| 2566 | | - if (num_q_vectors) |
|---|
| 2567 | | - vsi->base_vector = ice_get_res(pf, pf->irq_tracker, |
|---|
| 2568 | | - num_q_vectors, vsi->idx); |
|---|
| 2569 | | - |
|---|
| 2570 | | - if (vsi->base_vector < 0) { |
|---|
| 2571 | | - dev_err(&pf->pdev->dev, |
|---|
| 2572 | | - "Failed to get tracking for %d vectors for VSI %d, err=%d\n", |
|---|
| 2573 | | - num_q_vectors, vsi->vsi_num, vsi->base_vector); |
|---|
| 2574 | | - return -ENOENT; |
|---|
| 2575 | | - } |
|---|
| 2576 | | - |
|---|
| 2577 | | - return 0; |
|---|
| 2578 | 3064 | } |
|---|
| 2579 | 3065 | |
|---|
| 2580 | 3066 | /** |
|---|
| .. | .. |
|---|
| 2592 | 3078 | } |
|---|
| 2593 | 3079 | |
|---|
| 2594 | 3080 | /** |
|---|
| 2595 | | - * ice_vsi_cfg_rss - Configure RSS params for a VSI |
|---|
| 2596 | | - * @vsi: VSI to be configured |
|---|
| 2597 | | - */ |
|---|
| 2598 | | -static int ice_vsi_cfg_rss(struct ice_vsi *vsi) |
|---|
| 2599 | | -{ |
|---|
| 2600 | | - u8 seed[ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE]; |
|---|
| 2601 | | - struct ice_aqc_get_set_rss_keys *key; |
|---|
| 2602 | | - struct ice_pf *pf = vsi->back; |
|---|
| 2603 | | - enum ice_status status; |
|---|
| 2604 | | - int err = 0; |
|---|
| 2605 | | - u8 *lut; |
|---|
| 2606 | | - |
|---|
| 2607 | | - vsi->rss_size = min_t(int, vsi->rss_size, vsi->num_rxq); |
|---|
| 2608 | | - |
|---|
| 2609 | | - lut = devm_kzalloc(&pf->pdev->dev, vsi->rss_table_size, GFP_KERNEL); |
|---|
| 2610 | | - if (!lut) |
|---|
| 2611 | | - return -ENOMEM; |
|---|
| 2612 | | - |
|---|
| 2613 | | - if (vsi->rss_lut_user) |
|---|
| 2614 | | - memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size); |
|---|
| 2615 | | - else |
|---|
| 2616 | | - ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size); |
|---|
| 2617 | | - |
|---|
| 2618 | | - status = ice_aq_set_rss_lut(&pf->hw, vsi->vsi_num, vsi->rss_lut_type, |
|---|
| 2619 | | - lut, vsi->rss_table_size); |
|---|
| 2620 | | - |
|---|
| 2621 | | - if (status) { |
|---|
| 2622 | | - dev_err(&vsi->back->pdev->dev, |
|---|
| 2623 | | - "set_rss_lut failed, error %d\n", status); |
|---|
| 2624 | | - err = -EIO; |
|---|
| 2625 | | - goto ice_vsi_cfg_rss_exit; |
|---|
| 2626 | | - } |
|---|
| 2627 | | - |
|---|
| 2628 | | - key = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*key), GFP_KERNEL); |
|---|
| 2629 | | - if (!key) { |
|---|
| 2630 | | - err = -ENOMEM; |
|---|
| 2631 | | - goto ice_vsi_cfg_rss_exit; |
|---|
| 2632 | | - } |
|---|
| 2633 | | - |
|---|
| 2634 | | - if (vsi->rss_hkey_user) |
|---|
| 2635 | | - memcpy(seed, vsi->rss_hkey_user, |
|---|
| 2636 | | - ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE); |
|---|
| 2637 | | - else |
|---|
| 2638 | | - netdev_rss_key_fill((void *)seed, |
|---|
| 2639 | | - ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE); |
|---|
| 2640 | | - memcpy(&key->standard_rss_key, seed, |
|---|
| 2641 | | - ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE); |
|---|
| 2642 | | - |
|---|
| 2643 | | - status = ice_aq_set_rss_key(&pf->hw, vsi->vsi_num, key); |
|---|
| 2644 | | - |
|---|
| 2645 | | - if (status) { |
|---|
| 2646 | | - dev_err(&vsi->back->pdev->dev, "set_rss_key failed, error %d\n", |
|---|
| 2647 | | - status); |
|---|
| 2648 | | - err = -EIO; |
|---|
| 2649 | | - } |
|---|
| 2650 | | - |
|---|
| 2651 | | - devm_kfree(&pf->pdev->dev, key); |
|---|
| 2652 | | -ice_vsi_cfg_rss_exit: |
|---|
| 2653 | | - devm_kfree(&pf->pdev->dev, lut); |
|---|
| 2654 | | - return err; |
|---|
| 2655 | | -} |
|---|
| 2656 | | - |
|---|
| 2657 | | -/** |
|---|
| 2658 | | - * ice_vsi_reinit_setup - return resource and reallocate resource for a VSI |
|---|
| 2659 | | - * @vsi: pointer to the ice_vsi |
|---|
| 2660 | | - * |
|---|
| 2661 | | - * This reallocates the VSIs queue resources |
|---|
| 2662 | | - * |
|---|
| 2663 | | - * Returns 0 on success and negative value on failure |
|---|
| 2664 | | - */ |
|---|
| 2665 | | -static int ice_vsi_reinit_setup(struct ice_vsi *vsi) |
|---|
| 2666 | | -{ |
|---|
| 2667 | | - u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; |
|---|
| 2668 | | - int ret, i; |
|---|
| 2669 | | - |
|---|
| 2670 | | - if (!vsi) |
|---|
| 2671 | | - return -EINVAL; |
|---|
| 2672 | | - |
|---|
| 2673 | | - ice_vsi_free_q_vectors(vsi); |
|---|
| 2674 | | - ice_free_res(vsi->back->irq_tracker, vsi->base_vector, vsi->idx); |
|---|
| 2675 | | - vsi->base_vector = 0; |
|---|
| 2676 | | - ice_vsi_clear_rings(vsi); |
|---|
| 2677 | | - ice_vsi_free_arrays(vsi, false); |
|---|
| 2678 | | - ice_vsi_set_num_qs(vsi); |
|---|
| 2679 | | - |
|---|
| 2680 | | - /* Initialize VSI struct elements and create VSI in FW */ |
|---|
| 2681 | | - ret = ice_vsi_add(vsi); |
|---|
| 2682 | | - if (ret < 0) |
|---|
| 2683 | | - goto err_vsi; |
|---|
| 2684 | | - |
|---|
| 2685 | | - ret = ice_vsi_alloc_arrays(vsi, false); |
|---|
| 2686 | | - if (ret < 0) |
|---|
| 2687 | | - goto err_vsi; |
|---|
| 2688 | | - |
|---|
| 2689 | | - switch (vsi->type) { |
|---|
| 2690 | | - case ICE_VSI_PF: |
|---|
| 2691 | | - if (!vsi->netdev) { |
|---|
| 2692 | | - ret = ice_cfg_netdev(vsi); |
|---|
| 2693 | | - if (ret) |
|---|
| 2694 | | - goto err_rings; |
|---|
| 2695 | | - |
|---|
| 2696 | | - ret = register_netdev(vsi->netdev); |
|---|
| 2697 | | - if (ret) |
|---|
| 2698 | | - goto err_rings; |
|---|
| 2699 | | - |
|---|
| 2700 | | - netif_carrier_off(vsi->netdev); |
|---|
| 2701 | | - netif_tx_stop_all_queues(vsi->netdev); |
|---|
| 2702 | | - } |
|---|
| 2703 | | - |
|---|
| 2704 | | - ret = ice_vsi_alloc_q_vectors(vsi); |
|---|
| 2705 | | - if (ret) |
|---|
| 2706 | | - goto err_rings; |
|---|
| 2707 | | - |
|---|
| 2708 | | - ret = ice_vsi_setup_vector_base(vsi); |
|---|
| 2709 | | - if (ret) |
|---|
| 2710 | | - goto err_vectors; |
|---|
| 2711 | | - |
|---|
| 2712 | | - ret = ice_vsi_alloc_rings(vsi); |
|---|
| 2713 | | - if (ret) |
|---|
| 2714 | | - goto err_vectors; |
|---|
| 2715 | | - |
|---|
| 2716 | | - ice_vsi_map_rings_to_vectors(vsi); |
|---|
| 2717 | | - break; |
|---|
| 2718 | | - default: |
|---|
| 2719 | | - break; |
|---|
| 2720 | | - } |
|---|
| 2721 | | - |
|---|
| 2722 | | - ice_vsi_set_tc_cfg(vsi); |
|---|
| 2723 | | - |
|---|
| 2724 | | - /* configure VSI nodes based on number of queues and TC's */ |
|---|
| 2725 | | - for (i = 0; i < vsi->tc_cfg.numtc; i++) |
|---|
| 2726 | | - max_txqs[i] = vsi->num_txq; |
|---|
| 2727 | | - |
|---|
| 2728 | | - ret = ice_cfg_vsi_lan(vsi->port_info, vsi->vsi_num, |
|---|
| 2729 | | - vsi->tc_cfg.ena_tc, max_txqs); |
|---|
| 2730 | | - if (ret) { |
|---|
| 2731 | | - dev_info(&vsi->back->pdev->dev, |
|---|
| 2732 | | - "Failed VSI lan queue config\n"); |
|---|
| 2733 | | - goto err_vectors; |
|---|
| 2734 | | - } |
|---|
| 2735 | | - return 0; |
|---|
| 2736 | | - |
|---|
| 2737 | | -err_vectors: |
|---|
| 2738 | | - ice_vsi_free_q_vectors(vsi); |
|---|
| 2739 | | -err_rings: |
|---|
| 2740 | | - if (vsi->netdev) { |
|---|
| 2741 | | - vsi->current_netdev_flags = 0; |
|---|
| 2742 | | - unregister_netdev(vsi->netdev); |
|---|
| 2743 | | - free_netdev(vsi->netdev); |
|---|
| 2744 | | - vsi->netdev = NULL; |
|---|
| 2745 | | - } |
|---|
| 2746 | | -err_vsi: |
|---|
| 2747 | | - ice_vsi_clear(vsi); |
|---|
| 2748 | | - set_bit(__ICE_RESET_FAILED, vsi->back->state); |
|---|
| 2749 | | - return ret; |
|---|
| 2750 | | -} |
|---|
| 2751 | | - |
|---|
| 2752 | | -/** |
|---|
| 2753 | | - * ice_vsi_setup - Set up a VSI by a given type |
|---|
| 3081 | + * ice_pf_vsi_setup - Set up a PF VSI |
|---|
| 2754 | 3082 | * @pf: board private structure |
|---|
| 2755 | | - * @type: VSI type |
|---|
| 2756 | 3083 | * @pi: pointer to the port_info instance |
|---|
| 2757 | 3084 | * |
|---|
| 2758 | | - * This allocates the sw VSI structure and its queue resources. |
|---|
| 2759 | | - * |
|---|
| 2760 | | - * Returns pointer to the successfully allocated and configure VSI sw struct on |
|---|
| 2761 | | - * success, otherwise returns NULL on failure. |
|---|
| 3085 | + * Returns pointer to the successfully allocated VSI software struct |
|---|
| 3086 | + * on success, otherwise returns NULL on failure. |
|---|
| 2762 | 3087 | */ |
|---|
| 2763 | 3088 | static struct ice_vsi * |
|---|
| 2764 | | -ice_vsi_setup(struct ice_pf *pf, enum ice_vsi_type type, |
|---|
| 2765 | | - struct ice_port_info *pi) |
|---|
| 3089 | +ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) |
|---|
| 2766 | 3090 | { |
|---|
| 2767 | | - u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; |
|---|
| 2768 | | - struct device *dev = &pf->pdev->dev; |
|---|
| 2769 | | - struct ice_vsi_ctx ctxt = { 0 }; |
|---|
| 2770 | | - struct ice_vsi *vsi; |
|---|
| 2771 | | - int ret, i; |
|---|
| 2772 | | - |
|---|
| 2773 | | - vsi = ice_vsi_alloc(pf, type); |
|---|
| 2774 | | - if (!vsi) { |
|---|
| 2775 | | - dev_err(dev, "could not allocate VSI\n"); |
|---|
| 2776 | | - return NULL; |
|---|
| 2777 | | - } |
|---|
| 2778 | | - |
|---|
| 2779 | | - vsi->port_info = pi; |
|---|
| 2780 | | - vsi->vsw = pf->first_sw; |
|---|
| 2781 | | - |
|---|
| 2782 | | - if (ice_vsi_get_qs(vsi)) { |
|---|
| 2783 | | - dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n", |
|---|
| 2784 | | - vsi->idx); |
|---|
| 2785 | | - goto err_get_qs; |
|---|
| 2786 | | - } |
|---|
| 2787 | | - |
|---|
| 2788 | | - /* set RSS capabilities */ |
|---|
| 2789 | | - ice_vsi_set_rss_params(vsi); |
|---|
| 2790 | | - |
|---|
| 2791 | | - /* create the VSI */ |
|---|
| 2792 | | - ret = ice_vsi_add(vsi); |
|---|
| 2793 | | - if (ret) |
|---|
| 2794 | | - goto err_vsi; |
|---|
| 2795 | | - |
|---|
| 2796 | | - ctxt.vsi_num = vsi->vsi_num; |
|---|
| 2797 | | - |
|---|
| 2798 | | - switch (vsi->type) { |
|---|
| 2799 | | - case ICE_VSI_PF: |
|---|
| 2800 | | - ret = ice_cfg_netdev(vsi); |
|---|
| 2801 | | - if (ret) |
|---|
| 2802 | | - goto err_cfg_netdev; |
|---|
| 2803 | | - |
|---|
| 2804 | | - ret = register_netdev(vsi->netdev); |
|---|
| 2805 | | - if (ret) |
|---|
| 2806 | | - goto err_register_netdev; |
|---|
| 2807 | | - |
|---|
| 2808 | | - netif_carrier_off(vsi->netdev); |
|---|
| 2809 | | - |
|---|
| 2810 | | - /* make sure transmit queues start off as stopped */ |
|---|
| 2811 | | - netif_tx_stop_all_queues(vsi->netdev); |
|---|
| 2812 | | - ret = ice_vsi_alloc_q_vectors(vsi); |
|---|
| 2813 | | - if (ret) |
|---|
| 2814 | | - goto err_msix; |
|---|
| 2815 | | - |
|---|
| 2816 | | - ret = ice_vsi_setup_vector_base(vsi); |
|---|
| 2817 | | - if (ret) |
|---|
| 2818 | | - goto err_rings; |
|---|
| 2819 | | - |
|---|
| 2820 | | - ret = ice_vsi_alloc_rings(vsi); |
|---|
| 2821 | | - if (ret) |
|---|
| 2822 | | - goto err_rings; |
|---|
| 2823 | | - |
|---|
| 2824 | | - ice_vsi_map_rings_to_vectors(vsi); |
|---|
| 2825 | | - |
|---|
| 2826 | | - /* Do not exit if configuring RSS had an issue, at least |
|---|
| 2827 | | - * receive traffic on first queue. Hence no need to capture |
|---|
| 2828 | | - * return value |
|---|
| 2829 | | - */ |
|---|
| 2830 | | - if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) |
|---|
| 2831 | | - ice_vsi_cfg_rss(vsi); |
|---|
| 2832 | | - break; |
|---|
| 2833 | | - default: |
|---|
| 2834 | | - /* if vsi type is not recognized, clean up the resources and |
|---|
| 2835 | | - * exit |
|---|
| 2836 | | - */ |
|---|
| 2837 | | - goto err_rings; |
|---|
| 2838 | | - } |
|---|
| 2839 | | - |
|---|
| 2840 | | - ice_vsi_set_tc_cfg(vsi); |
|---|
| 2841 | | - |
|---|
| 2842 | | - /* configure VSI nodes based on number of queues and TC's */ |
|---|
| 2843 | | - for (i = 0; i < vsi->tc_cfg.numtc; i++) |
|---|
| 2844 | | - max_txqs[i] = vsi->num_txq; |
|---|
| 2845 | | - |
|---|
| 2846 | | - ret = ice_cfg_vsi_lan(vsi->port_info, vsi->vsi_num, |
|---|
| 2847 | | - vsi->tc_cfg.ena_tc, max_txqs); |
|---|
| 2848 | | - if (ret) { |
|---|
| 2849 | | - dev_info(&pf->pdev->dev, "Failed VSI lan queue config\n"); |
|---|
| 2850 | | - goto err_rings; |
|---|
| 2851 | | - } |
|---|
| 2852 | | - |
|---|
| 2853 | | - return vsi; |
|---|
| 2854 | | - |
|---|
| 2855 | | -err_rings: |
|---|
| 2856 | | - ice_vsi_free_q_vectors(vsi); |
|---|
| 2857 | | -err_msix: |
|---|
| 2858 | | - if (vsi->netdev && vsi->netdev->reg_state == NETREG_REGISTERED) |
|---|
| 2859 | | - unregister_netdev(vsi->netdev); |
|---|
| 2860 | | -err_register_netdev: |
|---|
| 2861 | | - if (vsi->netdev) { |
|---|
| 2862 | | - free_netdev(vsi->netdev); |
|---|
| 2863 | | - vsi->netdev = NULL; |
|---|
| 2864 | | - } |
|---|
| 2865 | | -err_cfg_netdev: |
|---|
| 2866 | | - ret = ice_aq_free_vsi(&pf->hw, &ctxt, false, NULL); |
|---|
| 2867 | | - if (ret) |
|---|
| 2868 | | - dev_err(&vsi->back->pdev->dev, |
|---|
| 2869 | | - "Free VSI AQ call failed, err %d\n", ret); |
|---|
| 2870 | | -err_vsi: |
|---|
| 2871 | | - ice_vsi_put_qs(vsi); |
|---|
| 2872 | | -err_get_qs: |
|---|
| 2873 | | - pf->q_left_tx += vsi->alloc_txq; |
|---|
| 2874 | | - pf->q_left_rx += vsi->alloc_rxq; |
|---|
| 2875 | | - ice_vsi_clear(vsi); |
|---|
| 2876 | | - |
|---|
| 2877 | | - return NULL; |
|---|
| 3091 | + return ice_vsi_setup(pf, pi, ICE_VSI_PF, ICE_INVAL_VFID); |
|---|
| 2878 | 3092 | } |
|---|
| 2879 | 3093 | |
|---|
| 2880 | 3094 | /** |
|---|
| 2881 | | - * ice_vsi_add_vlan - Add vsi membership for given vlan |
|---|
| 2882 | | - * @vsi: the vsi being configured |
|---|
| 2883 | | - * @vid: vlan id to be added |
|---|
| 3095 | + * ice_ctrl_vsi_setup - Set up a control VSI |
|---|
| 3096 | + * @pf: board private structure |
|---|
| 3097 | + * @pi: pointer to the port_info instance |
|---|
| 3098 | + * |
|---|
| 3099 | + * Returns pointer to the successfully allocated VSI software struct |
|---|
| 3100 | + * on success, otherwise returns NULL on failure. |
|---|
| 2884 | 3101 | */ |
|---|
| 2885 | | -static int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid) |
|---|
| 3102 | +static struct ice_vsi * |
|---|
| 3103 | +ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) |
|---|
| 2886 | 3104 | { |
|---|
| 2887 | | - struct ice_fltr_list_entry *tmp; |
|---|
| 2888 | | - struct ice_pf *pf = vsi->back; |
|---|
| 2889 | | - LIST_HEAD(tmp_add_list); |
|---|
| 2890 | | - enum ice_status status; |
|---|
| 2891 | | - int err = 0; |
|---|
| 2892 | | - |
|---|
| 2893 | | - tmp = devm_kzalloc(&pf->pdev->dev, sizeof(*tmp), GFP_KERNEL); |
|---|
| 2894 | | - if (!tmp) |
|---|
| 2895 | | - return -ENOMEM; |
|---|
| 2896 | | - |
|---|
| 2897 | | - tmp->fltr_info.lkup_type = ICE_SW_LKUP_VLAN; |
|---|
| 2898 | | - tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI; |
|---|
| 2899 | | - tmp->fltr_info.flag = ICE_FLTR_TX; |
|---|
| 2900 | | - tmp->fltr_info.src = vsi->vsi_num; |
|---|
| 2901 | | - tmp->fltr_info.fwd_id.vsi_id = vsi->vsi_num; |
|---|
| 2902 | | - tmp->fltr_info.l_data.vlan.vlan_id = vid; |
|---|
| 2903 | | - |
|---|
| 2904 | | - INIT_LIST_HEAD(&tmp->list_entry); |
|---|
| 2905 | | - list_add(&tmp->list_entry, &tmp_add_list); |
|---|
| 2906 | | - |
|---|
| 2907 | | - status = ice_add_vlan(&pf->hw, &tmp_add_list); |
|---|
| 2908 | | - if (status) { |
|---|
| 2909 | | - err = -ENODEV; |
|---|
| 2910 | | - dev_err(&pf->pdev->dev, "Failure Adding VLAN %d on VSI %i\n", |
|---|
| 2911 | | - vid, vsi->vsi_num); |
|---|
| 2912 | | - } |
|---|
| 2913 | | - |
|---|
| 2914 | | - ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list); |
|---|
| 2915 | | - return err; |
|---|
| 3105 | + return ice_vsi_setup(pf, pi, ICE_VSI_CTRL, ICE_INVAL_VFID); |
|---|
| 2916 | 3106 | } |
|---|
| 2917 | 3107 | |
|---|
| 2918 | 3108 | /** |
|---|
| 2919 | | - * ice_vlan_rx_add_vid - Add a vlan id filter to HW offload |
|---|
| 3109 | + * ice_lb_vsi_setup - Set up a loopback VSI |
|---|
| 3110 | + * @pf: board private structure |
|---|
| 3111 | + * @pi: pointer to the port_info instance |
|---|
| 3112 | + * |
|---|
| 3113 | + * Returns pointer to the successfully allocated VSI software struct |
|---|
| 3114 | + * on success, otherwise returns NULL on failure. |
|---|
| 3115 | + */ |
|---|
| 3116 | +struct ice_vsi * |
|---|
| 3117 | +ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) |
|---|
| 3118 | +{ |
|---|
| 3119 | + return ice_vsi_setup(pf, pi, ICE_VSI_LB, ICE_INVAL_VFID); |
|---|
| 3120 | +} |
|---|
| 3121 | + |
|---|
| 3122 | +/** |
|---|
| 3123 | + * ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload |
|---|
| 2920 | 3124 | * @netdev: network interface to be adjusted |
|---|
| 2921 | 3125 | * @proto: unused protocol |
|---|
| 2922 | | - * @vid: vlan id to be added |
|---|
| 3126 | + * @vid: VLAN ID to be added |
|---|
| 2923 | 3127 | * |
|---|
| 2924 | | - * net_device_ops implementation for adding vlan ids |
|---|
| 3128 | + * net_device_ops implementation for adding VLAN IDs |
|---|
| 2925 | 3129 | */ |
|---|
| 2926 | | -static int ice_vlan_rx_add_vid(struct net_device *netdev, |
|---|
| 2927 | | - __always_unused __be16 proto, u16 vid) |
|---|
| 3130 | +static int |
|---|
| 3131 | +ice_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto, |
|---|
| 3132 | + u16 vid) |
|---|
| 2928 | 3133 | { |
|---|
| 2929 | 3134 | struct ice_netdev_priv *np = netdev_priv(netdev); |
|---|
| 2930 | 3135 | struct ice_vsi *vsi = np->vsi; |
|---|
| 2931 | | - int ret = 0; |
|---|
| 3136 | + int ret; |
|---|
| 2932 | 3137 | |
|---|
| 2933 | 3138 | if (vid >= VLAN_N_VID) { |
|---|
| 2934 | 3139 | netdev_err(netdev, "VLAN id requested %d is out of range %d\n", |
|---|
| .. | .. |
|---|
| 2939 | 3144 | if (vsi->info.pvid) |
|---|
| 2940 | 3145 | return -EINVAL; |
|---|
| 2941 | 3146 | |
|---|
| 2942 | | - /* Add all VLAN ids including 0 to the switch filter. VLAN id 0 is |
|---|
| 2943 | | - * needed to continue allowing all untagged packets since VLAN prune |
|---|
| 2944 | | - * list is applied to all packets by the switch |
|---|
| 2945 | | - */ |
|---|
| 2946 | | - ret = ice_vsi_add_vlan(vsi, vid); |
|---|
| 3147 | + /* VLAN 0 is added by default during load/reset */ |
|---|
| 3148 | + if (!vid) |
|---|
| 3149 | + return 0; |
|---|
| 2947 | 3150 | |
|---|
| 2948 | | - if (!ret) |
|---|
| 2949 | | - set_bit(vid, vsi->active_vlans); |
|---|
| 3151 | + /* Enable VLAN pruning when a VLAN other than 0 is added */ |
|---|
| 3152 | + if (!ice_vsi_is_vlan_pruning_ena(vsi)) { |
|---|
| 3153 | + ret = ice_cfg_vlan_pruning(vsi, true, false); |
|---|
| 3154 | + if (ret) |
|---|
| 3155 | + return ret; |
|---|
| 3156 | + } |
|---|
| 3157 | + |
|---|
| 3158 | + /* Add a switch rule for this VLAN ID so its corresponding VLAN tagged |
|---|
| 3159 | + * packets aren't pruned by the device's internal switch on Rx |
|---|
| 3160 | + */ |
|---|
| 3161 | + ret = ice_vsi_add_vlan(vsi, vid, ICE_FWD_TO_VSI); |
|---|
| 3162 | + if (!ret) { |
|---|
| 3163 | + vsi->vlan_ena = true; |
|---|
| 3164 | + set_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags); |
|---|
| 3165 | + } |
|---|
| 2950 | 3166 | |
|---|
| 2951 | 3167 | return ret; |
|---|
| 2952 | 3168 | } |
|---|
| 2953 | 3169 | |
|---|
| 2954 | 3170 | /** |
|---|
| 2955 | | - * ice_vsi_kill_vlan - Remove VSI membership for a given VLAN |
|---|
| 2956 | | - * @vsi: the VSI being configured |
|---|
| 2957 | | - * @vid: VLAN id to be removed |
|---|
| 2958 | | - */ |
|---|
| 2959 | | -static void ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid) |
|---|
| 2960 | | -{ |
|---|
| 2961 | | - struct ice_fltr_list_entry *list; |
|---|
| 2962 | | - struct ice_pf *pf = vsi->back; |
|---|
| 2963 | | - LIST_HEAD(tmp_add_list); |
|---|
| 2964 | | - |
|---|
| 2965 | | - list = devm_kzalloc(&pf->pdev->dev, sizeof(*list), GFP_KERNEL); |
|---|
| 2966 | | - if (!list) |
|---|
| 2967 | | - return; |
|---|
| 2968 | | - |
|---|
| 2969 | | - list->fltr_info.lkup_type = ICE_SW_LKUP_VLAN; |
|---|
| 2970 | | - list->fltr_info.fwd_id.vsi_id = vsi->vsi_num; |
|---|
| 2971 | | - list->fltr_info.fltr_act = ICE_FWD_TO_VSI; |
|---|
| 2972 | | - list->fltr_info.l_data.vlan.vlan_id = vid; |
|---|
| 2973 | | - list->fltr_info.flag = ICE_FLTR_TX; |
|---|
| 2974 | | - list->fltr_info.src = vsi->vsi_num; |
|---|
| 2975 | | - |
|---|
| 2976 | | - INIT_LIST_HEAD(&list->list_entry); |
|---|
| 2977 | | - list_add(&list->list_entry, &tmp_add_list); |
|---|
| 2978 | | - |
|---|
| 2979 | | - if (ice_remove_vlan(&pf->hw, &tmp_add_list)) |
|---|
| 2980 | | - dev_err(&pf->pdev->dev, "Error removing VLAN %d on vsi %i\n", |
|---|
| 2981 | | - vid, vsi->vsi_num); |
|---|
| 2982 | | - |
|---|
| 2983 | | - ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list); |
|---|
| 2984 | | -} |
|---|
| 2985 | | - |
|---|
| 2986 | | -/** |
|---|
| 2987 | | - * ice_vlan_rx_kill_vid - Remove a vlan id filter from HW offload |
|---|
| 3171 | + * ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload |
|---|
| 2988 | 3172 | * @netdev: network interface to be adjusted |
|---|
| 2989 | 3173 | * @proto: unused protocol |
|---|
| 2990 | | - * @vid: vlan id to be removed |
|---|
| 3174 | + * @vid: VLAN ID to be removed |
|---|
| 2991 | 3175 | * |
|---|
| 2992 | | - * net_device_ops implementation for removing vlan ids |
|---|
| 3176 | + * net_device_ops implementation for removing VLAN IDs |
|---|
| 2993 | 3177 | */ |
|---|
| 2994 | | -static int ice_vlan_rx_kill_vid(struct net_device *netdev, |
|---|
| 2995 | | - __always_unused __be16 proto, u16 vid) |
|---|
| 3178 | +static int |
|---|
| 3179 | +ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto, |
|---|
| 3180 | + u16 vid) |
|---|
| 2996 | 3181 | { |
|---|
| 2997 | 3182 | struct ice_netdev_priv *np = netdev_priv(netdev); |
|---|
| 2998 | 3183 | struct ice_vsi *vsi = np->vsi; |
|---|
| 3184 | + int ret; |
|---|
| 2999 | 3185 | |
|---|
| 3000 | 3186 | if (vsi->info.pvid) |
|---|
| 3001 | 3187 | return -EINVAL; |
|---|
| 3002 | 3188 | |
|---|
| 3003 | | - /* return code is ignored as there is nothing a user |
|---|
| 3004 | | - * can do about failure to remove and a log message was |
|---|
| 3005 | | - * already printed from the other function |
|---|
| 3189 | + /* don't allow removal of VLAN 0 */ |
|---|
| 3190 | + if (!vid) |
|---|
| 3191 | + return 0; |
|---|
| 3192 | + |
|---|
| 3193 | + /* Make sure ice_vsi_kill_vlan is successful before updating VLAN |
|---|
| 3194 | + * information |
|---|
| 3006 | 3195 | */ |
|---|
| 3007 | | - ice_vsi_kill_vlan(vsi, vid); |
|---|
| 3196 | + ret = ice_vsi_kill_vlan(vsi, vid); |
|---|
| 3197 | + if (ret) |
|---|
| 3198 | + return ret; |
|---|
| 3008 | 3199 | |
|---|
| 3009 | | - clear_bit(vid, vsi->active_vlans); |
|---|
| 3200 | + /* Disable pruning when VLAN 0 is the only VLAN rule */ |
|---|
| 3201 | + if (vsi->num_vlan == 1 && ice_vsi_is_vlan_pruning_ena(vsi)) |
|---|
| 3202 | + ret = ice_cfg_vlan_pruning(vsi, false, false); |
|---|
| 3010 | 3203 | |
|---|
| 3011 | | - return 0; |
|---|
| 3204 | + vsi->vlan_ena = false; |
|---|
| 3205 | + set_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags); |
|---|
| 3206 | + return ret; |
|---|
| 3012 | 3207 | } |
|---|
| 3013 | 3208 | |
|---|
| 3014 | 3209 | /** |
|---|
| .. | .. |
|---|
| 3019 | 3214 | */ |
|---|
| 3020 | 3215 | static int ice_setup_pf_sw(struct ice_pf *pf) |
|---|
| 3021 | 3216 | { |
|---|
| 3022 | | - LIST_HEAD(tmp_add_list); |
|---|
| 3023 | | - u8 broadcast[ETH_ALEN]; |
|---|
| 3024 | 3217 | struct ice_vsi *vsi; |
|---|
| 3025 | 3218 | int status = 0; |
|---|
| 3026 | 3219 | |
|---|
| 3027 | | - if (!ice_is_reset_recovery_pending(pf->state)) { |
|---|
| 3028 | | - vsi = ice_vsi_setup(pf, ICE_VSI_PF, pf->hw.port_info); |
|---|
| 3029 | | - if (!vsi) { |
|---|
| 3030 | | - status = -ENOMEM; |
|---|
| 3031 | | - goto error_exit; |
|---|
| 3032 | | - } |
|---|
| 3033 | | - } else { |
|---|
| 3034 | | - vsi = pf->vsi[0]; |
|---|
| 3035 | | - status = ice_vsi_reinit_setup(vsi); |
|---|
| 3036 | | - if (status < 0) |
|---|
| 3037 | | - return -EIO; |
|---|
| 3038 | | - } |
|---|
| 3220 | + if (ice_is_reset_in_progress(pf->state)) |
|---|
| 3221 | + return -EBUSY; |
|---|
| 3039 | 3222 | |
|---|
| 3040 | | - /* tmp_add_list contains a list of MAC addresses for which MAC |
|---|
| 3041 | | - * filters need to be programmed. Add the VSI's unicast MAC to |
|---|
| 3042 | | - * this list |
|---|
| 3043 | | - */ |
|---|
| 3044 | | - status = ice_add_mac_to_list(vsi, &tmp_add_list, |
|---|
| 3045 | | - vsi->port_info->mac.perm_addr); |
|---|
| 3046 | | - if (status) |
|---|
| 3047 | | - goto error_exit; |
|---|
| 3223 | + vsi = ice_pf_vsi_setup(pf, pf->hw.port_info); |
|---|
| 3224 | + if (!vsi) |
|---|
| 3225 | + return -ENOMEM; |
|---|
| 3048 | 3226 | |
|---|
| 3049 | | - /* VSI needs to receive broadcast traffic, so add the broadcast |
|---|
| 3050 | | - * MAC address to the list. |
|---|
| 3051 | | - */ |
|---|
| 3052 | | - eth_broadcast_addr(broadcast); |
|---|
| 3053 | | - status = ice_add_mac_to_list(vsi, &tmp_add_list, broadcast); |
|---|
| 3054 | | - if (status) |
|---|
| 3055 | | - goto error_exit; |
|---|
| 3056 | | - |
|---|
| 3057 | | - /* program MAC filters for entries in tmp_add_list */ |
|---|
| 3058 | | - status = ice_add_mac(&pf->hw, &tmp_add_list); |
|---|
| 3227 | + status = ice_cfg_netdev(vsi); |
|---|
| 3059 | 3228 | if (status) { |
|---|
| 3060 | | - dev_err(&pf->pdev->dev, "Could not add MAC filters\n"); |
|---|
| 3061 | | - status = -ENOMEM; |
|---|
| 3062 | | - goto error_exit; |
|---|
| 3229 | + status = -ENODEV; |
|---|
| 3230 | + goto unroll_vsi_setup; |
|---|
| 3063 | 3231 | } |
|---|
| 3232 | + /* netdev has to be configured before setting frame size */ |
|---|
| 3233 | + ice_vsi_cfg_frame_size(vsi); |
|---|
| 3064 | 3234 | |
|---|
| 3065 | | - ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list); |
|---|
| 3235 | + /* Setup DCB netlink interface */ |
|---|
| 3236 | + ice_dcbnl_setup(vsi); |
|---|
| 3237 | + |
|---|
| 3238 | + /* registering the NAPI handler requires both the queues and |
|---|
| 3239 | + * netdev to be created, which are done in ice_pf_vsi_setup() |
|---|
| 3240 | + * and ice_cfg_netdev() respectively |
|---|
| 3241 | + */ |
|---|
| 3242 | + ice_napi_add(vsi); |
|---|
| 3243 | + |
|---|
| 3244 | + status = ice_set_cpu_rx_rmap(vsi); |
|---|
| 3245 | + if (status) { |
|---|
| 3246 | + dev_err(ice_pf_to_dev(pf), "Failed to set CPU Rx map VSI %d error %d\n", |
|---|
| 3247 | + vsi->vsi_num, status); |
|---|
| 3248 | + status = -EINVAL; |
|---|
| 3249 | + goto unroll_napi_add; |
|---|
| 3250 | + } |
|---|
| 3251 | + status = ice_init_mac_fltr(pf); |
|---|
| 3252 | + if (status) |
|---|
| 3253 | + goto free_cpu_rx_map; |
|---|
| 3254 | + |
|---|
| 3066 | 3255 | return status; |
|---|
| 3067 | 3256 | |
|---|
| 3068 | | -error_exit: |
|---|
| 3069 | | - ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list); |
|---|
| 3257 | +free_cpu_rx_map: |
|---|
| 3258 | + ice_free_cpu_rx_rmap(vsi); |
|---|
| 3070 | 3259 | |
|---|
| 3260 | +unroll_napi_add: |
|---|
| 3071 | 3261 | if (vsi) { |
|---|
| 3072 | | - ice_vsi_free_q_vectors(vsi); |
|---|
| 3073 | | - if (vsi->netdev && vsi->netdev->reg_state == NETREG_REGISTERED) |
|---|
| 3074 | | - unregister_netdev(vsi->netdev); |
|---|
| 3262 | + ice_napi_del(vsi); |
|---|
| 3075 | 3263 | if (vsi->netdev) { |
|---|
| 3264 | + if (vsi->netdev->reg_state == NETREG_REGISTERED) |
|---|
| 3265 | + unregister_netdev(vsi->netdev); |
|---|
| 3076 | 3266 | free_netdev(vsi->netdev); |
|---|
| 3077 | 3267 | vsi->netdev = NULL; |
|---|
| 3078 | 3268 | } |
|---|
| 3079 | | - |
|---|
| 3080 | | - ice_vsi_delete(vsi); |
|---|
| 3081 | | - ice_vsi_put_qs(vsi); |
|---|
| 3082 | | - pf->q_left_tx += vsi->alloc_txq; |
|---|
| 3083 | | - pf->q_left_rx += vsi->alloc_rxq; |
|---|
| 3084 | | - ice_vsi_clear(vsi); |
|---|
| 3085 | 3269 | } |
|---|
| 3270 | + |
|---|
| 3271 | +unroll_vsi_setup: |
|---|
| 3272 | + ice_vsi_release(vsi); |
|---|
| 3086 | 3273 | return status; |
|---|
| 3087 | 3274 | } |
|---|
| 3088 | 3275 | |
|---|
| 3089 | 3276 | /** |
|---|
| 3090 | | - * ice_determine_q_usage - Calculate queue distribution |
|---|
| 3091 | | - * @pf: board private structure |
|---|
| 3092 | | - * |
|---|
| 3093 | | - * Return -ENOMEM if we don't get enough queues for all ports |
|---|
| 3277 | + * ice_get_avail_q_count - Get count of queues still available (not in use) |
|---|
| 3278 | + * @pf_qmap: bitmap to get queue use count from |
|---|
| 3279 | + * @lock: pointer to a mutex that protects access to pf_qmap |
|---|
| 3280 | + * @size: size of the bitmap |
|---|
| 3094 | 3281 | */ |
|---|
| 3095 | | -static void ice_determine_q_usage(struct ice_pf *pf) |
|---|
| 3282 | +static u16 |
|---|
| 3283 | +ice_get_avail_q_count(unsigned long *pf_qmap, struct mutex *lock, u16 size) |
|---|
| 3096 | 3284 | { |
|---|
| 3097 | | - u16 q_left_tx, q_left_rx; |
|---|
| 3285 | + unsigned long bit; |
|---|
| 3286 | + u16 count = 0; |
|---|
| 3098 | 3287 | |
|---|
| 3099 | | - q_left_tx = pf->hw.func_caps.common_cap.num_txq; |
|---|
| 3100 | | - q_left_rx = pf->hw.func_caps.common_cap.num_rxq; |
|---|
| 3288 | + mutex_lock(lock); |
|---|
| 3289 | + for_each_clear_bit(bit, pf_qmap, size) |
|---|
| 3290 | + count++; |
|---|
| 3291 | + mutex_unlock(lock); |
|---|
| 3101 | 3292 | |
|---|
| 3102 | | - pf->num_lan_tx = min_t(int, q_left_tx, num_online_cpus()); |
|---|
| 3293 | + return count; |
|---|
| 3294 | +} |
|---|
| 3103 | 3295 | |
|---|
| 3104 | | - /* only 1 rx queue unless RSS is enabled */ |
|---|
| 3105 | | - if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) |
|---|
| 3106 | | - pf->num_lan_rx = 1; |
|---|
| 3107 | | - else |
|---|
| 3108 | | - pf->num_lan_rx = min_t(int, q_left_rx, num_online_cpus()); |
|---|
| 3296 | +/** |
|---|
| 3297 | + * ice_get_avail_txq_count - Get count of Tx queues in use |
|---|
| 3298 | + * @pf: pointer to an ice_pf instance |
|---|
| 3299 | + */ |
|---|
| 3300 | +u16 ice_get_avail_txq_count(struct ice_pf *pf) |
|---|
| 3301 | +{ |
|---|
| 3302 | + return ice_get_avail_q_count(pf->avail_txqs, &pf->avail_q_mutex, |
|---|
| 3303 | + pf->max_pf_txqs); |
|---|
| 3304 | +} |
|---|
| 3109 | 3305 | |
|---|
| 3110 | | - pf->q_left_tx = q_left_tx - pf->num_lan_tx; |
|---|
| 3111 | | - pf->q_left_rx = q_left_rx - pf->num_lan_rx; |
|---|
| 3306 | +/** |
|---|
| 3307 | + * ice_get_avail_rxq_count - Get count of Rx queues still available (not in use) |
|---|
| 3308 | + * @pf: pointer to an ice_pf instance |
|---|
| 3309 | + */ |
|---|
| 3310 | +u16 ice_get_avail_rxq_count(struct ice_pf *pf) |
|---|
| 3311 | +{ |
|---|
| 3312 | + return ice_get_avail_q_count(pf->avail_rxqs, &pf->avail_q_mutex, |
|---|
| 3313 | + pf->max_pf_rxqs); |
|---|
| 3112 | 3314 | } |
|---|
| 3113 | 3315 | |
|---|
| 3114 | 3316 | /** |
|---|
| .. | .. |
|---|
| 3117 | 3319 | */ |
|---|
| 3118 | 3320 | static void ice_deinit_pf(struct ice_pf *pf) |
|---|
| 3119 | 3321 | { |
|---|
| 3120 | | - if (pf->serv_tmr.function) |
|---|
| 3121 | | - del_timer_sync(&pf->serv_tmr); |
|---|
| 3122 | | - if (pf->serv_task.func) |
|---|
| 3123 | | - cancel_work_sync(&pf->serv_task); |
|---|
| 3322 | + ice_service_task_stop(pf); |
|---|
| 3124 | 3323 | mutex_destroy(&pf->sw_mutex); |
|---|
| 3324 | + mutex_destroy(&pf->tc_mutex); |
|---|
| 3125 | 3325 | mutex_destroy(&pf->avail_q_mutex); |
|---|
| 3326 | + |
|---|
| 3327 | + if (pf->avail_txqs) { |
|---|
| 3328 | + bitmap_free(pf->avail_txqs); |
|---|
| 3329 | + pf->avail_txqs = NULL; |
|---|
| 3330 | + } |
|---|
| 3331 | + |
|---|
| 3332 | + if (pf->avail_rxqs) { |
|---|
| 3333 | + bitmap_free(pf->avail_rxqs); |
|---|
| 3334 | + pf->avail_rxqs = NULL; |
|---|
| 3335 | + } |
|---|
| 3336 | +} |
|---|
| 3337 | + |
|---|
| 3338 | +/** |
|---|
| 3339 | + * ice_set_pf_caps - set PFs capability flags |
|---|
| 3340 | + * @pf: pointer to the PF instance |
|---|
| 3341 | + */ |
|---|
| 3342 | +static void ice_set_pf_caps(struct ice_pf *pf) |
|---|
| 3343 | +{ |
|---|
| 3344 | + struct ice_hw_func_caps *func_caps = &pf->hw.func_caps; |
|---|
| 3345 | + |
|---|
| 3346 | + clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); |
|---|
| 3347 | + if (func_caps->common_cap.dcb) |
|---|
| 3348 | + set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); |
|---|
| 3349 | + clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags); |
|---|
| 3350 | + if (func_caps->common_cap.sr_iov_1_1) { |
|---|
| 3351 | + set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags); |
|---|
| 3352 | + pf->num_vfs_supported = min_t(int, func_caps->num_allocd_vfs, |
|---|
| 3353 | + ICE_MAX_VF_COUNT); |
|---|
| 3354 | + } |
|---|
| 3355 | + clear_bit(ICE_FLAG_RSS_ENA, pf->flags); |
|---|
| 3356 | + if (func_caps->common_cap.rss_table_size) |
|---|
| 3357 | + set_bit(ICE_FLAG_RSS_ENA, pf->flags); |
|---|
| 3358 | + |
|---|
| 3359 | + clear_bit(ICE_FLAG_FD_ENA, pf->flags); |
|---|
| 3360 | + if (func_caps->fd_fltr_guar > 0 || func_caps->fd_fltr_best_effort > 0) { |
|---|
| 3361 | + u16 unused; |
|---|
| 3362 | + |
|---|
| 3363 | + /* ctrl_vsi_idx will be set to a valid value when flow director |
|---|
| 3364 | + * is setup by ice_init_fdir |
|---|
| 3365 | + */ |
|---|
| 3366 | + pf->ctrl_vsi_idx = ICE_NO_VSI; |
|---|
| 3367 | + set_bit(ICE_FLAG_FD_ENA, pf->flags); |
|---|
| 3368 | + /* force guaranteed filter pool for PF */ |
|---|
| 3369 | + ice_alloc_fd_guar_item(&pf->hw, &unused, |
|---|
| 3370 | + func_caps->fd_fltr_guar); |
|---|
| 3371 | + /* force shared filter pool for PF */ |
|---|
| 3372 | + ice_alloc_fd_shrd_item(&pf->hw, &unused, |
|---|
| 3373 | + func_caps->fd_fltr_best_effort); |
|---|
| 3374 | + } |
|---|
| 3375 | + |
|---|
| 3376 | + pf->max_pf_txqs = func_caps->common_cap.num_txq; |
|---|
| 3377 | + pf->max_pf_rxqs = func_caps->common_cap.num_rxq; |
|---|
| 3126 | 3378 | } |
|---|
| 3127 | 3379 | |
|---|
| 3128 | 3380 | /** |
|---|
| 3129 | 3381 | * ice_init_pf - Initialize general software structures (struct ice_pf) |
|---|
| 3130 | 3382 | * @pf: board private structure to initialize |
|---|
| 3131 | 3383 | */ |
|---|
| 3132 | | -static void ice_init_pf(struct ice_pf *pf) |
|---|
| 3384 | +static int ice_init_pf(struct ice_pf *pf) |
|---|
| 3133 | 3385 | { |
|---|
| 3134 | | - bitmap_zero(pf->flags, ICE_PF_FLAGS_NBITS); |
|---|
| 3135 | | - set_bit(ICE_FLAG_MSIX_ENA, pf->flags); |
|---|
| 3386 | + ice_set_pf_caps(pf); |
|---|
| 3136 | 3387 | |
|---|
| 3137 | 3388 | mutex_init(&pf->sw_mutex); |
|---|
| 3138 | | - mutex_init(&pf->avail_q_mutex); |
|---|
| 3389 | + mutex_init(&pf->tc_mutex); |
|---|
| 3139 | 3390 | |
|---|
| 3140 | | - /* Clear avail_[t|r]x_qs bitmaps (set all to avail) */ |
|---|
| 3141 | | - mutex_lock(&pf->avail_q_mutex); |
|---|
| 3142 | | - bitmap_zero(pf->avail_txqs, ICE_MAX_TXQS); |
|---|
| 3143 | | - bitmap_zero(pf->avail_rxqs, ICE_MAX_RXQS); |
|---|
| 3144 | | - mutex_unlock(&pf->avail_q_mutex); |
|---|
| 3145 | | - |
|---|
| 3146 | | - if (pf->hw.func_caps.common_cap.rss_table_size) |
|---|
| 3147 | | - set_bit(ICE_FLAG_RSS_ENA, pf->flags); |
|---|
| 3391 | + INIT_HLIST_HEAD(&pf->aq_wait_list); |
|---|
| 3392 | + spin_lock_init(&pf->aq_wait_lock); |
|---|
| 3393 | + init_waitqueue_head(&pf->aq_wait_queue); |
|---|
| 3148 | 3394 | |
|---|
| 3149 | 3395 | /* setup service timer and periodic service task */ |
|---|
| 3150 | 3396 | timer_setup(&pf->serv_tmr, ice_service_timer, 0); |
|---|
| 3151 | 3397 | pf->serv_tmr_period = HZ; |
|---|
| 3152 | 3398 | INIT_WORK(&pf->serv_task, ice_service_task); |
|---|
| 3153 | 3399 | clear_bit(__ICE_SERVICE_SCHED, pf->state); |
|---|
| 3400 | + |
|---|
| 3401 | + mutex_init(&pf->avail_q_mutex); |
|---|
| 3402 | + pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL); |
|---|
| 3403 | + if (!pf->avail_txqs) |
|---|
| 3404 | + return -ENOMEM; |
|---|
| 3405 | + |
|---|
| 3406 | + pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL); |
|---|
| 3407 | + if (!pf->avail_rxqs) { |
|---|
| 3408 | + bitmap_free(pf->avail_txqs); |
|---|
| 3409 | + pf->avail_txqs = NULL; |
|---|
| 3410 | + return -ENOMEM; |
|---|
| 3411 | + } |
|---|
| 3412 | + |
|---|
| 3413 | + return 0; |
|---|
| 3154 | 3414 | } |
|---|
| 3155 | 3415 | |
|---|
| 3156 | 3416 | /** |
|---|
| .. | .. |
|---|
| 3162 | 3422 | */ |
|---|
| 3163 | 3423 | static int ice_ena_msix_range(struct ice_pf *pf) |
|---|
| 3164 | 3424 | { |
|---|
| 3425 | + struct device *dev = ice_pf_to_dev(pf); |
|---|
| 3165 | 3426 | int v_left, v_actual, v_budget = 0; |
|---|
| 3166 | 3427 | int needed, err, i; |
|---|
| 3167 | 3428 | |
|---|
| .. | .. |
|---|
| 3169 | 3430 | |
|---|
| 3170 | 3431 | /* reserve one vector for miscellaneous handler */ |
|---|
| 3171 | 3432 | needed = 1; |
|---|
| 3433 | + if (v_left < needed) |
|---|
| 3434 | + goto no_hw_vecs_left_err; |
|---|
| 3172 | 3435 | v_budget += needed; |
|---|
| 3173 | 3436 | v_left -= needed; |
|---|
| 3174 | 3437 | |
|---|
| 3175 | 3438 | /* reserve vectors for LAN traffic */ |
|---|
| 3176 | | - pf->num_lan_msix = min_t(int, num_online_cpus(), v_left); |
|---|
| 3177 | | - v_budget += pf->num_lan_msix; |
|---|
| 3439 | + needed = min_t(int, num_online_cpus(), v_left); |
|---|
| 3440 | + if (v_left < needed) |
|---|
| 3441 | + goto no_hw_vecs_left_err; |
|---|
| 3442 | + pf->num_lan_msix = needed; |
|---|
| 3443 | + v_budget += needed; |
|---|
| 3444 | + v_left -= needed; |
|---|
| 3178 | 3445 | |
|---|
| 3179 | | - pf->msix_entries = devm_kcalloc(&pf->pdev->dev, v_budget, |
|---|
| 3180 | | - sizeof(struct msix_entry), GFP_KERNEL); |
|---|
| 3446 | + /* reserve one vector for flow director */ |
|---|
| 3447 | + if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) { |
|---|
| 3448 | + needed = ICE_FDIR_MSIX; |
|---|
| 3449 | + if (v_left < needed) |
|---|
| 3450 | + goto no_hw_vecs_left_err; |
|---|
| 3451 | + v_budget += needed; |
|---|
| 3452 | + v_left -= needed; |
|---|
| 3453 | + } |
|---|
| 3454 | + |
|---|
| 3455 | + pf->msix_entries = devm_kcalloc(dev, v_budget, |
|---|
| 3456 | + sizeof(*pf->msix_entries), GFP_KERNEL); |
|---|
| 3181 | 3457 | |
|---|
| 3182 | 3458 | if (!pf->msix_entries) { |
|---|
| 3183 | 3459 | err = -ENOMEM; |
|---|
| .. | .. |
|---|
| 3192 | 3468 | ICE_MIN_MSIX, v_budget); |
|---|
| 3193 | 3469 | |
|---|
| 3194 | 3470 | if (v_actual < 0) { |
|---|
| 3195 | | - dev_err(&pf->pdev->dev, "unable to reserve MSI-X vectors\n"); |
|---|
| 3471 | + dev_err(dev, "unable to reserve MSI-X vectors\n"); |
|---|
| 3196 | 3472 | err = v_actual; |
|---|
| 3197 | 3473 | goto msix_err; |
|---|
| 3198 | 3474 | } |
|---|
| 3199 | 3475 | |
|---|
| 3200 | 3476 | if (v_actual < v_budget) { |
|---|
| 3201 | | - dev_warn(&pf->pdev->dev, |
|---|
| 3202 | | - "not enough vectors. requested = %d, obtained = %d\n", |
|---|
| 3477 | + dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n", |
|---|
| 3203 | 3478 | v_budget, v_actual); |
|---|
| 3204 | | - if (v_actual >= (pf->num_lan_msix + 1)) { |
|---|
| 3205 | | - pf->num_avail_msix = v_actual - (pf->num_lan_msix + 1); |
|---|
| 3206 | | - } else if (v_actual >= 2) { |
|---|
| 3207 | | - pf->num_lan_msix = 1; |
|---|
| 3208 | | - pf->num_avail_msix = v_actual - 2; |
|---|
| 3209 | | - } else { |
|---|
| 3479 | + |
|---|
| 3480 | + if (v_actual < ICE_MIN_MSIX) { |
|---|
| 3481 | + /* error if we can't get minimum vectors */ |
|---|
| 3210 | 3482 | pci_disable_msix(pf->pdev); |
|---|
| 3211 | 3483 | err = -ERANGE; |
|---|
| 3212 | 3484 | goto msix_err; |
|---|
| 3485 | + } else { |
|---|
| 3486 | + pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX; |
|---|
| 3213 | 3487 | } |
|---|
| 3214 | 3488 | } |
|---|
| 3215 | 3489 | |
|---|
| 3216 | 3490 | return v_actual; |
|---|
| 3217 | 3491 | |
|---|
| 3218 | 3492 | msix_err: |
|---|
| 3219 | | - devm_kfree(&pf->pdev->dev, pf->msix_entries); |
|---|
| 3493 | + devm_kfree(dev, pf->msix_entries); |
|---|
| 3220 | 3494 | goto exit_err; |
|---|
| 3221 | 3495 | |
|---|
| 3496 | +no_hw_vecs_left_err: |
|---|
| 3497 | + dev_err(dev, "not enough device MSI-X vectors. requested = %d, available = %d\n", |
|---|
| 3498 | + needed, v_left); |
|---|
| 3499 | + err = -ERANGE; |
|---|
| 3222 | 3500 | exit_err: |
|---|
| 3223 | 3501 | pf->num_lan_msix = 0; |
|---|
| 3224 | | - clear_bit(ICE_FLAG_MSIX_ENA, pf->flags); |
|---|
| 3225 | 3502 | return err; |
|---|
| 3226 | 3503 | } |
|---|
| 3227 | 3504 | |
|---|
| .. | .. |
|---|
| 3232 | 3509 | static void ice_dis_msix(struct ice_pf *pf) |
|---|
| 3233 | 3510 | { |
|---|
| 3234 | 3511 | pci_disable_msix(pf->pdev); |
|---|
| 3235 | | - devm_kfree(&pf->pdev->dev, pf->msix_entries); |
|---|
| 3512 | + devm_kfree(ice_pf_to_dev(pf), pf->msix_entries); |
|---|
| 3236 | 3513 | pf->msix_entries = NULL; |
|---|
| 3237 | | - clear_bit(ICE_FLAG_MSIX_ENA, pf->flags); |
|---|
| 3238 | | -} |
|---|
| 3239 | | - |
|---|
| 3240 | | -/** |
|---|
| 3241 | | - * ice_init_interrupt_scheme - Determine proper interrupt scheme |
|---|
| 3242 | | - * @pf: board private structure to initialize |
|---|
| 3243 | | - */ |
|---|
| 3244 | | -static int ice_init_interrupt_scheme(struct ice_pf *pf) |
|---|
| 3245 | | -{ |
|---|
| 3246 | | - int vectors = 0; |
|---|
| 3247 | | - ssize_t size; |
|---|
| 3248 | | - |
|---|
| 3249 | | - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) |
|---|
| 3250 | | - vectors = ice_ena_msix_range(pf); |
|---|
| 3251 | | - else |
|---|
| 3252 | | - return -ENODEV; |
|---|
| 3253 | | - |
|---|
| 3254 | | - if (vectors < 0) |
|---|
| 3255 | | - return vectors; |
|---|
| 3256 | | - |
|---|
| 3257 | | - /* set up vector assignment tracking */ |
|---|
| 3258 | | - size = sizeof(struct ice_res_tracker) + (sizeof(u16) * vectors); |
|---|
| 3259 | | - |
|---|
| 3260 | | - pf->irq_tracker = devm_kzalloc(&pf->pdev->dev, size, GFP_KERNEL); |
|---|
| 3261 | | - if (!pf->irq_tracker) { |
|---|
| 3262 | | - ice_dis_msix(pf); |
|---|
| 3263 | | - return -ENOMEM; |
|---|
| 3264 | | - } |
|---|
| 3265 | | - |
|---|
| 3266 | | - pf->irq_tracker->num_entries = vectors; |
|---|
| 3267 | | - |
|---|
| 3268 | | - return 0; |
|---|
| 3269 | 3514 | } |
|---|
| 3270 | 3515 | |
|---|
| 3271 | 3516 | /** |
|---|
| .. | .. |
|---|
| 3274 | 3519 | */ |
|---|
| 3275 | 3520 | static void ice_clear_interrupt_scheme(struct ice_pf *pf) |
|---|
| 3276 | 3521 | { |
|---|
| 3277 | | - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) |
|---|
| 3278 | | - ice_dis_msix(pf); |
|---|
| 3522 | + ice_dis_msix(pf); |
|---|
| 3279 | 3523 | |
|---|
| 3280 | 3524 | if (pf->irq_tracker) { |
|---|
| 3281 | | - devm_kfree(&pf->pdev->dev, pf->irq_tracker); |
|---|
| 3525 | + devm_kfree(ice_pf_to_dev(pf), pf->irq_tracker); |
|---|
| 3282 | 3526 | pf->irq_tracker = NULL; |
|---|
| 3283 | 3527 | } |
|---|
| 3528 | +} |
|---|
| 3529 | + |
|---|
| 3530 | +/** |
|---|
| 3531 | + * ice_init_interrupt_scheme - Determine proper interrupt scheme |
|---|
| 3532 | + * @pf: board private structure to initialize |
|---|
| 3533 | + */ |
|---|
| 3534 | +static int ice_init_interrupt_scheme(struct ice_pf *pf) |
|---|
| 3535 | +{ |
|---|
| 3536 | + int vectors; |
|---|
| 3537 | + |
|---|
| 3538 | + vectors = ice_ena_msix_range(pf); |
|---|
| 3539 | + |
|---|
| 3540 | + if (vectors < 0) |
|---|
| 3541 | + return vectors; |
|---|
| 3542 | + |
|---|
| 3543 | + /* set up vector assignment tracking */ |
|---|
| 3544 | + pf->irq_tracker = |
|---|
| 3545 | + devm_kzalloc(ice_pf_to_dev(pf), sizeof(*pf->irq_tracker) + |
|---|
| 3546 | + (sizeof(u16) * vectors), GFP_KERNEL); |
|---|
| 3547 | + if (!pf->irq_tracker) { |
|---|
| 3548 | + ice_dis_msix(pf); |
|---|
| 3549 | + return -ENOMEM; |
|---|
| 3550 | + } |
|---|
| 3551 | + |
|---|
| 3552 | + /* populate SW interrupts pool with number of OS granted IRQs. */ |
|---|
| 3553 | + pf->num_avail_sw_msix = (u16)vectors; |
|---|
| 3554 | + pf->irq_tracker->num_entries = (u16)vectors; |
|---|
| 3555 | + pf->irq_tracker->end = pf->irq_tracker->num_entries; |
|---|
| 3556 | + |
|---|
| 3557 | + return 0; |
|---|
| 3558 | +} |
|---|
| 3559 | + |
|---|
| 3560 | +/** |
|---|
| 3561 | + * ice_is_wol_supported - check if WoL is supported |
|---|
| 3562 | + * @hw: pointer to hardware info |
|---|
| 3563 | + * |
|---|
| 3564 | + * Check if WoL is supported based on the HW configuration. |
|---|
| 3565 | + * Returns true if NVM supports and enables WoL for this port, false otherwise |
|---|
| 3566 | + */ |
|---|
| 3567 | +bool ice_is_wol_supported(struct ice_hw *hw) |
|---|
| 3568 | +{ |
|---|
| 3569 | + u16 wol_ctrl; |
|---|
| 3570 | + |
|---|
| 3571 | + /* A bit set to 1 in the NVM Software Reserved Word 2 (WoL control |
|---|
| 3572 | + * word) indicates WoL is not supported on the corresponding PF ID. |
|---|
| 3573 | + */ |
|---|
| 3574 | + if (ice_read_sr_word(hw, ICE_SR_NVM_WOL_CFG, &wol_ctrl)) |
|---|
| 3575 | + return false; |
|---|
| 3576 | + |
|---|
| 3577 | + return !(BIT(hw->port_info->lport) & wol_ctrl); |
|---|
| 3578 | +} |
|---|
| 3579 | + |
|---|
| 3580 | +/** |
|---|
| 3581 | + * ice_vsi_recfg_qs - Change the number of queues on a VSI |
|---|
| 3582 | + * @vsi: VSI being changed |
|---|
| 3583 | + * @new_rx: new number of Rx queues |
|---|
| 3584 | + * @new_tx: new number of Tx queues |
|---|
| 3585 | + * |
|---|
| 3586 | + * Only change the number of queues if new_tx or new_rx is non-zero. |
|---|
| 3587 | + * |
|---|
| 3588 | + * Returns 0 on success. |
|---|
| 3589 | + */ |
|---|
| 3590 | +int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx) |
|---|
| 3591 | +{ |
|---|
| 3592 | + struct ice_pf *pf = vsi->back; |
|---|
| 3593 | + int err = 0, timeout = 50; |
|---|
| 3594 | + |
|---|
| 3595 | + if (!new_rx && !new_tx) |
|---|
| 3596 | + return -EINVAL; |
|---|
| 3597 | + |
|---|
| 3598 | + while (test_and_set_bit(__ICE_CFG_BUSY, pf->state)) { |
|---|
| 3599 | + timeout--; |
|---|
| 3600 | + if (!timeout) |
|---|
| 3601 | + return -EBUSY; |
|---|
| 3602 | + usleep_range(1000, 2000); |
|---|
| 3603 | + } |
|---|
| 3604 | + |
|---|
| 3605 | + if (new_tx) |
|---|
| 3606 | + vsi->req_txq = (u16)new_tx; |
|---|
| 3607 | + if (new_rx) |
|---|
| 3608 | + vsi->req_rxq = (u16)new_rx; |
|---|
| 3609 | + |
|---|
| 3610 | + /* set for the next time the netdev is started */ |
|---|
| 3611 | + if (!netif_running(vsi->netdev)) { |
|---|
| 3612 | + ice_vsi_rebuild(vsi, false); |
|---|
| 3613 | + dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n"); |
|---|
| 3614 | + goto done; |
|---|
| 3615 | + } |
|---|
| 3616 | + |
|---|
| 3617 | + ice_vsi_close(vsi); |
|---|
| 3618 | + ice_vsi_rebuild(vsi, false); |
|---|
| 3619 | + ice_pf_dcb_recfg(pf); |
|---|
| 3620 | + ice_vsi_open(vsi); |
|---|
| 3621 | +done: |
|---|
| 3622 | + clear_bit(__ICE_CFG_BUSY, pf->state); |
|---|
| 3623 | + return err; |
|---|
| 3624 | +} |
|---|
| 3625 | + |
|---|
| 3626 | +/** |
|---|
| 3627 | + * ice_set_safe_mode_vlan_cfg - configure PF VSI to allow all VLANs in safe mode |
|---|
| 3628 | + * @pf: PF to configure |
|---|
| 3629 | + * |
|---|
| 3630 | + * No VLAN offloads/filtering are advertised in safe mode so make sure the PF |
|---|
| 3631 | + * VSI can still Tx/Rx VLAN tagged packets. |
|---|
| 3632 | + */ |
|---|
| 3633 | +static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf) |
|---|
| 3634 | +{ |
|---|
| 3635 | + struct ice_vsi *vsi = ice_get_main_vsi(pf); |
|---|
| 3636 | + struct ice_vsi_ctx *ctxt; |
|---|
| 3637 | + enum ice_status status; |
|---|
| 3638 | + struct ice_hw *hw; |
|---|
| 3639 | + |
|---|
| 3640 | + if (!vsi) |
|---|
| 3641 | + return; |
|---|
| 3642 | + |
|---|
| 3643 | + ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); |
|---|
| 3644 | + if (!ctxt) |
|---|
| 3645 | + return; |
|---|
| 3646 | + |
|---|
| 3647 | + hw = &pf->hw; |
|---|
| 3648 | + ctxt->info = vsi->info; |
|---|
| 3649 | + |
|---|
| 3650 | + ctxt->info.valid_sections = |
|---|
| 3651 | + cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID | |
|---|
| 3652 | + ICE_AQ_VSI_PROP_SECURITY_VALID | |
|---|
| 3653 | + ICE_AQ_VSI_PROP_SW_VALID); |
|---|
| 3654 | + |
|---|
| 3655 | + /* disable VLAN anti-spoof */ |
|---|
| 3656 | + ctxt->info.sec_flags &= ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA << |
|---|
| 3657 | + ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S); |
|---|
| 3658 | + |
|---|
| 3659 | + /* disable VLAN pruning and keep all other settings */ |
|---|
| 3660 | + ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; |
|---|
| 3661 | + |
|---|
| 3662 | + /* allow all VLANs on Tx and don't strip on Rx */ |
|---|
| 3663 | + ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL | |
|---|
| 3664 | + ICE_AQ_VSI_VLAN_EMOD_NOTHING; |
|---|
| 3665 | + |
|---|
| 3666 | + status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); |
|---|
| 3667 | + if (status) { |
|---|
| 3668 | + dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %s aq_err %s\n", |
|---|
| 3669 | + ice_stat_str(status), |
|---|
| 3670 | + ice_aq_str(hw->adminq.sq_last_status)); |
|---|
| 3671 | + } else { |
|---|
| 3672 | + vsi->info.sec_flags = ctxt->info.sec_flags; |
|---|
| 3673 | + vsi->info.sw_flags2 = ctxt->info.sw_flags2; |
|---|
| 3674 | + vsi->info.vlan_flags = ctxt->info.vlan_flags; |
|---|
| 3675 | + } |
|---|
| 3676 | + |
|---|
| 3677 | + kfree(ctxt); |
|---|
| 3678 | +} |
|---|
| 3679 | + |
|---|
| 3680 | +/** |
|---|
| 3681 | + * ice_log_pkg_init - log result of DDP package load |
|---|
| 3682 | + * @hw: pointer to hardware info |
|---|
| 3683 | + * @status: status of package load |
|---|
| 3684 | + */ |
|---|
| 3685 | +static void |
|---|
| 3686 | +ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status) |
|---|
| 3687 | +{ |
|---|
| 3688 | + struct ice_pf *pf = (struct ice_pf *)hw->back; |
|---|
| 3689 | + struct device *dev = ice_pf_to_dev(pf); |
|---|
| 3690 | + |
|---|
| 3691 | + switch (*status) { |
|---|
| 3692 | + case ICE_SUCCESS: |
|---|
| 3693 | + /* The package download AdminQ command returned success because |
|---|
| 3694 | + * this download succeeded or ICE_ERR_AQ_NO_WORK since there is |
|---|
| 3695 | + * already a package loaded on the device. |
|---|
| 3696 | + */ |
|---|
| 3697 | + if (hw->pkg_ver.major == hw->active_pkg_ver.major && |
|---|
| 3698 | + hw->pkg_ver.minor == hw->active_pkg_ver.minor && |
|---|
| 3699 | + hw->pkg_ver.update == hw->active_pkg_ver.update && |
|---|
| 3700 | + hw->pkg_ver.draft == hw->active_pkg_ver.draft && |
|---|
| 3701 | + !memcmp(hw->pkg_name, hw->active_pkg_name, |
|---|
| 3702 | + sizeof(hw->pkg_name))) { |
|---|
| 3703 | + if (hw->pkg_dwnld_status == ICE_AQ_RC_EEXIST) |
|---|
| 3704 | + dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n", |
|---|
| 3705 | + hw->active_pkg_name, |
|---|
| 3706 | + hw->active_pkg_ver.major, |
|---|
| 3707 | + hw->active_pkg_ver.minor, |
|---|
| 3708 | + hw->active_pkg_ver.update, |
|---|
| 3709 | + hw->active_pkg_ver.draft); |
|---|
| 3710 | + else |
|---|
| 3711 | + dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n", |
|---|
| 3712 | + hw->active_pkg_name, |
|---|
| 3713 | + hw->active_pkg_ver.major, |
|---|
| 3714 | + hw->active_pkg_ver.minor, |
|---|
| 3715 | + hw->active_pkg_ver.update, |
|---|
| 3716 | + hw->active_pkg_ver.draft); |
|---|
| 3717 | + } else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ || |
|---|
| 3718 | + hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) { |
|---|
| 3719 | + dev_err(dev, "The device has a DDP package that is not supported by the driver. The device has package '%s' version %d.%d.x.x. The driver requires version %d.%d.x.x. Entering Safe Mode.\n", |
|---|
| 3720 | + hw->active_pkg_name, |
|---|
| 3721 | + hw->active_pkg_ver.major, |
|---|
| 3722 | + hw->active_pkg_ver.minor, |
|---|
| 3723 | + ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); |
|---|
| 3724 | + *status = ICE_ERR_NOT_SUPPORTED; |
|---|
| 3725 | + } else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ && |
|---|
| 3726 | + hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) { |
|---|
| 3727 | + dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package '%s' version %d.%d.%d.%d. The package file found by the driver: '%s' version %d.%d.%d.%d.\n", |
|---|
| 3728 | + hw->active_pkg_name, |
|---|
| 3729 | + hw->active_pkg_ver.major, |
|---|
| 3730 | + hw->active_pkg_ver.minor, |
|---|
| 3731 | + hw->active_pkg_ver.update, |
|---|
| 3732 | + hw->active_pkg_ver.draft, |
|---|
| 3733 | + hw->pkg_name, |
|---|
| 3734 | + hw->pkg_ver.major, |
|---|
| 3735 | + hw->pkg_ver.minor, |
|---|
| 3736 | + hw->pkg_ver.update, |
|---|
| 3737 | + hw->pkg_ver.draft); |
|---|
| 3738 | + } else { |
|---|
| 3739 | + dev_err(dev, "An unknown error occurred when loading the DDP package, please reboot the system. If the problem persists, update the NVM. Entering Safe Mode.\n"); |
|---|
| 3740 | + *status = ICE_ERR_NOT_SUPPORTED; |
|---|
| 3741 | + } |
|---|
| 3742 | + break; |
|---|
| 3743 | + case ICE_ERR_FW_DDP_MISMATCH: |
|---|
| 3744 | + dev_err(dev, "The firmware loaded on the device is not compatible with the DDP package. Please update the device's NVM. Entering safe mode.\n"); |
|---|
| 3745 | + break; |
|---|
| 3746 | + case ICE_ERR_BUF_TOO_SHORT: |
|---|
| 3747 | + case ICE_ERR_CFG: |
|---|
| 3748 | + dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n"); |
|---|
| 3749 | + break; |
|---|
| 3750 | + case ICE_ERR_NOT_SUPPORTED: |
|---|
| 3751 | + /* Package File version not supported */ |
|---|
| 3752 | + if (hw->pkg_ver.major > ICE_PKG_SUPP_VER_MAJ || |
|---|
| 3753 | + (hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ && |
|---|
| 3754 | + hw->pkg_ver.minor > ICE_PKG_SUPP_VER_MNR)) |
|---|
| 3755 | + dev_err(dev, "The DDP package file version is higher than the driver supports. Please use an updated driver. Entering Safe Mode.\n"); |
|---|
| 3756 | + else if (hw->pkg_ver.major < ICE_PKG_SUPP_VER_MAJ || |
|---|
| 3757 | + (hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ && |
|---|
| 3758 | + hw->pkg_ver.minor < ICE_PKG_SUPP_VER_MNR)) |
|---|
| 3759 | + dev_err(dev, "The DDP package file version is lower than the driver supports. The driver requires version %d.%d.x.x. Please use an updated DDP Package file. Entering Safe Mode.\n", |
|---|
| 3760 | + ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); |
|---|
| 3761 | + break; |
|---|
| 3762 | + case ICE_ERR_AQ_ERROR: |
|---|
| 3763 | + switch (hw->pkg_dwnld_status) { |
|---|
| 3764 | + case ICE_AQ_RC_ENOSEC: |
|---|
| 3765 | + case ICE_AQ_RC_EBADSIG: |
|---|
| 3766 | + dev_err(dev, "The DDP package could not be loaded because its signature is not valid. Please use a valid DDP Package. Entering Safe Mode.\n"); |
|---|
| 3767 | + return; |
|---|
| 3768 | + case ICE_AQ_RC_ESVN: |
|---|
| 3769 | + dev_err(dev, "The DDP Package could not be loaded because its security revision is too low. Please use an updated DDP Package. Entering Safe Mode.\n"); |
|---|
| 3770 | + return; |
|---|
| 3771 | + case ICE_AQ_RC_EBADMAN: |
|---|
| 3772 | + case ICE_AQ_RC_EBADBUF: |
|---|
| 3773 | + dev_err(dev, "An error occurred on the device while loading the DDP package. The device will be reset.\n"); |
|---|
| 3774 | + /* poll for reset to complete */ |
|---|
| 3775 | + if (ice_check_reset(hw)) |
|---|
| 3776 | + dev_err(dev, "Error resetting device. Please reload the driver\n"); |
|---|
| 3777 | + return; |
|---|
| 3778 | + default: |
|---|
| 3779 | + break; |
|---|
| 3780 | + } |
|---|
| 3781 | + fallthrough; |
|---|
| 3782 | + default: |
|---|
| 3783 | + dev_err(dev, "An unknown error (%d) occurred when loading the DDP package. Entering Safe Mode.\n", |
|---|
| 3784 | + *status); |
|---|
| 3785 | + break; |
|---|
| 3786 | + } |
|---|
| 3787 | +} |
|---|
| 3788 | + |
|---|
| 3789 | +/** |
|---|
| 3790 | + * ice_load_pkg - load/reload the DDP Package file |
|---|
| 3791 | + * @firmware: firmware structure when firmware requested or NULL for reload |
|---|
| 3792 | + * @pf: pointer to the PF instance |
|---|
| 3793 | + * |
|---|
| 3794 | + * Called on probe and post CORER/GLOBR rebuild to load DDP Package and |
|---|
| 3795 | + * initialize HW tables. |
|---|
| 3796 | + */ |
|---|
| 3797 | +static void |
|---|
| 3798 | +ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf) |
|---|
| 3799 | +{ |
|---|
| 3800 | + enum ice_status status = ICE_ERR_PARAM; |
|---|
| 3801 | + struct device *dev = ice_pf_to_dev(pf); |
|---|
| 3802 | + struct ice_hw *hw = &pf->hw; |
|---|
| 3803 | + |
|---|
| 3804 | + /* Load DDP Package */ |
|---|
| 3805 | + if (firmware && !hw->pkg_copy) { |
|---|
| 3806 | + status = ice_copy_and_init_pkg(hw, firmware->data, |
|---|
| 3807 | + firmware->size); |
|---|
| 3808 | + ice_log_pkg_init(hw, &status); |
|---|
| 3809 | + } else if (!firmware && hw->pkg_copy) { |
|---|
| 3810 | + /* Reload package during rebuild after CORER/GLOBR reset */ |
|---|
| 3811 | + status = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size); |
|---|
| 3812 | + ice_log_pkg_init(hw, &status); |
|---|
| 3813 | + } else { |
|---|
| 3814 | + dev_err(dev, "The DDP package file failed to load. Entering Safe Mode.\n"); |
|---|
| 3815 | + } |
|---|
| 3816 | + |
|---|
| 3817 | + if (status) { |
|---|
| 3818 | + /* Safe Mode */ |
|---|
| 3819 | + clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags); |
|---|
| 3820 | + return; |
|---|
| 3821 | + } |
|---|
| 3822 | + |
|---|
| 3823 | + /* Successful download package is the precondition for advanced |
|---|
| 3824 | + * features, hence setting the ICE_FLAG_ADV_FEATURES flag |
|---|
| 3825 | + */ |
|---|
| 3826 | + set_bit(ICE_FLAG_ADV_FEATURES, pf->flags); |
|---|
| 3827 | +} |
|---|
| 3828 | + |
|---|
| 3829 | +/** |
|---|
| 3830 | + * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines |
|---|
| 3831 | + * @pf: pointer to the PF structure |
|---|
| 3832 | + * |
|---|
| 3833 | + * There is no error returned here because the driver should be able to handle |
|---|
| 3834 | + * 128 Byte cache lines, so we only print a warning in case issues are seen, |
|---|
| 3835 | + * specifically with Tx. |
|---|
| 3836 | + */ |
|---|
| 3837 | +static void ice_verify_cacheline_size(struct ice_pf *pf) |
|---|
| 3838 | +{ |
|---|
| 3839 | + if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M) |
|---|
| 3840 | + dev_warn(ice_pf_to_dev(pf), "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n", |
|---|
| 3841 | + ICE_CACHE_LINE_BYTES); |
|---|
| 3842 | +} |
|---|
| 3843 | + |
|---|
| 3844 | +/** |
|---|
| 3845 | + * ice_send_version - update firmware with driver version |
|---|
| 3846 | + * @pf: PF struct |
|---|
| 3847 | + * |
|---|
| 3848 | + * Returns ICE_SUCCESS on success, else error code |
|---|
| 3849 | + */ |
|---|
| 3850 | +static enum ice_status ice_send_version(struct ice_pf *pf) |
|---|
| 3851 | +{ |
|---|
| 3852 | + struct ice_driver_ver dv; |
|---|
| 3853 | + |
|---|
| 3854 | + dv.major_ver = 0xff; |
|---|
| 3855 | + dv.minor_ver = 0xff; |
|---|
| 3856 | + dv.build_ver = 0xff; |
|---|
| 3857 | + dv.subbuild_ver = 0; |
|---|
| 3858 | + strscpy((char *)dv.driver_string, UTS_RELEASE, |
|---|
| 3859 | + sizeof(dv.driver_string)); |
|---|
| 3860 | + return ice_aq_send_driver_ver(&pf->hw, &dv, NULL); |
|---|
| 3861 | +} |
|---|
| 3862 | + |
|---|
| 3863 | +/** |
|---|
| 3864 | + * ice_init_fdir - Initialize flow director VSI and configuration |
|---|
| 3865 | + * @pf: pointer to the PF instance |
|---|
| 3866 | + * |
|---|
| 3867 | + * returns 0 on success, negative on error |
|---|
| 3868 | + */ |
|---|
| 3869 | +static int ice_init_fdir(struct ice_pf *pf) |
|---|
| 3870 | +{ |
|---|
| 3871 | + struct device *dev = ice_pf_to_dev(pf); |
|---|
| 3872 | + struct ice_vsi *ctrl_vsi; |
|---|
| 3873 | + int err; |
|---|
| 3874 | + |
|---|
| 3875 | + /* Side Band Flow Director needs to have a control VSI. |
|---|
| 3876 | + * Allocate it and store it in the PF. |
|---|
| 3877 | + */ |
|---|
| 3878 | + ctrl_vsi = ice_ctrl_vsi_setup(pf, pf->hw.port_info); |
|---|
| 3879 | + if (!ctrl_vsi) { |
|---|
| 3880 | + dev_dbg(dev, "could not create control VSI\n"); |
|---|
| 3881 | + return -ENOMEM; |
|---|
| 3882 | + } |
|---|
| 3883 | + |
|---|
| 3884 | + err = ice_vsi_open_ctrl(ctrl_vsi); |
|---|
| 3885 | + if (err) { |
|---|
| 3886 | + dev_dbg(dev, "could not open control VSI\n"); |
|---|
| 3887 | + goto err_vsi_open; |
|---|
| 3888 | + } |
|---|
| 3889 | + |
|---|
| 3890 | + mutex_init(&pf->hw.fdir_fltr_lock); |
|---|
| 3891 | + |
|---|
| 3892 | + err = ice_fdir_create_dflt_rules(pf); |
|---|
| 3893 | + if (err) |
|---|
| 3894 | + goto err_fdir_rule; |
|---|
| 3895 | + |
|---|
| 3896 | + return 0; |
|---|
| 3897 | + |
|---|
| 3898 | +err_fdir_rule: |
|---|
| 3899 | + ice_fdir_release_flows(&pf->hw); |
|---|
| 3900 | + ice_vsi_close(ctrl_vsi); |
|---|
| 3901 | +err_vsi_open: |
|---|
| 3902 | + ice_vsi_release(ctrl_vsi); |
|---|
| 3903 | + if (pf->ctrl_vsi_idx != ICE_NO_VSI) { |
|---|
| 3904 | + pf->vsi[pf->ctrl_vsi_idx] = NULL; |
|---|
| 3905 | + pf->ctrl_vsi_idx = ICE_NO_VSI; |
|---|
| 3906 | + } |
|---|
| 3907 | + return err; |
|---|
| 3908 | +} |
|---|
| 3909 | + |
|---|
| 3910 | +/** |
|---|
| 3911 | + * ice_get_opt_fw_name - return optional firmware file name or NULL |
|---|
| 3912 | + * @pf: pointer to the PF instance |
|---|
| 3913 | + */ |
|---|
| 3914 | +static char *ice_get_opt_fw_name(struct ice_pf *pf) |
|---|
| 3915 | +{ |
|---|
| 3916 | + /* Optional firmware name same as default with additional dash |
|---|
| 3917 | + * followed by a EUI-64 identifier (PCIe Device Serial Number) |
|---|
| 3918 | + */ |
|---|
| 3919 | + struct pci_dev *pdev = pf->pdev; |
|---|
| 3920 | + char *opt_fw_filename; |
|---|
| 3921 | + u64 dsn; |
|---|
| 3922 | + |
|---|
| 3923 | + /* Determine the name of the optional file using the DSN (two |
|---|
| 3924 | + * dwords following the start of the DSN Capability). |
|---|
| 3925 | + */ |
|---|
| 3926 | + dsn = pci_get_dsn(pdev); |
|---|
| 3927 | + if (!dsn) |
|---|
| 3928 | + return NULL; |
|---|
| 3929 | + |
|---|
| 3930 | + opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL); |
|---|
| 3931 | + if (!opt_fw_filename) |
|---|
| 3932 | + return NULL; |
|---|
| 3933 | + |
|---|
| 3934 | + snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llx.pkg", |
|---|
| 3935 | + ICE_DDP_PKG_PATH, dsn); |
|---|
| 3936 | + |
|---|
| 3937 | + return opt_fw_filename; |
|---|
| 3938 | +} |
|---|
| 3939 | + |
|---|
| 3940 | +/** |
|---|
| 3941 | + * ice_request_fw - Device initialization routine |
|---|
| 3942 | + * @pf: pointer to the PF instance |
|---|
| 3943 | + */ |
|---|
| 3944 | +static void ice_request_fw(struct ice_pf *pf) |
|---|
| 3945 | +{ |
|---|
| 3946 | + char *opt_fw_filename = ice_get_opt_fw_name(pf); |
|---|
| 3947 | + const struct firmware *firmware = NULL; |
|---|
| 3948 | + struct device *dev = ice_pf_to_dev(pf); |
|---|
| 3949 | + int err = 0; |
|---|
| 3950 | + |
|---|
| 3951 | + /* optional device-specific DDP (if present) overrides the default DDP |
|---|
| 3952 | + * package file. kernel logs a debug message if the file doesn't exist, |
|---|
| 3953 | + * and warning messages for other errors. |
|---|
| 3954 | + */ |
|---|
| 3955 | + if (opt_fw_filename) { |
|---|
| 3956 | + err = firmware_request_nowarn(&firmware, opt_fw_filename, dev); |
|---|
| 3957 | + if (err) { |
|---|
| 3958 | + kfree(opt_fw_filename); |
|---|
| 3959 | + goto dflt_pkg_load; |
|---|
| 3960 | + } |
|---|
| 3961 | + |
|---|
| 3962 | + /* request for firmware was successful. Download to device */ |
|---|
| 3963 | + ice_load_pkg(firmware, pf); |
|---|
| 3964 | + kfree(opt_fw_filename); |
|---|
| 3965 | + release_firmware(firmware); |
|---|
| 3966 | + return; |
|---|
| 3967 | + } |
|---|
| 3968 | + |
|---|
| 3969 | +dflt_pkg_load: |
|---|
| 3970 | + err = request_firmware(&firmware, ICE_DDP_PKG_FILE, dev); |
|---|
| 3971 | + if (err) { |
|---|
| 3972 | + dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n"); |
|---|
| 3973 | + return; |
|---|
| 3974 | + } |
|---|
| 3975 | + |
|---|
| 3976 | + /* request for firmware was successful. Download to device */ |
|---|
| 3977 | + ice_load_pkg(firmware, pf); |
|---|
| 3978 | + release_firmware(firmware); |
|---|
| 3979 | +} |
|---|
| 3980 | + |
|---|
| 3981 | +/** |
|---|
| 3982 | + * ice_print_wake_reason - show the wake up cause in the log |
|---|
| 3983 | + * @pf: pointer to the PF struct |
|---|
| 3984 | + */ |
|---|
| 3985 | +static void ice_print_wake_reason(struct ice_pf *pf) |
|---|
| 3986 | +{ |
|---|
| 3987 | + u32 wus = pf->wakeup_reason; |
|---|
| 3988 | + const char *wake_str; |
|---|
| 3989 | + |
|---|
| 3990 | + /* if no wake event, nothing to print */ |
|---|
| 3991 | + if (!wus) |
|---|
| 3992 | + return; |
|---|
| 3993 | + |
|---|
| 3994 | + if (wus & PFPM_WUS_LNKC_M) |
|---|
| 3995 | + wake_str = "Link\n"; |
|---|
| 3996 | + else if (wus & PFPM_WUS_MAG_M) |
|---|
| 3997 | + wake_str = "Magic Packet\n"; |
|---|
| 3998 | + else if (wus & PFPM_WUS_MNG_M) |
|---|
| 3999 | + wake_str = "Management\n"; |
|---|
| 4000 | + else if (wus & PFPM_WUS_FW_RST_WK_M) |
|---|
| 4001 | + wake_str = "Firmware Reset\n"; |
|---|
| 4002 | + else |
|---|
| 4003 | + wake_str = "Unknown\n"; |
|---|
| 4004 | + |
|---|
| 4005 | + dev_info(ice_pf_to_dev(pf), "Wake reason: %s", wake_str); |
|---|
| 3284 | 4006 | } |
|---|
| 3285 | 4007 | |
|---|
| 3286 | 4008 | /** |
|---|
| .. | .. |
|---|
| 3290 | 4012 | * |
|---|
| 3291 | 4013 | * Returns 0 on success, negative on failure |
|---|
| 3292 | 4014 | */ |
|---|
| 3293 | | -static int ice_probe(struct pci_dev *pdev, |
|---|
| 3294 | | - const struct pci_device_id __always_unused *ent) |
|---|
| 4015 | +static int |
|---|
| 4016 | +ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) |
|---|
| 3295 | 4017 | { |
|---|
| 4018 | + struct device *dev = &pdev->dev; |
|---|
| 3296 | 4019 | struct ice_pf *pf; |
|---|
| 3297 | 4020 | struct ice_hw *hw; |
|---|
| 3298 | | - int err; |
|---|
| 4021 | + int i, err; |
|---|
| 3299 | 4022 | |
|---|
| 3300 | | - /* this driver uses devres, see Documentation/driver-model/devres.txt */ |
|---|
| 4023 | + if (pdev->is_virtfn) { |
|---|
| 4024 | + dev_err(dev, "can't probe a virtual function\n"); |
|---|
| 4025 | + return -EINVAL; |
|---|
| 4026 | + } |
|---|
| 4027 | + |
|---|
| 4028 | + /* this driver uses devres, see |
|---|
| 4029 | + * Documentation/driver-api/driver-model/devres.rst |
|---|
| 4030 | + */ |
|---|
| 3301 | 4031 | err = pcim_enable_device(pdev); |
|---|
| 3302 | 4032 | if (err) |
|---|
| 3303 | 4033 | return err; |
|---|
| 3304 | 4034 | |
|---|
| 3305 | 4035 | err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), pci_name(pdev)); |
|---|
| 3306 | 4036 | if (err) { |
|---|
| 3307 | | - dev_err(&pdev->dev, "BAR0 I/O map error %d\n", err); |
|---|
| 4037 | + dev_err(dev, "BAR0 I/O map error %d\n", err); |
|---|
| 3308 | 4038 | return err; |
|---|
| 3309 | 4039 | } |
|---|
| 3310 | 4040 | |
|---|
| 3311 | | - pf = devm_kzalloc(&pdev->dev, sizeof(*pf), GFP_KERNEL); |
|---|
| 4041 | + pf = ice_allocate_pf(dev); |
|---|
| 3312 | 4042 | if (!pf) |
|---|
| 3313 | 4043 | return -ENOMEM; |
|---|
| 3314 | 4044 | |
|---|
| 3315 | | - /* set up for high or low dma */ |
|---|
| 3316 | | - err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); |
|---|
| 4045 | + /* set up for high or low DMA */ |
|---|
| 4046 | + err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); |
|---|
| 3317 | 4047 | if (err) |
|---|
| 3318 | | - err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); |
|---|
| 4048 | + err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); |
|---|
| 3319 | 4049 | if (err) { |
|---|
| 3320 | | - dev_err(&pdev->dev, "DMA configuration failed: 0x%x\n", err); |
|---|
| 4050 | + dev_err(dev, "DMA configuration failed: 0x%x\n", err); |
|---|
| 3321 | 4051 | return err; |
|---|
| 3322 | 4052 | } |
|---|
| 3323 | 4053 | |
|---|
| .. | .. |
|---|
| 3327 | 4057 | pf->pdev = pdev; |
|---|
| 3328 | 4058 | pci_set_drvdata(pdev, pf); |
|---|
| 3329 | 4059 | set_bit(__ICE_DOWN, pf->state); |
|---|
| 4060 | + /* Disable service task until DOWN bit is cleared */ |
|---|
| 4061 | + set_bit(__ICE_SERVICE_DIS, pf->state); |
|---|
| 3330 | 4062 | |
|---|
| 3331 | 4063 | hw = &pf->hw; |
|---|
| 3332 | 4064 | hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0]; |
|---|
| 4065 | + pci_save_state(pdev); |
|---|
| 4066 | + |
|---|
| 3333 | 4067 | hw->back = pf; |
|---|
| 3334 | 4068 | hw->vendor_id = pdev->vendor; |
|---|
| 3335 | 4069 | hw->device_id = pdev->device; |
|---|
| .. | .. |
|---|
| 3342 | 4076 | |
|---|
| 3343 | 4077 | pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M); |
|---|
| 3344 | 4078 | |
|---|
| 4079 | + err = ice_devlink_register(pf); |
|---|
| 4080 | + if (err) { |
|---|
| 4081 | + dev_err(dev, "ice_devlink_register failed: %d\n", err); |
|---|
| 4082 | + goto err_exit_unroll; |
|---|
| 4083 | + } |
|---|
| 4084 | + |
|---|
| 3345 | 4085 | #ifndef CONFIG_DYNAMIC_DEBUG |
|---|
| 3346 | 4086 | if (debug < -1) |
|---|
| 3347 | 4087 | hw->debug_mask = debug; |
|---|
| .. | .. |
|---|
| 3349 | 4089 | |
|---|
| 3350 | 4090 | err = ice_init_hw(hw); |
|---|
| 3351 | 4091 | if (err) { |
|---|
| 3352 | | - dev_err(&pdev->dev, "ice_init_hw failed: %d\n", err); |
|---|
| 4092 | + dev_err(dev, "ice_init_hw failed: %d\n", err); |
|---|
| 3353 | 4093 | err = -EIO; |
|---|
| 3354 | 4094 | goto err_exit_unroll; |
|---|
| 3355 | 4095 | } |
|---|
| 3356 | 4096 | |
|---|
| 3357 | | - dev_info(&pdev->dev, "firmware %d.%d.%05d api %d.%d\n", |
|---|
| 3358 | | - hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build, |
|---|
| 3359 | | - hw->api_maj_ver, hw->api_min_ver); |
|---|
| 4097 | + ice_request_fw(pf); |
|---|
| 3360 | 4098 | |
|---|
| 3361 | | - ice_init_pf(pf); |
|---|
| 4099 | + /* if ice_request_fw fails, ICE_FLAG_ADV_FEATURES bit won't be |
|---|
| 4100 | + * set in pf->state, which will cause ice_is_safe_mode to return |
|---|
| 4101 | + * true |
|---|
| 4102 | + */ |
|---|
| 4103 | + if (ice_is_safe_mode(pf)) { |
|---|
| 4104 | + dev_err(dev, "Package download failed. Advanced features disabled - Device now in Safe Mode\n"); |
|---|
| 4105 | + /* we already got function/device capabilities but these don't |
|---|
| 4106 | + * reflect what the driver needs to do in safe mode. Instead of |
|---|
| 4107 | + * adding conditional logic everywhere to ignore these |
|---|
| 4108 | + * device/function capabilities, override them. |
|---|
| 4109 | + */ |
|---|
| 4110 | + ice_set_safe_mode_caps(hw); |
|---|
| 4111 | + } |
|---|
| 3362 | 4112 | |
|---|
| 3363 | | - ice_determine_q_usage(pf); |
|---|
| 4113 | + err = ice_init_pf(pf); |
|---|
| 4114 | + if (err) { |
|---|
| 4115 | + dev_err(dev, "ice_init_pf failed: %d\n", err); |
|---|
| 4116 | + goto err_init_pf_unroll; |
|---|
| 4117 | + } |
|---|
| 3364 | 4118 | |
|---|
| 3365 | | - pf->num_alloc_vsi = min_t(u16, ICE_MAX_VSI_ALLOC, |
|---|
| 3366 | | - hw->func_caps.guaranteed_num_vsi); |
|---|
| 4119 | + ice_devlink_init_regions(pf); |
|---|
| 4120 | + |
|---|
| 4121 | + pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port; |
|---|
| 4122 | + pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port; |
|---|
| 4123 | + pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP; |
|---|
| 4124 | + pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared; |
|---|
| 4125 | + i = 0; |
|---|
| 4126 | + if (pf->hw.tnl.valid_count[TNL_VXLAN]) { |
|---|
| 4127 | + pf->hw.udp_tunnel_nic.tables[i].n_entries = |
|---|
| 4128 | + pf->hw.tnl.valid_count[TNL_VXLAN]; |
|---|
| 4129 | + pf->hw.udp_tunnel_nic.tables[i].tunnel_types = |
|---|
| 4130 | + UDP_TUNNEL_TYPE_VXLAN; |
|---|
| 4131 | + i++; |
|---|
| 4132 | + } |
|---|
| 4133 | + if (pf->hw.tnl.valid_count[TNL_GENEVE]) { |
|---|
| 4134 | + pf->hw.udp_tunnel_nic.tables[i].n_entries = |
|---|
| 4135 | + pf->hw.tnl.valid_count[TNL_GENEVE]; |
|---|
| 4136 | + pf->hw.udp_tunnel_nic.tables[i].tunnel_types = |
|---|
| 4137 | + UDP_TUNNEL_TYPE_GENEVE; |
|---|
| 4138 | + i++; |
|---|
| 4139 | + } |
|---|
| 4140 | + |
|---|
| 4141 | + pf->num_alloc_vsi = hw->func_caps.guar_num_vsi; |
|---|
| 3367 | 4142 | if (!pf->num_alloc_vsi) { |
|---|
| 3368 | 4143 | err = -EIO; |
|---|
| 3369 | 4144 | goto err_init_pf_unroll; |
|---|
| 3370 | 4145 | } |
|---|
| 4146 | + if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) { |
|---|
| 4147 | + dev_warn(&pf->pdev->dev, |
|---|
| 4148 | + "limiting the VSI count due to UDP tunnel limitation %d > %d\n", |
|---|
| 4149 | + pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES); |
|---|
| 4150 | + pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES; |
|---|
| 4151 | + } |
|---|
| 3371 | 4152 | |
|---|
| 3372 | | - pf->vsi = devm_kcalloc(&pdev->dev, pf->num_alloc_vsi, |
|---|
| 3373 | | - sizeof(struct ice_vsi *), GFP_KERNEL); |
|---|
| 4153 | + pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi), |
|---|
| 4154 | + GFP_KERNEL); |
|---|
| 3374 | 4155 | if (!pf->vsi) { |
|---|
| 3375 | 4156 | err = -ENOMEM; |
|---|
| 3376 | 4157 | goto err_init_pf_unroll; |
|---|
| .. | .. |
|---|
| 3378 | 4159 | |
|---|
| 3379 | 4160 | err = ice_init_interrupt_scheme(pf); |
|---|
| 3380 | 4161 | if (err) { |
|---|
| 3381 | | - dev_err(&pdev->dev, |
|---|
| 3382 | | - "ice_init_interrupt_scheme failed: %d\n", err); |
|---|
| 4162 | + dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err); |
|---|
| 3383 | 4163 | err = -EIO; |
|---|
| 3384 | | - goto err_init_interrupt_unroll; |
|---|
| 4164 | + goto err_init_vsi_unroll; |
|---|
| 3385 | 4165 | } |
|---|
| 3386 | 4166 | |
|---|
| 3387 | 4167 | /* In case of MSIX we are going to setup the misc vector right here |
|---|
| .. | .. |
|---|
| 3389 | 4169 | * the misc functionality and queue processing is combined in |
|---|
| 3390 | 4170 | * the same vector and that gets setup at open. |
|---|
| 3391 | 4171 | */ |
|---|
| 3392 | | - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) { |
|---|
| 3393 | | - err = ice_req_irq_msix_misc(pf); |
|---|
| 3394 | | - if (err) { |
|---|
| 3395 | | - dev_err(&pdev->dev, |
|---|
| 3396 | | - "setup of misc vector failed: %d\n", err); |
|---|
| 3397 | | - goto err_init_interrupt_unroll; |
|---|
| 3398 | | - } |
|---|
| 4172 | + err = ice_req_irq_msix_misc(pf); |
|---|
| 4173 | + if (err) { |
|---|
| 4174 | + dev_err(dev, "setup of misc vector failed: %d\n", err); |
|---|
| 4175 | + goto err_init_interrupt_unroll; |
|---|
| 3399 | 4176 | } |
|---|
| 3400 | 4177 | |
|---|
| 3401 | 4178 | /* create switch struct for the switch element created by FW on boot */ |
|---|
| 3402 | | - pf->first_sw = devm_kzalloc(&pdev->dev, sizeof(struct ice_sw), |
|---|
| 3403 | | - GFP_KERNEL); |
|---|
| 4179 | + pf->first_sw = devm_kzalloc(dev, sizeof(*pf->first_sw), GFP_KERNEL); |
|---|
| 3404 | 4180 | if (!pf->first_sw) { |
|---|
| 3405 | 4181 | err = -ENOMEM; |
|---|
| 3406 | 4182 | goto err_msix_misc_unroll; |
|---|
| 3407 | 4183 | } |
|---|
| 3408 | 4184 | |
|---|
| 3409 | | - pf->first_sw->bridge_mode = BRIDGE_MODE_VEB; |
|---|
| 4185 | + if (hw->evb_veb) |
|---|
| 4186 | + pf->first_sw->bridge_mode = BRIDGE_MODE_VEB; |
|---|
| 4187 | + else |
|---|
| 4188 | + pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA; |
|---|
| 4189 | + |
|---|
| 3410 | 4190 | pf->first_sw->pf = pf; |
|---|
| 3411 | 4191 | |
|---|
| 3412 | 4192 | /* record the sw_id available for later use */ |
|---|
| .. | .. |
|---|
| 3414 | 4194 | |
|---|
| 3415 | 4195 | err = ice_setup_pf_sw(pf); |
|---|
| 3416 | 4196 | if (err) { |
|---|
| 3417 | | - dev_err(&pdev->dev, |
|---|
| 3418 | | - "probe failed due to setup pf switch:%d\n", err); |
|---|
| 4197 | + dev_err(dev, "probe failed due to setup PF switch: %d\n", err); |
|---|
| 3419 | 4198 | goto err_alloc_sw_unroll; |
|---|
| 3420 | 4199 | } |
|---|
| 3421 | 4200 | |
|---|
| 3422 | | - /* Driver is mostly up */ |
|---|
| 3423 | | - clear_bit(__ICE_DOWN, pf->state); |
|---|
| 4201 | + clear_bit(__ICE_SERVICE_DIS, pf->state); |
|---|
| 4202 | + |
|---|
| 4203 | + /* tell the firmware we are up */ |
|---|
| 4204 | + err = ice_send_version(pf); |
|---|
| 4205 | + if (err) { |
|---|
| 4206 | + dev_err(dev, "probe failed sending driver version %s. error: %d\n", |
|---|
| 4207 | + UTS_RELEASE, err); |
|---|
| 4208 | + goto err_send_version_unroll; |
|---|
| 4209 | + } |
|---|
| 3424 | 4210 | |
|---|
| 3425 | 4211 | /* since everything is good, start the service timer */ |
|---|
| 3426 | 4212 | mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); |
|---|
| 3427 | 4213 | |
|---|
| 3428 | 4214 | err = ice_init_link_events(pf->hw.port_info); |
|---|
| 3429 | 4215 | if (err) { |
|---|
| 3430 | | - dev_err(&pdev->dev, "ice_init_link_events failed: %d\n", err); |
|---|
| 3431 | | - goto err_alloc_sw_unroll; |
|---|
| 4216 | + dev_err(dev, "ice_init_link_events failed: %d\n", err); |
|---|
| 4217 | + goto err_send_version_unroll; |
|---|
| 3432 | 4218 | } |
|---|
| 3433 | 4219 | |
|---|
| 4220 | + /* not a fatal error if this fails */ |
|---|
| 4221 | + err = ice_init_nvm_phy_type(pf->hw.port_info); |
|---|
| 4222 | + if (err) |
|---|
| 4223 | + dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err); |
|---|
| 4224 | + |
|---|
| 4225 | + /* not a fatal error if this fails */ |
|---|
| 4226 | + err = ice_update_link_info(pf->hw.port_info); |
|---|
| 4227 | + if (err) |
|---|
| 4228 | + dev_err(dev, "ice_update_link_info failed: %d\n", err); |
|---|
| 4229 | + |
|---|
| 4230 | + ice_init_link_dflt_override(pf->hw.port_info); |
|---|
| 4231 | + |
|---|
| 4232 | + /* if media available, initialize PHY settings */ |
|---|
| 4233 | + if (pf->hw.port_info->phy.link_info.link_info & |
|---|
| 4234 | + ICE_AQ_MEDIA_AVAILABLE) { |
|---|
| 4235 | + /* not a fatal error if this fails */ |
|---|
| 4236 | + err = ice_init_phy_user_cfg(pf->hw.port_info); |
|---|
| 4237 | + if (err) |
|---|
| 4238 | + dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err); |
|---|
| 4239 | + |
|---|
| 4240 | + if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) { |
|---|
| 4241 | + struct ice_vsi *vsi = ice_get_main_vsi(pf); |
|---|
| 4242 | + |
|---|
| 4243 | + if (vsi) |
|---|
| 4244 | + ice_configure_phy(vsi); |
|---|
| 4245 | + } |
|---|
| 4246 | + } else { |
|---|
| 4247 | + set_bit(ICE_FLAG_NO_MEDIA, pf->flags); |
|---|
| 4248 | + } |
|---|
| 4249 | + |
|---|
| 4250 | + ice_verify_cacheline_size(pf); |
|---|
| 4251 | + |
|---|
| 4252 | + /* Save wakeup reason register for later use */ |
|---|
| 4253 | + pf->wakeup_reason = rd32(hw, PFPM_WUS); |
|---|
| 4254 | + |
|---|
| 4255 | + /* check for a power management event */ |
|---|
| 4256 | + ice_print_wake_reason(pf); |
|---|
| 4257 | + |
|---|
| 4258 | + /* clear wake status, all bits */ |
|---|
| 4259 | + wr32(hw, PFPM_WUS, U32_MAX); |
|---|
| 4260 | + |
|---|
| 4261 | + /* Disable WoL at init, wait for user to enable */ |
|---|
| 4262 | + device_set_wakeup_enable(dev, false); |
|---|
| 4263 | + |
|---|
| 4264 | + if (ice_is_safe_mode(pf)) { |
|---|
| 4265 | + ice_set_safe_mode_vlan_cfg(pf); |
|---|
| 4266 | + goto probe_done; |
|---|
| 4267 | + } |
|---|
| 4268 | + |
|---|
| 4269 | + /* initialize DDP driven features */ |
|---|
| 4270 | + |
|---|
| 4271 | + /* Note: Flow director init failure is non-fatal to load */ |
|---|
| 4272 | + if (ice_init_fdir(pf)) |
|---|
| 4273 | + dev_err(dev, "could not initialize flow director\n"); |
|---|
| 4274 | + |
|---|
| 4275 | + /* Note: DCB init failure is non-fatal to load */ |
|---|
| 4276 | + if (ice_init_pf_dcb(pf, false)) { |
|---|
| 4277 | + clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); |
|---|
| 4278 | + clear_bit(ICE_FLAG_DCB_ENA, pf->flags); |
|---|
| 4279 | + } else { |
|---|
| 4280 | + ice_cfg_lldp_mib_change(&pf->hw, true); |
|---|
| 4281 | + } |
|---|
| 4282 | + |
|---|
| 4283 | + /* print PCI link speed and width */ |
|---|
| 4284 | + pcie_print_link_status(pf->pdev); |
|---|
| 4285 | + |
|---|
| 4286 | +probe_done: |
|---|
| 4287 | + /* ready to go, so clear down state bit */ |
|---|
| 4288 | + clear_bit(__ICE_DOWN, pf->state); |
|---|
| 3434 | 4289 | return 0; |
|---|
| 3435 | 4290 | |
|---|
| 4291 | +err_send_version_unroll: |
|---|
| 4292 | + ice_vsi_release_all(pf); |
|---|
| 3436 | 4293 | err_alloc_sw_unroll: |
|---|
| 4294 | + set_bit(__ICE_SERVICE_DIS, pf->state); |
|---|
| 3437 | 4295 | set_bit(__ICE_DOWN, pf->state); |
|---|
| 3438 | | - devm_kfree(&pf->pdev->dev, pf->first_sw); |
|---|
| 4296 | + devm_kfree(dev, pf->first_sw); |
|---|
| 3439 | 4297 | err_msix_misc_unroll: |
|---|
| 3440 | 4298 | ice_free_irq_msix_misc(pf); |
|---|
| 3441 | 4299 | err_init_interrupt_unroll: |
|---|
| 3442 | 4300 | ice_clear_interrupt_scheme(pf); |
|---|
| 3443 | | - devm_kfree(&pdev->dev, pf->vsi); |
|---|
| 4301 | +err_init_vsi_unroll: |
|---|
| 4302 | + devm_kfree(dev, pf->vsi); |
|---|
| 3444 | 4303 | err_init_pf_unroll: |
|---|
| 3445 | 4304 | ice_deinit_pf(pf); |
|---|
| 4305 | + ice_devlink_destroy_regions(pf); |
|---|
| 3446 | 4306 | ice_deinit_hw(hw); |
|---|
| 3447 | 4307 | err_exit_unroll: |
|---|
| 4308 | + ice_devlink_unregister(pf); |
|---|
| 3448 | 4309 | pci_disable_pcie_error_reporting(pdev); |
|---|
| 4310 | + pci_disable_device(pdev); |
|---|
| 3449 | 4311 | return err; |
|---|
| 4312 | +} |
|---|
| 4313 | + |
|---|
| 4314 | +/** |
|---|
| 4315 | + * ice_set_wake - enable or disable Wake on LAN |
|---|
| 4316 | + * @pf: pointer to the PF struct |
|---|
| 4317 | + * |
|---|
| 4318 | + * Simple helper for WoL control |
|---|
| 4319 | + */ |
|---|
| 4320 | +static void ice_set_wake(struct ice_pf *pf) |
|---|
| 4321 | +{ |
|---|
| 4322 | + struct ice_hw *hw = &pf->hw; |
|---|
| 4323 | + bool wol = pf->wol_ena; |
|---|
| 4324 | + |
|---|
| 4325 | + /* clear wake state, otherwise new wake events won't fire */ |
|---|
| 4326 | + wr32(hw, PFPM_WUS, U32_MAX); |
|---|
| 4327 | + |
|---|
| 4328 | + /* enable / disable APM wake up, no RMW needed */ |
|---|
| 4329 | + wr32(hw, PFPM_APM, wol ? PFPM_APM_APME_M : 0); |
|---|
| 4330 | + |
|---|
| 4331 | + /* set magic packet filter enabled */ |
|---|
| 4332 | + wr32(hw, PFPM_WUFC, wol ? PFPM_WUFC_MAG_M : 0); |
|---|
| 4333 | +} |
|---|
| 4334 | + |
|---|
| 4335 | +/** |
|---|
| 4336 | + * ice_setup_magic_mc_wake - setup device to wake on multicast magic packet |
|---|
| 4337 | + * @pf: pointer to the PF struct |
|---|
| 4338 | + * |
|---|
| 4339 | + * Issue firmware command to enable multicast magic wake, making |
|---|
| 4340 | + * sure that any locally administered address (LAA) is used for |
|---|
| 4341 | + * wake, and that PF reset doesn't undo the LAA. |
|---|
| 4342 | + */ |
|---|
| 4343 | +static void ice_setup_mc_magic_wake(struct ice_pf *pf) |
|---|
| 4344 | +{ |
|---|
| 4345 | + struct device *dev = ice_pf_to_dev(pf); |
|---|
| 4346 | + struct ice_hw *hw = &pf->hw; |
|---|
| 4347 | + enum ice_status status; |
|---|
| 4348 | + u8 mac_addr[ETH_ALEN]; |
|---|
| 4349 | + struct ice_vsi *vsi; |
|---|
| 4350 | + u8 flags; |
|---|
| 4351 | + |
|---|
| 4352 | + if (!pf->wol_ena) |
|---|
| 4353 | + return; |
|---|
| 4354 | + |
|---|
| 4355 | + vsi = ice_get_main_vsi(pf); |
|---|
| 4356 | + if (!vsi) |
|---|
| 4357 | + return; |
|---|
| 4358 | + |
|---|
| 4359 | + /* Get current MAC address in case it's an LAA */ |
|---|
| 4360 | + if (vsi->netdev) |
|---|
| 4361 | + ether_addr_copy(mac_addr, vsi->netdev->dev_addr); |
|---|
| 4362 | + else |
|---|
| 4363 | + ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr); |
|---|
| 4364 | + |
|---|
| 4365 | + flags = ICE_AQC_MAN_MAC_WR_MC_MAG_EN | |
|---|
| 4366 | + ICE_AQC_MAN_MAC_UPDATE_LAA_WOL | |
|---|
| 4367 | + ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP; |
|---|
| 4368 | + |
|---|
| 4369 | + status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL); |
|---|
| 4370 | + if (status) |
|---|
| 4371 | + dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %s aq_err %s\n", |
|---|
| 4372 | + ice_stat_str(status), |
|---|
| 4373 | + ice_aq_str(hw->adminq.sq_last_status)); |
|---|
| 3450 | 4374 | } |
|---|
| 3451 | 4375 | |
|---|
| 3452 | 4376 | /** |
|---|
| .. | .. |
|---|
static void ice_remove(struct pci_dev *pdev)
{
	struct ice_pf *pf = pci_get_drvdata(pdev);
	int i;

	/* Wait (bounded) for any in-progress reset to finish before
	 * starting teardown; poll every 100 ms up to ICE_MAX_RESET_WAIT
	 * iterations.
	 */
	for (i = 0; i < ICE_MAX_RESET_WAIT; i++) {
		if (!ice_is_reset_in_progress(pf->state))
			break;
		msleep(100);
	}

	/* Free VFs first, with further VF resets disabled, so no VF work
	 * races with the rest of the teardown below.
	 */
	if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
		set_bit(__ICE_VF_RESETS_DISABLED, pf->state);
		ice_free_vfs(pf);
	}

	set_bit(__ICE_DOWN, pf->state);
	ice_service_task_stop(pf);

	/* wake anyone blocked waiting on an AdminQ completion */
	ice_aq_cancel_waiting_tasks(pf);

	mutex_destroy(&(&pf->hw)->fdir_fltr_lock);
	if (!ice_is_safe_mode(pf))
		ice_remove_arfs(pf);
	/* program multicast magic-packet wake (if WoL enabled) while the
	 * netdev/MAC state is still available, before releasing VSIs
	 */
	ice_setup_mc_magic_wake(pf);
	ice_vsi_release_all(pf);
	ice_set_wake(pf);
	ice_free_irq_msix_misc(pf);
	/* q_vectors are freed after VSI release, once no IRQs remain */
	ice_for_each_vsi(pf, i) {
		if (!pf->vsi[i])
			continue;
		ice_vsi_free_q_vectors(pf->vsi[i]);
	}
	ice_deinit_pf(pf);
	ice_devlink_destroy_regions(pf);
	ice_deinit_hw(&pf->hw);
	ice_devlink_unregister(pf);

	/* Issue a PFR as part of the prescribed driver unload flow. Do not
	 * do it via ice_schedule_reset() since there is no need to rebuild
	 * and the service task is already stopped.
	 */
	ice_reset(&pf->hw, ICE_RESET_PFR);
	pci_wait_for_pending_transaction(pdev);
	ice_clear_interrupt_scheme(pf);
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
}
|---|
| 4428 | + |
|---|
| 4429 | +/** |
|---|
| 4430 | + * ice_shutdown - PCI callback for shutting down device |
|---|
| 4431 | + * @pdev: PCI device information struct |
|---|
| 4432 | + */ |
|---|
| 4433 | +static void ice_shutdown(struct pci_dev *pdev) |
|---|
| 4434 | +{ |
|---|
| 4435 | + struct ice_pf *pf = pci_get_drvdata(pdev); |
|---|
| 4436 | + |
|---|
| 4437 | + ice_remove(pdev); |
|---|
| 4438 | + |
|---|
| 4439 | + if (system_state == SYSTEM_POWER_OFF) { |
|---|
| 4440 | + pci_wake_from_d3(pdev, pf->wol_ena); |
|---|
| 4441 | + pci_set_power_state(pdev, PCI_D3hot); |
|---|
| 4442 | + } |
|---|
| 4443 | +} |
|---|
| 4444 | + |
|---|
| 4445 | +#ifdef CONFIG_PM |
|---|
/**
 * ice_prepare_for_shutdown - prep for PCI shutdown
 * @pf: board private structure
 *
 * Inform or close all dependent features in prep for PCI device shutdown
 */
static void ice_prepare_for_shutdown(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	u32 v;

	/* Notify VFs of impending reset */
	if (ice_check_sq_alive(hw, &hw->mailboxq))
		ice_vc_notify_reset(pf);

	dev_dbg(ice_pf_to_dev(pf), "Tearing down internal switch for shutdown\n");

	/* disable the VSIs and their queues that are not already DOWN */
	ice_pf_dis_all_vsi(pf, false);

	/* Zero out each VSI's HW VSI number; NOTE(review): presumably this
	 * invalidates stale handles so they are re-acquired on the next
	 * rebuild — confirm against ice_vsi_rebuild()
	 */
	ice_for_each_vsi(pf, v)
		if (pf->vsi[v])
			pf->vsi[v]->vsi_num = 0;

	/* stop all control queues last, after VSIs are quiesced */
	ice_shutdown_all_ctrlq(hw);
}
|---|
| 4472 | + |
|---|
/**
 * ice_reinit_interrupt_scheme - Reinitialize interrupt scheme
 * @pf: board private structure to reinitialize
 *
 * This routine reinitialize interrupt scheme that was cleared during
 * power management suspend callback.
 *
 * This should be called during resume routine to re-allocate the q_vectors
 * and reacquire interrupts.
 *
 * Returns 0 on success, or the negative error from the failing step. On
 * failure, any q_vectors allocated by earlier loop iterations are freed.
 */
static int ice_reinit_interrupt_scheme(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	int ret, v;

	/* Since we clear MSIX flag during suspend, we need to
	 * set it back during resume...
	 */

	ret = ice_init_interrupt_scheme(pf);
	if (ret) {
		dev_err(dev, "Failed to re-initialize interrupt %d\n", ret);
		return ret;
	}

	/* Remap vectors and rings, after successful re-init interrupts */
	ice_for_each_vsi(pf, v) {
		if (!pf->vsi[v])
			continue;

		ret = ice_vsi_alloc_q_vectors(pf->vsi[v]);
		if (ret)
			goto err_reinit;
		ice_vsi_map_rings_to_vectors(pf->vsi[v]);
	}

	/* reclaim the misc (admin/OICR) vector last */
	ret = ice_req_irq_msix_misc(pf);
	if (ret) {
		dev_err(dev, "Setting up misc vector failed after device suspend %d\n",
			ret);
		goto err_reinit;
	}

	return 0;

err_reinit:
	/* unwind: free q_vectors only for VSIs processed before the failure */
	while (v--)
		if (pf->vsi[v])
			ice_vsi_free_q_vectors(pf->vsi[v]);

	return ret;
}
|---|
| 4525 | + |
|---|
/**
 * ice_suspend
 * @dev: generic device information structure
 *
 * Power Management callback to quiesce the device and prepare
 * for D3 transition.
 *
 * Returns 0 on success (including the already-suspended and in-reset
 * cases, which are treated as no-ops), or -EBUSY if the PF is not in a
 * nominal state.
 */
static int __maybe_unused ice_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct ice_pf *pf;
	int disabled, v;

	pf = pci_get_drvdata(pdev);

	if (!ice_pf_state_is_nominal(pf)) {
		dev_err(dev, "Device is not ready, no need to suspend it\n");
		return -EBUSY;
	}

	/* Stop watchdog tasks until resume completion.
	 * Even though it is most likely that the service task is
	 * disabled if the device is suspended or down, the service task's
	 * state is controlled by a different state bit, and we should
	 * store and honor whatever state that bit is in at this point.
	 */
	disabled = ice_service_task_stop(pf);

	/* Already suspended?, then there is nothing to do */
	if (test_and_set_bit(__ICE_SUSPENDED, pf->state)) {
		/* restore service task only if we were the ones to stop it */
		if (!disabled)
			ice_service_task_restart(pf);
		return 0;
	}

	if (test_bit(__ICE_DOWN, pf->state) ||
	    ice_is_reset_in_progress(pf->state)) {
		dev_err(dev, "can't suspend device in reset or already down\n");
		if (!disabled)
			ice_service_task_restart(pf);
		return 0;
	}

	/* configure multicast magic-packet wake before tearing down VSIs */
	ice_setup_mc_magic_wake(pf);

	ice_prepare_for_shutdown(pf);

	ice_set_wake(pf);

	/* Free vectors, clear the interrupt scheme and release IRQs
	 * for proper hibernation, especially with large number of CPUs.
	 * Otherwise hibernation might fail when mapping all the vectors back
	 * to CPU0.
	 */
	ice_free_irq_msix_misc(pf);
	ice_for_each_vsi(pf, v) {
		if (!pf->vsi[v])
			continue;
		ice_vsi_free_q_vectors(pf->vsi[v]);
	}
	ice_free_cpu_rx_rmap(ice_get_main_vsi(pf));
	ice_clear_interrupt_scheme(pf);

	/* hand the device to the PCI core: save state, arm WoL, enter D3 */
	pci_save_state(pdev);
	pci_wake_from_d3(pdev, pf->wol_ena);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}
|---|
| 4594 | + |
|---|
/**
 * ice_resume - PM callback for waking up from D3
 * @dev: generic device information structure
 *
 * Restores PCI state, records the hardware wake-up reason, rebuilds the
 * interrupt scheme torn down by ice_suspend(), and schedules a PF reset
 * to rebuild the remaining device state.
 *
 * Returns 0, or a negative error if the PCI device cannot be re-enabled.
 */
static int __maybe_unused ice_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	enum ice_reset_req reset_type;
	struct ice_pf *pf;
	struct ice_hw *hw;
	int ret;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/* re-save so a later slot reset restores the D0 state just set up */
	pci_save_state(pdev);

	if (!pci_device_is_present(pdev))
		return -ENODEV;

	ret = pci_enable_device_mem(pdev);
	if (ret) {
		dev_err(dev, "Cannot enable device after suspend\n");
		return ret;
	}

	pf = pci_get_drvdata(pdev);
	hw = &pf->hw;

	/* capture and report why the hardware woke us */
	pf->wakeup_reason = rd32(hw, PFPM_WUS);
	ice_print_wake_reason(pf);

	/* We cleared the interrupt scheme when we suspended, so we need to
	 * restore it now to resume device functionality.
	 */
	ret = ice_reinit_interrupt_scheme(pf);
	if (ret)
		dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret);

	clear_bit(__ICE_DOWN, pf->state);
	/* Now perform PF reset and rebuild */
	reset_type = ICE_RESET_PFR;
	/* re-enable service task for reset, but allow reset to schedule it */
	clear_bit(__ICE_SERVICE_DIS, pf->state);

	if (ice_schedule_reset(pf, reset_type))
		dev_err(dev, "Reset during resume failed.\n");

	clear_bit(__ICE_SUSPENDED, pf->state);
	ice_service_task_restart(pf);

	/* Restart the service task */
	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));

	return 0;
}
|---|
| 4650 | +#endif /* CONFIG_PM */ |
|---|
| 4651 | + |
|---|
| 4652 | +/** |
|---|
| 4653 | + * ice_pci_err_detected - warning that PCI error has been detected |
|---|
| 4654 | + * @pdev: PCI device information struct |
|---|
| 4655 | + * @err: the type of PCI error |
|---|
| 4656 | + * |
|---|
| 4657 | + * Called to warn that something happened on the PCI bus and the error handling |
|---|
| 4658 | + * is in progress. Allows the driver to gracefully prepare/handle PCI errors. |
|---|
| 4659 | + */ |
|---|
| 4660 | +static pci_ers_result_t |
|---|
| 4661 | +ice_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t err) |
|---|
| 4662 | +{ |
|---|
| 4663 | + struct ice_pf *pf = pci_get_drvdata(pdev); |
|---|
| 4664 | + |
|---|
| 4665 | + if (!pf) { |
|---|
| 4666 | + dev_err(&pdev->dev, "%s: unrecoverable device error %d\n", |
|---|
| 4667 | + __func__, err); |
|---|
| 4668 | + return PCI_ERS_RESULT_DISCONNECT; |
|---|
| 4669 | + } |
|---|
| 4670 | + |
|---|
| 4671 | + if (!test_bit(__ICE_SUSPENDED, pf->state)) { |
|---|
| 4672 | + ice_service_task_stop(pf); |
|---|
| 4673 | + |
|---|
| 4674 | + if (!test_bit(__ICE_PREPARED_FOR_RESET, pf->state)) { |
|---|
| 4675 | + set_bit(__ICE_PFR_REQ, pf->state); |
|---|
| 4676 | + ice_prepare_for_reset(pf); |
|---|
| 4677 | + } |
|---|
| 4678 | + } |
|---|
| 4679 | + |
|---|
| 4680 | + return PCI_ERS_RESULT_NEED_RESET; |
|---|
| 4681 | +} |
|---|
| 4682 | + |
|---|
/**
 * ice_pci_err_slot_reset - a PCI slot reset has just happened
 * @pdev: PCI device information struct
 *
 * Called to determine if the driver can recover from the PCI slot reset by
 * using a register read to determine if the device is recoverable.
 *
 * Returns PCI_ERS_RESULT_RECOVERED when the device responds sanely,
 * PCI_ERS_RESULT_DISCONNECT otherwise.
 */
static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev)
{
	struct ice_pf *pf = pci_get_drvdata(pdev);
	pci_ers_result_t result;
	int err;
	u32 reg;

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot re-enable PCI device after reset, error %d\n",
			err);
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		/* restore config space and disarm wake before probing HW */
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);
		pci_wake_from_d3(pdev, false);

		/* Check for life */
		reg = rd32(&pf->hw, GLGEN_RTRIG);
		if (!reg)
			result = PCI_ERS_RESULT_RECOVERED;
		else
			result = PCI_ERS_RESULT_DISCONNECT;
	}

	/* clear stale non-fatal AER status regardless of the verdict above */
	err = pci_aer_clear_nonfatal_status(pdev);
	if (err)
		dev_dbg(&pdev->dev, "pci_aer_clear_nonfatal_status() failed, error %d\n",
			err);
		/* non-fatal, continue */

	return result;
}
|---|
| 4724 | + |
|---|
/**
 * ice_pci_err_resume - restart operations after PCI error recovery
 * @pdev: PCI device information struct
 *
 * Called to allow the driver to bring things back up after PCI error and/or
 * reset recovery have finished
 */
static void ice_pci_err_resume(struct pci_dev *pdev)
{
	struct ice_pf *pf = pci_get_drvdata(pdev);

	if (!pf) {
		dev_err(&pdev->dev, "%s failed, device is unrecoverable\n",
			__func__);
		return;
	}

	/* a suspended device will be brought up by ice_resume() instead */
	if (test_bit(__ICE_SUSPENDED, pf->state)) {
		dev_dbg(&pdev->dev, "%s failed to resume normal operations!\n",
			__func__);
		return;
	}

	/* re-arm MSI state for any VFs before rebuilding the PF */
	ice_restore_all_vfs_msi_state(pdev);

	ice_do_reset(pf, ICE_RESET_PFR);
	ice_service_task_restart(pf);
	/* kick the service timer so the service task runs promptly */
	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
}
|---|
| 4754 | + |
|---|
| 4755 | +/** |
|---|
| 4756 | + * ice_pci_err_reset_prepare - prepare device driver for PCI reset |
|---|
| 4757 | + * @pdev: PCI device information struct |
|---|
| 4758 | + */ |
|---|
| 4759 | +static void ice_pci_err_reset_prepare(struct pci_dev *pdev) |
|---|
| 4760 | +{ |
|---|
| 4761 | + struct ice_pf *pf = pci_get_drvdata(pdev); |
|---|
| 4762 | + |
|---|
| 4763 | + if (!test_bit(__ICE_SUSPENDED, pf->state)) { |
|---|
| 4764 | + ice_service_task_stop(pf); |
|---|
| 4765 | + |
|---|
| 4766 | + if (!test_bit(__ICE_PREPARED_FOR_RESET, pf->state)) { |
|---|
| 4767 | + set_bit(__ICE_PFR_REQ, pf->state); |
|---|
| 4768 | + ice_prepare_for_reset(pf); |
|---|
| 4769 | + } |
|---|
| 4770 | + } |
|---|
| 4771 | +} |
|---|
| 4772 | + |
|---|
/**
 * ice_pci_err_reset_done - PCI reset done, device driver reset can begin
 * @pdev: PCI device information struct
 */
static void ice_pci_err_reset_done(struct pci_dev *pdev)
{
	/* recovery after a PCI reset reuses the normal error-resume path */
	ice_pci_err_resume(pdev);
}
|---|
| 3483 | 4781 | |
|---|
| 3484 | 4782 | /* ice_pci_tbl - PCI Device ID Table |
|---|
| .. | .. |
|---|
| 3490 | 4788 | * Class, Class Mask, private data (not used) } |
|---|
| 3491 | 4789 | */ |
|---|
static const struct pci_device_id ice_pci_tbl[] = {
	/* E810-C and E810-XXV adapters */
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_BACKPLANE), 0 },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_QSFP), 0 },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP), 0 },
	/* E823-C adapters */
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE), 0 },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP), 0 },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SFP), 0 },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_10G_BASE_T), 0 },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SGMII), 0 },
	/* E822-C adapters */
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE), 0 },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP), 0 },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP), 0 },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T), 0 },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII), 0 },
	/* E822-L adapters */
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_BACKPLANE), 0 },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP), 0 },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T), 0 },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII), 0 },
	/* E823-L adapters */
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_BACKPLANE), 0 },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_SFP), 0 },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T), 0 },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE), 0 },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP), 0 },
	/* required last entry */
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, ice_pci_tbl);
|---|
| 4820 | + |
|---|
/* suspend/resume PM callbacks; __maybe_unused for !CONFIG_PM builds */
static __maybe_unused SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume);

/* PCI AER / error-recovery callbacks registered with the PCI core */
static const struct pci_error_handlers ice_pci_err_handler = {
	.error_detected = ice_pci_err_detected,
	.slot_reset = ice_pci_err_slot_reset,
	.reset_prepare = ice_pci_err_reset_prepare,
	.reset_done = ice_pci_err_reset_done,
	.resume = ice_pci_err_resume
};
|---|
| 3502 | 4830 | |
|---|
/* PCI driver registration: probe/remove, PM, shutdown, SR-IOV and AER hooks */
static struct pci_driver ice_driver = {
	.name = KBUILD_MODNAME,
	.id_table = ice_pci_tbl,
	.probe = ice_probe,
	.remove = ice_remove,
#ifdef CONFIG_PM
	.driver.pm = &ice_pm_ops,
#endif /* CONFIG_PM */
	.shutdown = ice_shutdown,
	.sriov_configure = ice_sriov_configure,
	.err_handler = &ice_pci_err_handler
};
|---|
| 3509 | 4843 | |
|---|
| 3510 | 4844 | /** |
|---|
| .. | .. |
|---|
| 3517 | 4851 | { |
|---|
| 3518 | 4852 | int status; |
|---|
| 3519 | 4853 | |
|---|
| 3520 | | - pr_info("%s - version %s\n", ice_driver_string, ice_drv_ver); |
|---|
| 4854 | + pr_info("%s\n", ice_driver_string); |
|---|
| 3521 | 4855 | pr_info("%s\n", ice_copyright); |
|---|
| 3522 | 4856 | |
|---|
| 3523 | | - ice_wq = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, KBUILD_MODNAME); |
|---|
| 4857 | + ice_wq = alloc_workqueue("%s", 0, 0, KBUILD_MODNAME); |
|---|
| 3524 | 4858 | if (!ice_wq) { |
|---|
| 3525 | 4859 | pr_err("Failed to create workqueue\n"); |
|---|
| 3526 | 4860 | return -ENOMEM; |
|---|
| .. | .. |
|---|
| 3528 | 4862 | |
|---|
| 3529 | 4863 | status = pci_register_driver(&ice_driver); |
|---|
| 3530 | 4864 | if (status) { |
|---|
| 3531 | | - pr_err("failed to register pci driver, err %d\n", status); |
|---|
| 4865 | + pr_err("failed to register PCI driver, err %d\n", status); |
|---|
| 3532 | 4866 | destroy_workqueue(ice_wq); |
|---|
| 3533 | 4867 | } |
|---|
| 3534 | 4868 | |
|---|
| .. | .. |
|---|
| 3551 | 4885 | module_exit(ice_module_exit); |
|---|
| 3552 | 4886 | |
|---|
| 3553 | 4887 | /** |
|---|
| 3554 | | - * ice_set_mac_address - NDO callback to set mac address |
|---|
| 4888 | + * ice_set_mac_address - NDO callback to set MAC address |
|---|
| 3555 | 4889 | * @netdev: network interface device structure |
|---|
| 3556 | 4890 | * @pi: pointer to an address structure |
|---|
| 3557 | 4891 | * |
|---|
| .. | .. |
|---|
| 3565 | 4899 | struct ice_hw *hw = &pf->hw; |
|---|
| 3566 | 4900 | struct sockaddr *addr = pi; |
|---|
| 3567 | 4901 | enum ice_status status; |
|---|
| 3568 | | - LIST_HEAD(a_mac_list); |
|---|
| 3569 | | - LIST_HEAD(r_mac_list); |
|---|
| 4902 | + u8 old_mac[ETH_ALEN]; |
|---|
| 3570 | 4903 | u8 flags = 0; |
|---|
| 3571 | | - int err; |
|---|
| 4904 | + int err = 0; |
|---|
| 3572 | 4905 | u8 *mac; |
|---|
| 3573 | 4906 | |
|---|
| 3574 | 4907 | mac = (u8 *)addr->sa_data; |
|---|
| .. | .. |
|---|
| 3577 | 4910 | return -EADDRNOTAVAIL; |
|---|
| 3578 | 4911 | |
|---|
| 3579 | 4912 | if (ether_addr_equal(netdev->dev_addr, mac)) { |
|---|
| 3580 | | - netdev_warn(netdev, "already using mac %pM\n", mac); |
|---|
| 4913 | + netdev_dbg(netdev, "already using mac %pM\n", mac); |
|---|
| 3581 | 4914 | return 0; |
|---|
| 3582 | 4915 | } |
|---|
| 3583 | 4916 | |
|---|
| 3584 | 4917 | if (test_bit(__ICE_DOWN, pf->state) || |
|---|
| 3585 | | - ice_is_reset_recovery_pending(pf->state)) { |
|---|
| 4918 | + ice_is_reset_in_progress(pf->state)) { |
|---|
| 3586 | 4919 | netdev_err(netdev, "can't set mac %pM. device not ready\n", |
|---|
| 3587 | 4920 | mac); |
|---|
| 3588 | 4921 | return -EBUSY; |
|---|
| 3589 | 4922 | } |
|---|
| 3590 | 4923 | |
|---|
| 3591 | | - /* When we change the mac address we also have to change the mac address |
|---|
| 3592 | | - * based filter rules that were created previously for the old mac |
|---|
| 3593 | | - * address. So first, we remove the old filter rule using ice_remove_mac |
|---|
| 3594 | | - * and then create a new filter rule using ice_add_mac. Note that for |
|---|
| 3595 | | - * both these operations, we first need to form a "list" of mac |
|---|
| 3596 | | - * addresses (even though in this case, we have only 1 mac address to be |
|---|
| 3597 | | - * added/removed) and this done using ice_add_mac_to_list. Depending on |
|---|
| 3598 | | - * the ensuing operation this "list" of mac addresses is either to be |
|---|
| 3599 | | - * added or removed from the filter. |
|---|
| 3600 | | - */ |
|---|
| 3601 | | - err = ice_add_mac_to_list(vsi, &r_mac_list, netdev->dev_addr); |
|---|
| 4924 | + netif_addr_lock_bh(netdev); |
|---|
| 4925 | + ether_addr_copy(old_mac, netdev->dev_addr); |
|---|
| 4926 | + /* change the netdev's MAC address */ |
|---|
| 4927 | + memcpy(netdev->dev_addr, mac, netdev->addr_len); |
|---|
| 4928 | + netif_addr_unlock_bh(netdev); |
|---|
| 4929 | + |
|---|
| 4930 | + /* Clean up old MAC filter. Not an error if old filter doesn't exist */ |
|---|
| 4931 | + status = ice_fltr_remove_mac(vsi, old_mac, ICE_FWD_TO_VSI); |
|---|
| 4932 | + if (status && status != ICE_ERR_DOES_NOT_EXIST) { |
|---|
| 4933 | + err = -EADDRNOTAVAIL; |
|---|
| 4934 | + goto err_update_filters; |
|---|
| 4935 | + } |
|---|
| 4936 | + |
|---|
| 4937 | + /* Add filter for new MAC. If filter exists, return success */ |
|---|
| 4938 | + status = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI); |
|---|
| 4939 | + if (status == ICE_ERR_ALREADY_EXISTS) |
|---|
| 4940 | + /* Although this MAC filter is already present in hardware it's |
|---|
| 4941 | + * possible in some cases (e.g. bonding) that dev_addr was |
|---|
| 4942 | + * modified outside of the driver and needs to be restored back |
|---|
| 4943 | + * to this value. |
|---|
| 4944 | + */ |
|---|
| 4945 | + netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac); |
|---|
| 4946 | + else if (status) |
|---|
| 4947 | + /* error if the new filter addition failed */ |
|---|
| 4948 | + err = -EADDRNOTAVAIL; |
|---|
| 4949 | + |
|---|
| 4950 | +err_update_filters: |
|---|
| 3602 | 4951 | if (err) { |
|---|
| 3603 | | - err = -EADDRNOTAVAIL; |
|---|
| 3604 | | - goto free_lists; |
|---|
| 3605 | | - } |
|---|
| 3606 | | - |
|---|
| 3607 | | - status = ice_remove_mac(hw, &r_mac_list); |
|---|
| 3608 | | - if (status) { |
|---|
| 3609 | | - err = -EADDRNOTAVAIL; |
|---|
| 3610 | | - goto free_lists; |
|---|
| 3611 | | - } |
|---|
| 3612 | | - |
|---|
| 3613 | | - err = ice_add_mac_to_list(vsi, &a_mac_list, mac); |
|---|
| 3614 | | - if (err) { |
|---|
| 3615 | | - err = -EADDRNOTAVAIL; |
|---|
| 3616 | | - goto free_lists; |
|---|
| 3617 | | - } |
|---|
| 3618 | | - |
|---|
| 3619 | | - status = ice_add_mac(hw, &a_mac_list); |
|---|
| 3620 | | - if (status) { |
|---|
| 3621 | | - err = -EADDRNOTAVAIL; |
|---|
| 3622 | | - goto free_lists; |
|---|
| 3623 | | - } |
|---|
| 3624 | | - |
|---|
| 3625 | | -free_lists: |
|---|
| 3626 | | - /* free list entries */ |
|---|
| 3627 | | - ice_free_fltr_list(&pf->pdev->dev, &r_mac_list); |
|---|
| 3628 | | - ice_free_fltr_list(&pf->pdev->dev, &a_mac_list); |
|---|
| 3629 | | - |
|---|
| 3630 | | - if (err) { |
|---|
| 3631 | | - netdev_err(netdev, "can't set mac %pM. filter update failed\n", |
|---|
| 4952 | + netdev_err(netdev, "can't set MAC %pM. filter update failed\n", |
|---|
| 3632 | 4953 | mac); |
|---|
| 4954 | + netif_addr_lock_bh(netdev); |
|---|
| 4955 | + ether_addr_copy(netdev->dev_addr, old_mac); |
|---|
| 4956 | + netif_addr_unlock_bh(netdev); |
|---|
| 3633 | 4957 | return err; |
|---|
| 3634 | 4958 | } |
|---|
| 3635 | 4959 | |
|---|
| 3636 | | - /* change the netdev's mac address */ |
|---|
| 3637 | | - memcpy(netdev->dev_addr, mac, netdev->addr_len); |
|---|
| 3638 | | - netdev_dbg(vsi->netdev, "updated mac address to %pM\n", |
|---|
| 4960 | + netdev_dbg(vsi->netdev, "updated MAC address to %pM\n", |
|---|
| 3639 | 4961 | netdev->dev_addr); |
|---|
| 3640 | 4962 | |
|---|
| 3641 | | - /* write new mac address to the firmware */ |
|---|
| 4963 | + /* write new MAC address to the firmware */ |
|---|
| 3642 | 4964 | flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL; |
|---|
| 3643 | 4965 | status = ice_aq_manage_mac_write(hw, mac, flags, NULL); |
|---|
| 3644 | 4966 | if (status) { |
|---|
| 3645 | | - netdev_err(netdev, "can't set mac %pM. write to firmware failed.\n", |
|---|
| 3646 | | - mac); |
|---|
| 4967 | + netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %s\n", |
|---|
| 4968 | + mac, ice_stat_str(status)); |
|---|
| 3647 | 4969 | } |
|---|
| 3648 | 4970 | return 0; |
|---|
| 3649 | 4971 | } |
|---|
| .. | .. |
|---|
| 3675 | 4997 | } |
|---|
| 3676 | 4998 | |
|---|
| 3677 | 4999 | /** |
|---|
| 5000 | + * ice_set_tx_maxrate - NDO callback to set the maximum per-queue bitrate |
|---|
| 5001 | + * @netdev: network interface device structure |
|---|
| 5002 | + * @queue_index: Queue ID |
|---|
| 5003 | + * @maxrate: maximum bandwidth in Mbps |
|---|
| 5004 | + */ |
|---|
| 5005 | +static int |
|---|
| 5006 | +ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate) |
|---|
| 5007 | +{ |
|---|
| 5008 | + struct ice_netdev_priv *np = netdev_priv(netdev); |
|---|
| 5009 | + struct ice_vsi *vsi = np->vsi; |
|---|
| 5010 | + enum ice_status status; |
|---|
| 5011 | + u16 q_handle; |
|---|
| 5012 | + u8 tc; |
|---|
| 5013 | + |
|---|
| 5014 | + /* Validate maxrate requested is within permitted range */ |
|---|
| 5015 | + if (maxrate && (maxrate > (ICE_SCHED_MAX_BW / 1000))) { |
|---|
| 5016 | + netdev_err(netdev, "Invalid max rate %d specified for the queue %d\n", |
|---|
| 5017 | + maxrate, queue_index); |
|---|
| 5018 | + return -EINVAL; |
|---|
| 5019 | + } |
|---|
| 5020 | + |
|---|
| 5021 | + q_handle = vsi->tx_rings[queue_index]->q_handle; |
|---|
| 5022 | + tc = ice_dcb_get_tc(vsi, queue_index); |
|---|
| 5023 | + |
|---|
| 5024 | + /* Set BW back to default, when user set maxrate to 0 */ |
|---|
| 5025 | + if (!maxrate) |
|---|
| 5026 | + status = ice_cfg_q_bw_dflt_lmt(vsi->port_info, vsi->idx, tc, |
|---|
| 5027 | + q_handle, ICE_MAX_BW); |
|---|
| 5028 | + else |
|---|
| 5029 | + status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc, |
|---|
| 5030 | + q_handle, ICE_MAX_BW, maxrate * 1000); |
|---|
| 5031 | + if (status) { |
|---|
| 5032 | + netdev_err(netdev, "Unable to set Tx max rate, error %s\n", |
|---|
| 5033 | + ice_stat_str(status)); |
|---|
| 5034 | + return -EIO; |
|---|
| 5035 | + } |
|---|
| 5036 | + |
|---|
| 5037 | + return 0; |
|---|
| 5038 | +} |
|---|
| 5039 | + |
|---|
| 5040 | +/** |
|---|
| 3678 | 5041 | * ice_fdb_add - add an entry to the hardware database |
|---|
| 3679 | 5042 | * @ndm: the input from the stack |
|---|
| 3680 | 5043 | * @tb: pointer to array of nladdr (unused) |
|---|
| 3681 | 5044 | * @dev: the net device pointer |
|---|
| 3682 | 5045 | * @addr: the MAC address entry being added |
|---|
| 3683 | | - * @vid: VLAN id |
|---|
| 5046 | + * @vid: VLAN ID |
|---|
| 3684 | 5047 | * @flags: instructions from stack about fdb operation |
|---|
| 5048 | + * @extack: netlink extended ack |
|---|
| 3685 | 5049 | */ |
|---|
| 3686 | | -static int ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[], |
|---|
| 3687 | | - struct net_device *dev, const unsigned char *addr, |
|---|
| 3688 | | - u16 vid, u16 flags) |
|---|
| 5050 | +static int |
|---|
| 5051 | +ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[], |
|---|
| 5052 | + struct net_device *dev, const unsigned char *addr, u16 vid, |
|---|
| 5053 | + u16 flags, struct netlink_ext_ack __always_unused *extack) |
|---|
| 3689 | 5054 | { |
|---|
| 3690 | 5055 | int err; |
|---|
| 3691 | 5056 | |
|---|
| .. | .. |
|---|
| 3718 | 5083 | * @tb: pointer to array of nladdr (unused) |
|---|
| 3719 | 5084 | * @dev: the net device pointer |
|---|
| 3720 | 5085 | * @addr: the MAC address entry being added |
|---|
| 3721 | | - * @vid: VLAN id |
|---|
| 5086 | + * @vid: VLAN ID |
|---|
| 3722 | 5087 | */ |
|---|
| 3723 | | -static int ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[], |
|---|
| 3724 | | - struct net_device *dev, const unsigned char *addr, |
|---|
| 3725 | | - __always_unused u16 vid) |
|---|
| 5088 | +static int |
|---|
| 5089 | +ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[], |
|---|
| 5090 | + struct net_device *dev, const unsigned char *addr, |
|---|
| 5091 | + __always_unused u16 vid) |
|---|
| 3726 | 5092 | { |
|---|
| 3727 | 5093 | int err; |
|---|
| 3728 | 5094 | |
|---|
| .. | .. |
|---|
| 3742 | 5108 | } |
|---|
| 3743 | 5109 | |
|---|
| 3744 | 5110 | /** |
|---|
| 3745 | | - * ice_vsi_manage_vlan_insertion - Manage VLAN insertion for the VSI for Tx |
|---|
| 3746 | | - * @vsi: the vsi being changed |
|---|
| 3747 | | - */ |
|---|
| 3748 | | -static int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi) |
|---|
| 3749 | | -{ |
|---|
| 3750 | | - struct device *dev = &vsi->back->pdev->dev; |
|---|
| 3751 | | - struct ice_hw *hw = &vsi->back->hw; |
|---|
| 3752 | | - struct ice_vsi_ctx ctxt = { 0 }; |
|---|
| 3753 | | - enum ice_status status; |
|---|
| 3754 | | - |
|---|
| 3755 | | - /* Here we are configuring the VSI to let the driver add VLAN tags by |
|---|
| 3756 | | - * setting vlan_flags to ICE_AQ_VSI_VLAN_MODE_ALL. The actual VLAN tag |
|---|
| 3757 | | - * insertion happens in the Tx hot path, in ice_tx_map. |
|---|
| 3758 | | - */ |
|---|
| 3759 | | - ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL; |
|---|
| 3760 | | - |
|---|
| 3761 | | - ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID); |
|---|
| 3762 | | - ctxt.vsi_num = vsi->vsi_num; |
|---|
| 3763 | | - |
|---|
| 3764 | | - status = ice_aq_update_vsi(hw, &ctxt, NULL); |
|---|
| 3765 | | - if (status) { |
|---|
| 3766 | | - dev_err(dev, "update VSI for VLAN insert failed, err %d aq_err %d\n", |
|---|
| 3767 | | - status, hw->adminq.sq_last_status); |
|---|
| 3768 | | - return -EIO; |
|---|
| 3769 | | - } |
|---|
| 3770 | | - |
|---|
| 3771 | | - vsi->info.vlan_flags = ctxt.info.vlan_flags; |
|---|
| 3772 | | - return 0; |
|---|
| 3773 | | -} |
|---|
| 3774 | | - |
|---|
| 3775 | | -/** |
|---|
| 3776 | | - * ice_vsi_manage_vlan_stripping - Manage VLAN stripping for the VSI for Rx |
|---|
| 3777 | | - * @vsi: the vsi being changed |
|---|
| 3778 | | - * @ena: boolean value indicating if this is a enable or disable request |
|---|
| 3779 | | - */ |
|---|
| 3780 | | -static int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena) |
|---|
| 3781 | | -{ |
|---|
| 3782 | | - struct device *dev = &vsi->back->pdev->dev; |
|---|
| 3783 | | - struct ice_hw *hw = &vsi->back->hw; |
|---|
| 3784 | | - struct ice_vsi_ctx ctxt = { 0 }; |
|---|
| 3785 | | - enum ice_status status; |
|---|
| 3786 | | - |
|---|
| 3787 | | - /* Here we are configuring what the VSI should do with the VLAN tag in |
|---|
| 3788 | | - * the Rx packet. We can either leave the tag in the packet or put it in |
|---|
| 3789 | | - * the Rx descriptor. |
|---|
| 3790 | | - */ |
|---|
| 3791 | | - if (ena) { |
|---|
| 3792 | | - /* Strip VLAN tag from Rx packet and put it in the desc */ |
|---|
| 3793 | | - ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH; |
|---|
| 3794 | | - } else { |
|---|
| 3795 | | - /* Disable stripping. Leave tag in packet */ |
|---|
| 3796 | | - ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING; |
|---|
| 3797 | | - } |
|---|
| 3798 | | - |
|---|
| 3799 | | - /* Allow all packets untagged/tagged */ |
|---|
| 3800 | | - ctxt.info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL; |
|---|
| 3801 | | - |
|---|
| 3802 | | - ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID); |
|---|
| 3803 | | - ctxt.vsi_num = vsi->vsi_num; |
|---|
| 3804 | | - |
|---|
| 3805 | | - status = ice_aq_update_vsi(hw, &ctxt, NULL); |
|---|
| 3806 | | - if (status) { |
|---|
| 3807 | | - dev_err(dev, "update VSI for VALN strip failed, ena = %d err %d aq_err %d\n", |
|---|
| 3808 | | - ena, status, hw->adminq.sq_last_status); |
|---|
| 3809 | | - return -EIO; |
|---|
| 3810 | | - } |
|---|
| 3811 | | - |
|---|
| 3812 | | - vsi->info.vlan_flags = ctxt.info.vlan_flags; |
|---|
| 3813 | | - return 0; |
|---|
| 3814 | | -} |
|---|
| 3815 | | - |
|---|
| 3816 | | -/** |
|---|
| 3817 | 5111 | * ice_set_features - set the netdev feature flags |
|---|
| 3818 | 5112 | * @netdev: ptr to the netdev being adjusted |
|---|
| 3819 | 5113 | * @features: the feature set that the stack is suggesting |
|---|
| 3820 | 5114 | */ |
|---|
| 3821 | | -static int ice_set_features(struct net_device *netdev, |
|---|
| 3822 | | - netdev_features_t features) |
|---|
| 5115 | +static int |
|---|
| 5116 | +ice_set_features(struct net_device *netdev, netdev_features_t features) |
|---|
| 3823 | 5117 | { |
|---|
| 3824 | 5118 | struct ice_netdev_priv *np = netdev_priv(netdev); |
|---|
| 3825 | 5119 | struct ice_vsi *vsi = np->vsi; |
|---|
| 5120 | + struct ice_pf *pf = vsi->back; |
|---|
| 3826 | 5121 | int ret = 0; |
|---|
| 5122 | + |
|---|
| 5123 | + /* Don't set any netdev advanced features with device in Safe Mode */ |
|---|
| 5124 | + if (ice_is_safe_mode(vsi->back)) { |
|---|
| 5125 | + dev_err(ice_pf_to_dev(vsi->back), "Device is in Safe Mode - not enabling advanced netdev features\n"); |
|---|
| 5126 | + return ret; |
|---|
| 5127 | + } |
|---|
| 5128 | + |
|---|
| 5129 | + /* Do not change setting during reset */ |
|---|
| 5130 | + if (ice_is_reset_in_progress(pf->state)) { |
|---|
| 5131 | + dev_err(ice_pf_to_dev(vsi->back), "Device is resetting, changing advanced netdev features temporarily unavailable.\n"); |
|---|
| 5132 | + return -EBUSY; |
|---|
| 5133 | + } |
|---|
| 5134 | + |
|---|
| 5135 | + /* Multiple features can be changed in one call so keep features in |
|---|
| 5136 | + * separate if/else statements to guarantee each feature is checked |
|---|
| 5137 | + */ |
|---|
| 5138 | + if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH)) |
|---|
| 5139 | + ret = ice_vsi_manage_rss_lut(vsi, true); |
|---|
| 5140 | + else if (!(features & NETIF_F_RXHASH) && |
|---|
| 5141 | + netdev->features & NETIF_F_RXHASH) |
|---|
| 5142 | + ret = ice_vsi_manage_rss_lut(vsi, false); |
|---|
| 3827 | 5143 | |
|---|
| 3828 | 5144 | if ((features & NETIF_F_HW_VLAN_CTAG_RX) && |
|---|
| 3829 | 5145 | !(netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) |
|---|
| .. | .. |
|---|
| 3831 | 5147 | else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && |
|---|
| 3832 | 5148 | (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) |
|---|
| 3833 | 5149 | ret = ice_vsi_manage_vlan_stripping(vsi, false); |
|---|
| 3834 | | - else if ((features & NETIF_F_HW_VLAN_CTAG_TX) && |
|---|
| 3835 | | - !(netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) |
|---|
| 5150 | + |
|---|
| 5151 | + if ((features & NETIF_F_HW_VLAN_CTAG_TX) && |
|---|
| 5152 | + !(netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) |
|---|
| 3836 | 5153 | ret = ice_vsi_manage_vlan_insertion(vsi); |
|---|
| 3837 | 5154 | else if (!(features & NETIF_F_HW_VLAN_CTAG_TX) && |
|---|
| 3838 | 5155 | (netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) |
|---|
| 3839 | 5156 | ret = ice_vsi_manage_vlan_insertion(vsi); |
|---|
| 3840 | 5157 | |
|---|
| 5158 | + if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && |
|---|
| 5159 | + !(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) |
|---|
| 5160 | + ret = ice_cfg_vlan_pruning(vsi, true, false); |
|---|
| 5161 | + else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && |
|---|
| 5162 | + (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) |
|---|
| 5163 | + ret = ice_cfg_vlan_pruning(vsi, false, false); |
|---|
| 5164 | + |
|---|
| 5165 | + if ((features & NETIF_F_NTUPLE) && |
|---|
| 5166 | + !(netdev->features & NETIF_F_NTUPLE)) { |
|---|
| 5167 | + ice_vsi_manage_fdir(vsi, true); |
|---|
| 5168 | + ice_init_arfs(vsi); |
|---|
| 5169 | + } else if (!(features & NETIF_F_NTUPLE) && |
|---|
| 5170 | + (netdev->features & NETIF_F_NTUPLE)) { |
|---|
| 5171 | + ice_vsi_manage_fdir(vsi, false); |
|---|
| 5172 | + ice_clear_arfs(vsi); |
|---|
| 5173 | + } |
|---|
| 5174 | + |
|---|
| 3841 | 5175 | return ret; |
|---|
| 3842 | 5176 | } |
|---|
| 3843 | 5177 | |
|---|
| 3844 | 5178 | /** |
|---|
| 3845 | | - * ice_vsi_vlan_setup - Setup vlan offload properties on a VSI |
|---|
| 3846 | | - * @vsi: VSI to setup vlan properties for |
|---|
| 5179 | + * ice_vsi_vlan_setup - Setup VLAN offload properties on a VSI |
|---|
| 5180 | + * @vsi: VSI to setup VLAN properties for |
|---|
| 3847 | 5181 | */ |
|---|
| 3848 | 5182 | static int ice_vsi_vlan_setup(struct ice_vsi *vsi) |
|---|
| 3849 | 5183 | { |
|---|
| .. | .. |
|---|
| 3858 | 5192 | } |
|---|
| 3859 | 5193 | |
|---|
| 3860 | 5194 | /** |
|---|
| 3861 | | - * ice_restore_vlan - Reinstate VLANs when vsi/netdev comes back up |
|---|
| 3862 | | - * @vsi: the VSI being brought back up |
|---|
| 3863 | | - */ |
|---|
| 3864 | | -static int ice_restore_vlan(struct ice_vsi *vsi) |
|---|
| 3865 | | -{ |
|---|
| 3866 | | - int err; |
|---|
| 3867 | | - u16 vid; |
|---|
| 3868 | | - |
|---|
| 3869 | | - if (!vsi->netdev) |
|---|
| 3870 | | - return -EINVAL; |
|---|
| 3871 | | - |
|---|
| 3872 | | - err = ice_vsi_vlan_setup(vsi); |
|---|
| 3873 | | - if (err) |
|---|
| 3874 | | - return err; |
|---|
| 3875 | | - |
|---|
| 3876 | | - for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID) { |
|---|
| 3877 | | - err = ice_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q), vid); |
|---|
| 3878 | | - if (err) |
|---|
| 3879 | | - break; |
|---|
| 3880 | | - } |
|---|
| 3881 | | - |
|---|
| 3882 | | - return err; |
|---|
| 3883 | | -} |
|---|
| 3884 | | - |
|---|
| 3885 | | -/** |
|---|
| 3886 | | - * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance |
|---|
| 3887 | | - * @ring: The Tx ring to configure |
|---|
| 3888 | | - * @tlan_ctx: Pointer to the Tx LAN queue context structure to be initialized |
|---|
| 3889 | | - * @pf_q: queue index in the PF space |
|---|
| 3890 | | - * |
|---|
| 3891 | | - * Configure the Tx descriptor ring in TLAN context. |
|---|
| 3892 | | - */ |
|---|
| 3893 | | -static void |
|---|
| 3894 | | -ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q) |
|---|
| 3895 | | -{ |
|---|
| 3896 | | - struct ice_vsi *vsi = ring->vsi; |
|---|
| 3897 | | - struct ice_hw *hw = &vsi->back->hw; |
|---|
| 3898 | | - |
|---|
| 3899 | | - tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S; |
|---|
| 3900 | | - |
|---|
| 3901 | | - tlan_ctx->port_num = vsi->port_info->lport; |
|---|
| 3902 | | - |
|---|
| 3903 | | - /* Transmit Queue Length */ |
|---|
| 3904 | | - tlan_ctx->qlen = ring->count; |
|---|
| 3905 | | - |
|---|
| 3906 | | - /* PF number */ |
|---|
| 3907 | | - tlan_ctx->pf_num = hw->pf_id; |
|---|
| 3908 | | - |
|---|
| 3909 | | - /* queue belongs to a specific VSI type |
|---|
| 3910 | | - * VF / VM index should be programmed per vmvf_type setting: |
|---|
| 3911 | | - * for vmvf_type = VF, it is VF number between 0-256 |
|---|
| 3912 | | - * for vmvf_type = VM, it is VM number between 0-767 |
|---|
| 3913 | | - * for PF or EMP this field should be set to zero |
|---|
| 3914 | | - */ |
|---|
| 3915 | | - switch (vsi->type) { |
|---|
| 3916 | | - case ICE_VSI_PF: |
|---|
| 3917 | | - tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF; |
|---|
| 3918 | | - break; |
|---|
| 3919 | | - default: |
|---|
| 3920 | | - return; |
|---|
| 3921 | | - } |
|---|
| 3922 | | - |
|---|
| 3923 | | - /* make sure the context is associated with the right VSI */ |
|---|
| 3924 | | - tlan_ctx->src_vsi = vsi->vsi_num; |
|---|
| 3925 | | - |
|---|
| 3926 | | - tlan_ctx->tso_ena = ICE_TX_LEGACY; |
|---|
| 3927 | | - tlan_ctx->tso_qnum = pf_q; |
|---|
| 3928 | | - |
|---|
| 3929 | | - /* Legacy or Advanced Host Interface: |
|---|
| 3930 | | - * 0: Advanced Host Interface |
|---|
| 3931 | | - * 1: Legacy Host Interface |
|---|
| 3932 | | - */ |
|---|
| 3933 | | - tlan_ctx->legacy_int = ICE_TX_LEGACY; |
|---|
| 3934 | | -} |
|---|
| 3935 | | - |
|---|
| 3936 | | -/** |
|---|
| 3937 | | - * ice_vsi_cfg_txqs - Configure the VSI for Tx |
|---|
| 3938 | | - * @vsi: the VSI being configured |
|---|
| 3939 | | - * |
|---|
| 3940 | | - * Return 0 on success and a negative value on error |
|---|
| 3941 | | - * Configure the Tx VSI for operation. |
|---|
| 3942 | | - */ |
|---|
| 3943 | | -static int ice_vsi_cfg_txqs(struct ice_vsi *vsi) |
|---|
| 3944 | | -{ |
|---|
| 3945 | | - struct ice_aqc_add_tx_qgrp *qg_buf; |
|---|
| 3946 | | - struct ice_aqc_add_txqs_perq *txq; |
|---|
| 3947 | | - struct ice_pf *pf = vsi->back; |
|---|
| 3948 | | - enum ice_status status; |
|---|
| 3949 | | - u16 buf_len, i, pf_q; |
|---|
| 3950 | | - int err = 0, tc = 0; |
|---|
| 3951 | | - u8 num_q_grps; |
|---|
| 3952 | | - |
|---|
| 3953 | | - buf_len = sizeof(struct ice_aqc_add_tx_qgrp); |
|---|
| 3954 | | - qg_buf = devm_kzalloc(&pf->pdev->dev, buf_len, GFP_KERNEL); |
|---|
| 3955 | | - if (!qg_buf) |
|---|
| 3956 | | - return -ENOMEM; |
|---|
| 3957 | | - |
|---|
| 3958 | | - if (vsi->num_txq > ICE_MAX_TXQ_PER_TXQG) { |
|---|
| 3959 | | - err = -EINVAL; |
|---|
| 3960 | | - goto err_cfg_txqs; |
|---|
| 3961 | | - } |
|---|
| 3962 | | - qg_buf->num_txqs = 1; |
|---|
| 3963 | | - num_q_grps = 1; |
|---|
| 3964 | | - |
|---|
| 3965 | | - /* set up and configure the tx queues */ |
|---|
| 3966 | | - ice_for_each_txq(vsi, i) { |
|---|
| 3967 | | - struct ice_tlan_ctx tlan_ctx = { 0 }; |
|---|
| 3968 | | - |
|---|
| 3969 | | - pf_q = vsi->txq_map[i]; |
|---|
| 3970 | | - ice_setup_tx_ctx(vsi->tx_rings[i], &tlan_ctx, pf_q); |
|---|
| 3971 | | - /* copy context contents into the qg_buf */ |
|---|
| 3972 | | - qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q); |
|---|
| 3973 | | - ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx, |
|---|
| 3974 | | - ice_tlan_ctx_info); |
|---|
| 3975 | | - |
|---|
| 3976 | | - /* init queue specific tail reg. It is referred as transmit |
|---|
| 3977 | | - * comm scheduler queue doorbell. |
|---|
| 3978 | | - */ |
|---|
| 3979 | | - vsi->tx_rings[i]->tail = pf->hw.hw_addr + QTX_COMM_DBELL(pf_q); |
|---|
| 3980 | | - status = ice_ena_vsi_txq(vsi->port_info, vsi->vsi_num, tc, |
|---|
| 3981 | | - num_q_grps, qg_buf, buf_len, NULL); |
|---|
| 3982 | | - if (status) { |
|---|
| 3983 | | - dev_err(&vsi->back->pdev->dev, |
|---|
| 3984 | | - "Failed to set LAN Tx queue context, error: %d\n", |
|---|
| 3985 | | - status); |
|---|
| 3986 | | - err = -ENODEV; |
|---|
| 3987 | | - goto err_cfg_txqs; |
|---|
| 3988 | | - } |
|---|
| 3989 | | - |
|---|
| 3990 | | - /* Add Tx Queue TEID into the VSI tx ring from the response |
|---|
| 3991 | | - * This will complete configuring and enabling the queue. |
|---|
| 3992 | | - */ |
|---|
| 3993 | | - txq = &qg_buf->txqs[0]; |
|---|
| 3994 | | - if (pf_q == le16_to_cpu(txq->txq_id)) |
|---|
| 3995 | | - vsi->tx_rings[i]->txq_teid = |
|---|
| 3996 | | - le32_to_cpu(txq->q_teid); |
|---|
| 3997 | | - } |
|---|
| 3998 | | -err_cfg_txqs: |
|---|
| 3999 | | - devm_kfree(&pf->pdev->dev, qg_buf); |
|---|
| 4000 | | - return err; |
|---|
| 4001 | | -} |
|---|
| 4002 | | - |
|---|
| 4003 | | -/** |
|---|
| 4004 | | - * ice_setup_rx_ctx - Configure a receive ring context |
|---|
| 4005 | | - * @ring: The Rx ring to configure |
|---|
| 4006 | | - * |
|---|
| 4007 | | - * Configure the Rx descriptor ring in RLAN context. |
|---|
| 4008 | | - */ |
|---|
| 4009 | | -static int ice_setup_rx_ctx(struct ice_ring *ring) |
|---|
| 4010 | | -{ |
|---|
| 4011 | | - struct ice_vsi *vsi = ring->vsi; |
|---|
| 4012 | | - struct ice_hw *hw = &vsi->back->hw; |
|---|
| 4013 | | - u32 rxdid = ICE_RXDID_FLEX_NIC; |
|---|
| 4014 | | - struct ice_rlan_ctx rlan_ctx; |
|---|
| 4015 | | - u32 regval; |
|---|
| 4016 | | - u16 pf_q; |
|---|
| 4017 | | - int err; |
|---|
| 4018 | | - |
|---|
| 4019 | | - /* what is RX queue number in global space of 2K rx queues */ |
|---|
| 4020 | | - pf_q = vsi->rxq_map[ring->q_index]; |
|---|
| 4021 | | - |
|---|
| 4022 | | - /* clear the context structure first */ |
|---|
| 4023 | | - memset(&rlan_ctx, 0, sizeof(rlan_ctx)); |
|---|
| 4024 | | - |
|---|
| 4025 | | - rlan_ctx.base = ring->dma >> ICE_RLAN_BASE_S; |
|---|
| 4026 | | - |
|---|
| 4027 | | - rlan_ctx.qlen = ring->count; |
|---|
| 4028 | | - |
|---|
| 4029 | | - /* Receive Packet Data Buffer Size. |
|---|
| 4030 | | - * The Packet Data Buffer Size is defined in 128 byte units. |
|---|
| 4031 | | - */ |
|---|
| 4032 | | - rlan_ctx.dbuf = vsi->rx_buf_len >> ICE_RLAN_CTX_DBUF_S; |
|---|
| 4033 | | - |
|---|
| 4034 | | - /* use 32 byte descriptors */ |
|---|
| 4035 | | - rlan_ctx.dsize = 1; |
|---|
| 4036 | | - |
|---|
| 4037 | | - /* Strip the Ethernet CRC bytes before the packet is posted to host |
|---|
| 4038 | | - * memory. |
|---|
| 4039 | | - */ |
|---|
| 4040 | | - rlan_ctx.crcstrip = 1; |
|---|
| 4041 | | - |
|---|
| 4042 | | - /* L2TSEL flag defines the reported L2 Tags in the receive descriptor */ |
|---|
| 4043 | | - rlan_ctx.l2tsel = 1; |
|---|
| 4044 | | - |
|---|
| 4045 | | - rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT; |
|---|
| 4046 | | - rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT; |
|---|
| 4047 | | - rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT; |
|---|
| 4048 | | - |
|---|
| 4049 | | - /* This controls whether VLAN is stripped from inner headers |
|---|
| 4050 | | - * The VLAN in the inner L2 header is stripped to the receive |
|---|
| 4051 | | - * descriptor if enabled by this flag. |
|---|
| 4052 | | - */ |
|---|
| 4053 | | - rlan_ctx.showiv = 0; |
|---|
| 4054 | | - |
|---|
| 4055 | | - /* Max packet size for this queue - must not be set to a larger value |
|---|
| 4056 | | - * than 5 x DBUF |
|---|
| 4057 | | - */ |
|---|
| 4058 | | - rlan_ctx.rxmax = min_t(u16, vsi->max_frame, |
|---|
| 4059 | | - ICE_MAX_CHAINED_RX_BUFS * vsi->rx_buf_len); |
|---|
| 4060 | | - |
|---|
| 4061 | | - /* Rx queue threshold in units of 64 */ |
|---|
| 4062 | | - rlan_ctx.lrxqthresh = 1; |
|---|
| 4063 | | - |
|---|
| 4064 | | - /* Enable Flexible Descriptors in the queue context which |
|---|
| 4065 | | - * allows this driver to select a specific receive descriptor format |
|---|
| 4066 | | - */ |
|---|
| 4067 | | - regval = rd32(hw, QRXFLXP_CNTXT(pf_q)); |
|---|
| 4068 | | - regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) & |
|---|
| 4069 | | - QRXFLXP_CNTXT_RXDID_IDX_M; |
|---|
| 4070 | | - |
|---|
| 4071 | | - /* increasing context priority to pick up profile id; |
|---|
| 4072 | | - * default is 0x01; setting to 0x03 to ensure profile |
|---|
| 4073 | | - * is programming if prev context is of same priority |
|---|
| 4074 | | - */ |
|---|
| 4075 | | - regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) & |
|---|
| 4076 | | - QRXFLXP_CNTXT_RXDID_PRIO_M; |
|---|
| 4077 | | - |
|---|
| 4078 | | - wr32(hw, QRXFLXP_CNTXT(pf_q), regval); |
|---|
| 4079 | | - |
|---|
| 4080 | | - /* Absolute queue number out of 2K needs to be passed */ |
|---|
| 4081 | | - err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q); |
|---|
| 4082 | | - if (err) { |
|---|
| 4083 | | - dev_err(&vsi->back->pdev->dev, |
|---|
| 4084 | | - "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n", |
|---|
| 4085 | | - pf_q, err); |
|---|
| 4086 | | - return -EIO; |
|---|
| 4087 | | - } |
|---|
| 4088 | | - |
|---|
| 4089 | | - /* init queue specific tail register */ |
|---|
| 4090 | | - ring->tail = hw->hw_addr + QRX_TAIL(pf_q); |
|---|
| 4091 | | - writel(0, ring->tail); |
|---|
| 4092 | | - ice_alloc_rx_bufs(ring, ICE_DESC_UNUSED(ring)); |
|---|
| 4093 | | - |
|---|
| 4094 | | - return 0; |
|---|
| 4095 | | -} |
|---|
| 4096 | | - |
|---|
| 4097 | | -/** |
|---|
| 4098 | | - * ice_vsi_cfg_rxqs - Configure the VSI for Rx |
|---|
| 4099 | | - * @vsi: the VSI being configured |
|---|
| 4100 | | - * |
|---|
| 4101 | | - * Return 0 on success and a negative value on error |
|---|
| 4102 | | - * Configure the Rx VSI for operation. |
|---|
| 4103 | | - */ |
|---|
| 4104 | | -static int ice_vsi_cfg_rxqs(struct ice_vsi *vsi) |
|---|
| 4105 | | -{ |
|---|
| 4106 | | - int err = 0; |
|---|
| 4107 | | - u16 i; |
|---|
| 4108 | | - |
|---|
| 4109 | | - if (vsi->netdev && vsi->netdev->mtu > ETH_DATA_LEN) |
|---|
| 4110 | | - vsi->max_frame = vsi->netdev->mtu + |
|---|
| 4111 | | - ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; |
|---|
| 4112 | | - else |
|---|
| 4113 | | - vsi->max_frame = ICE_RXBUF_2048; |
|---|
| 4114 | | - |
|---|
| 4115 | | - vsi->rx_buf_len = ICE_RXBUF_2048; |
|---|
| 4116 | | - /* set up individual rings */ |
|---|
| 4117 | | - for (i = 0; i < vsi->num_rxq && !err; i++) |
|---|
| 4118 | | - err = ice_setup_rx_ctx(vsi->rx_rings[i]); |
|---|
| 4119 | | - |
|---|
| 4120 | | - if (err) { |
|---|
| 4121 | | - dev_err(&vsi->back->pdev->dev, "ice_setup_rx_ctx failed\n"); |
|---|
| 4122 | | - return -EIO; |
|---|
| 4123 | | - } |
|---|
| 4124 | | - return err; |
|---|
| 4125 | | -} |
|---|
| 4126 | | - |
|---|
| 4127 | | -/** |
|---|
| 4128 | 5195 | * ice_vsi_cfg - Setup the VSI |
|---|
| 4129 | 5196 | * @vsi: the VSI being configured |
|---|
| 4130 | 5197 | * |
|---|
| 4131 | 5198 | * Return 0 on success and negative value on error |
|---|
| 4132 | 5199 | */ |
|---|
| 4133 | | -static int ice_vsi_cfg(struct ice_vsi *vsi) |
|---|
| 5200 | +int ice_vsi_cfg(struct ice_vsi *vsi) |
|---|
| 4134 | 5201 | { |
|---|
| 4135 | 5202 | int err; |
|---|
| 4136 | 5203 | |
|---|
| 4137 | | - if (vsi->netdev) { |
|---|
| 5204 | + if (vsi->netdev && vsi->type == ICE_VSI_PF) { |
|---|
| 4138 | 5205 | ice_set_rx_mode(vsi->netdev); |
|---|
| 4139 | | - err = ice_restore_vlan(vsi); |
|---|
| 5206 | + |
|---|
| 5207 | + err = ice_vsi_vlan_setup(vsi); |
|---|
| 4140 | 5208 | if (err) |
|---|
| 4141 | 5209 | return err; |
|---|
| 4142 | 5210 | } |
|---|
| 5211 | + ice_vsi_cfg_dcb_rings(vsi); |
|---|
| 4143 | 5212 | |
|---|
| 4144 | | - err = ice_vsi_cfg_txqs(vsi); |
|---|
| 5213 | + err = ice_vsi_cfg_lan_txqs(vsi); |
|---|
| 5214 | + if (!err && ice_is_xdp_ena_vsi(vsi)) |
|---|
| 5215 | + err = ice_vsi_cfg_xdp_txqs(vsi); |
|---|
| 4145 | 5216 | if (!err) |
|---|
| 4146 | 5217 | err = ice_vsi_cfg_rxqs(vsi); |
|---|
| 4147 | 5218 | |
|---|
| 4148 | 5219 | return err; |
|---|
| 4149 | | -} |
|---|
| 4150 | | - |
|---|
| 4151 | | -/** |
|---|
| 4152 | | - * ice_vsi_stop_tx_rings - Disable Tx rings |
|---|
| 4153 | | - * @vsi: the VSI being configured |
|---|
| 4154 | | - */ |
|---|
| 4155 | | -static int ice_vsi_stop_tx_rings(struct ice_vsi *vsi) |
|---|
| 4156 | | -{ |
|---|
| 4157 | | - struct ice_pf *pf = vsi->back; |
|---|
| 4158 | | - struct ice_hw *hw = &pf->hw; |
|---|
| 4159 | | - enum ice_status status; |
|---|
| 4160 | | - u32 *q_teids, val; |
|---|
| 4161 | | - u16 *q_ids, i; |
|---|
| 4162 | | - int err = 0; |
|---|
| 4163 | | - |
|---|
| 4164 | | - if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS) |
|---|
| 4165 | | - return -EINVAL; |
|---|
| 4166 | | - |
|---|
| 4167 | | - q_teids = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*q_teids), |
|---|
| 4168 | | - GFP_KERNEL); |
|---|
| 4169 | | - if (!q_teids) |
|---|
| 4170 | | - return -ENOMEM; |
|---|
| 4171 | | - |
|---|
| 4172 | | - q_ids = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*q_ids), |
|---|
| 4173 | | - GFP_KERNEL); |
|---|
| 4174 | | - if (!q_ids) { |
|---|
| 4175 | | - err = -ENOMEM; |
|---|
| 4176 | | - goto err_alloc_q_ids; |
|---|
| 4177 | | - } |
|---|
| 4178 | | - |
|---|
| 4179 | | - /* set up the tx queue list to be disabled */ |
|---|
| 4180 | | - ice_for_each_txq(vsi, i) { |
|---|
| 4181 | | - u16 v_idx; |
|---|
| 4182 | | - |
|---|
| 4183 | | - if (!vsi->tx_rings || !vsi->tx_rings[i]) { |
|---|
| 4184 | | - err = -EINVAL; |
|---|
| 4185 | | - goto err_out; |
|---|
| 4186 | | - } |
|---|
| 4187 | | - |
|---|
| 4188 | | - q_ids[i] = vsi->txq_map[i]; |
|---|
| 4189 | | - q_teids[i] = vsi->tx_rings[i]->txq_teid; |
|---|
| 4190 | | - |
|---|
| 4191 | | - /* clear cause_ena bit for disabled queues */ |
|---|
| 4192 | | - val = rd32(hw, QINT_TQCTL(vsi->tx_rings[i]->reg_idx)); |
|---|
| 4193 | | - val &= ~QINT_TQCTL_CAUSE_ENA_M; |
|---|
| 4194 | | - wr32(hw, QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val); |
|---|
| 4195 | | - |
|---|
| 4196 | | - /* software is expected to wait for 100 ns */ |
|---|
| 4197 | | - ndelay(100); |
|---|
| 4198 | | - |
|---|
| 4199 | | - /* trigger a software interrupt for the vector associated to |
|---|
| 4200 | | - * the queue to schedule napi handler |
|---|
| 4201 | | - */ |
|---|
| 4202 | | - v_idx = vsi->tx_rings[i]->q_vector->v_idx; |
|---|
| 4203 | | - wr32(hw, GLINT_DYN_CTL(vsi->base_vector + v_idx), |
|---|
| 4204 | | - GLINT_DYN_CTL_SWINT_TRIG_M | GLINT_DYN_CTL_INTENA_MSK_M); |
|---|
| 4205 | | - } |
|---|
| 4206 | | - status = ice_dis_vsi_txq(vsi->port_info, vsi->num_txq, q_ids, q_teids, |
|---|
| 4207 | | - NULL); |
|---|
| 4208 | | - /* if the disable queue command was exercised during an active reset |
|---|
| 4209 | | - * flow, ICE_ERR_RESET_ONGOING is returned. This is not an error as |
|---|
| 4210 | | - * the reset operation disables queues at the hardware level anyway. |
|---|
| 4211 | | - */ |
|---|
| 4212 | | - if (status == ICE_ERR_RESET_ONGOING) { |
|---|
| 4213 | | - dev_dbg(&pf->pdev->dev, |
|---|
| 4214 | | - "Reset in progress. LAN Tx queues already disabled\n"); |
|---|
| 4215 | | - } else if (status) { |
|---|
| 4216 | | - dev_err(&pf->pdev->dev, |
|---|
| 4217 | | - "Failed to disable LAN Tx queues, error: %d\n", |
|---|
| 4218 | | - status); |
|---|
| 4219 | | - err = -ENODEV; |
|---|
| 4220 | | - } |
|---|
| 4221 | | - |
|---|
| 4222 | | -err_out: |
|---|
| 4223 | | - devm_kfree(&pf->pdev->dev, q_ids); |
|---|
| 4224 | | - |
|---|
| 4225 | | -err_alloc_q_ids: |
|---|
| 4226 | | - devm_kfree(&pf->pdev->dev, q_teids); |
|---|
| 4227 | | - |
|---|
| 4228 | | - return err; |
|---|
| 4229 | | -} |
|---|
| 4230 | | - |
|---|
| 4231 | | -/** |
|---|
| 4232 | | - * ice_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled |
|---|
| 4233 | | - * @pf: the PF being configured |
|---|
| 4234 | | - * @pf_q: the PF queue |
|---|
| 4235 | | - * @ena: enable or disable state of the queue |
|---|
| 4236 | | - * |
|---|
| 4237 | | - * This routine will wait for the given Rx queue of the PF to reach the |
|---|
| 4238 | | - * enabled or disabled state. |
|---|
| 4239 | | - * Returns -ETIMEDOUT in case of failing to reach the requested state after |
|---|
| 4240 | | - * multiple retries; else will return 0 in case of success. |
|---|
| 4241 | | - */ |
|---|
| 4242 | | -static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena) |
|---|
| 4243 | | -{ |
|---|
| 4244 | | - int i; |
|---|
| 4245 | | - |
|---|
| 4246 | | - for (i = 0; i < ICE_Q_WAIT_RETRY_LIMIT; i++) { |
|---|
| 4247 | | - u32 rx_reg = rd32(&pf->hw, QRX_CTRL(pf_q)); |
|---|
| 4248 | | - |
|---|
| 4249 | | - if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M)) |
|---|
| 4250 | | - break; |
|---|
| 4251 | | - |
|---|
| 4252 | | - usleep_range(10, 20); |
|---|
| 4253 | | - } |
|---|
| 4254 | | - if (i >= ICE_Q_WAIT_RETRY_LIMIT) |
|---|
| 4255 | | - return -ETIMEDOUT; |
|---|
| 4256 | | - |
|---|
| 4257 | | - return 0; |
|---|
| 4258 | | -} |
|---|
| 4259 | | - |
|---|
| 4260 | | -/** |
|---|
| 4261 | | - * ice_vsi_ctrl_rx_rings - Start or stop a VSI's rx rings |
|---|
| 4262 | | - * @vsi: the VSI being configured |
|---|
| 4263 | | - * @ena: start or stop the rx rings |
|---|
| 4264 | | - */ |
|---|
| 4265 | | -static int ice_vsi_ctrl_rx_rings(struct ice_vsi *vsi, bool ena) |
|---|
| 4266 | | -{ |
|---|
| 4267 | | - struct ice_pf *pf = vsi->back; |
|---|
| 4268 | | - struct ice_hw *hw = &pf->hw; |
|---|
| 4269 | | - int i, j, ret = 0; |
|---|
| 4270 | | - |
|---|
| 4271 | | - for (i = 0; i < vsi->num_rxq; i++) { |
|---|
| 4272 | | - int pf_q = vsi->rxq_map[i]; |
|---|
| 4273 | | - u32 rx_reg; |
|---|
| 4274 | | - |
|---|
| 4275 | | - for (j = 0; j < ICE_Q_WAIT_MAX_RETRY; j++) { |
|---|
| 4276 | | - rx_reg = rd32(hw, QRX_CTRL(pf_q)); |
|---|
| 4277 | | - if (((rx_reg >> QRX_CTRL_QENA_REQ_S) & 1) == |
|---|
| 4278 | | - ((rx_reg >> QRX_CTRL_QENA_STAT_S) & 1)) |
|---|
| 4279 | | - break; |
|---|
| 4280 | | - usleep_range(1000, 2000); |
|---|
| 4281 | | - } |
|---|
| 4282 | | - |
|---|
| 4283 | | - /* Skip if the queue is already in the requested state */ |
|---|
| 4284 | | - if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M)) |
|---|
| 4285 | | - continue; |
|---|
| 4286 | | - |
|---|
| 4287 | | - /* turn on/off the queue */ |
|---|
| 4288 | | - if (ena) |
|---|
| 4289 | | - rx_reg |= QRX_CTRL_QENA_REQ_M; |
|---|
| 4290 | | - else |
|---|
| 4291 | | - rx_reg &= ~QRX_CTRL_QENA_REQ_M; |
|---|
| 4292 | | - wr32(hw, QRX_CTRL(pf_q), rx_reg); |
|---|
| 4293 | | - |
|---|
| 4294 | | - /* wait for the change to finish */ |
|---|
| 4295 | | - ret = ice_pf_rxq_wait(pf, pf_q, ena); |
|---|
| 4296 | | - if (ret) { |
|---|
| 4297 | | - dev_err(&pf->pdev->dev, |
|---|
| 4298 | | - "VSI idx %d Rx ring %d %sable timeout\n", |
|---|
| 4299 | | - vsi->idx, pf_q, (ena ? "en" : "dis")); |
|---|
| 4300 | | - break; |
|---|
| 4301 | | - } |
|---|
| 4302 | | - } |
|---|
| 4303 | | - |
|---|
| 4304 | | - return ret; |
|---|
| 4305 | | -} |
|---|
| 4306 | | - |
|---|
| 4307 | | -/** |
|---|
| 4308 | | - * ice_vsi_start_rx_rings - start VSI's rx rings |
|---|
| 4309 | | - * @vsi: the VSI whose rings are to be started |
|---|
| 4310 | | - * |
|---|
| 4311 | | - * Returns 0 on success and a negative value on error |
|---|
| 4312 | | - */ |
|---|
| 4313 | | -static int ice_vsi_start_rx_rings(struct ice_vsi *vsi) |
|---|
| 4314 | | -{ |
|---|
| 4315 | | - return ice_vsi_ctrl_rx_rings(vsi, true); |
|---|
| 4316 | | -} |
|---|
| 4317 | | - |
|---|
| 4318 | | -/** |
|---|
| 4319 | | - * ice_vsi_stop_rx_rings - stop VSI's rx rings |
|---|
| 4320 | | - * @vsi: the VSI |
|---|
| 4321 | | - * |
|---|
| 4322 | | - * Returns 0 on success and a negative value on error |
|---|
| 4323 | | - */ |
|---|
| 4324 | | -static int ice_vsi_stop_rx_rings(struct ice_vsi *vsi) |
|---|
| 4325 | | -{ |
|---|
| 4326 | | - return ice_vsi_ctrl_rx_rings(vsi, false); |
|---|
| 4327 | | -} |
|---|
| 4328 | | - |
|---|
| 4329 | | -/** |
|---|
| 4330 | | - * ice_vsi_stop_tx_rx_rings - stop VSI's tx and rx rings |
|---|
| 4331 | | - * @vsi: the VSI |
|---|
| 4332 | | - * Returns 0 on success and a negative value on error |
|---|
| 4333 | | - */ |
|---|
| 4334 | | -static int ice_vsi_stop_tx_rx_rings(struct ice_vsi *vsi) |
|---|
| 4335 | | -{ |
|---|
| 4336 | | - int err_tx, err_rx; |
|---|
| 4337 | | - |
|---|
| 4338 | | - err_tx = ice_vsi_stop_tx_rings(vsi); |
|---|
| 4339 | | - if (err_tx) |
|---|
| 4340 | | - dev_dbg(&vsi->back->pdev->dev, "Failed to disable Tx rings\n"); |
|---|
| 4341 | | - |
|---|
| 4342 | | - err_rx = ice_vsi_stop_rx_rings(vsi); |
|---|
| 4343 | | - if (err_rx) |
|---|
| 4344 | | - dev_dbg(&vsi->back->pdev->dev, "Failed to disable Rx rings\n"); |
|---|
| 4345 | | - |
|---|
| 4346 | | - if (err_tx || err_rx) |
|---|
| 4347 | | - return -EIO; |
|---|
| 4348 | | - |
|---|
| 4349 | | - return 0; |
|---|
| 4350 | 5220 | } |
|---|
| 4351 | 5221 | |
|---|
| 4352 | 5222 | /** |
|---|
| .. | .. |
|---|
| 4360 | 5230 | if (!vsi->netdev) |
|---|
| 4361 | 5231 | return; |
|---|
| 4362 | 5232 | |
|---|
| 4363 | | - for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) { |
|---|
| 5233 | + ice_for_each_q_vector(vsi, q_idx) { |
|---|
| 4364 | 5234 | struct ice_q_vector *q_vector = vsi->q_vectors[q_idx]; |
|---|
| 4365 | 5235 | |
|---|
| 4366 | 5236 | if (q_vector->rx.ring || q_vector->tx.ring) |
|---|
| .. | .. |
|---|
| 4379 | 5249 | struct ice_pf *pf = vsi->back; |
|---|
| 4380 | 5250 | int err; |
|---|
| 4381 | 5251 | |
|---|
| 4382 | | - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) |
|---|
| 4383 | | - ice_vsi_cfg_msix(vsi); |
|---|
| 4384 | | - else |
|---|
| 4385 | | - return -ENOTSUPP; |
|---|
| 5252 | + ice_vsi_cfg_msix(vsi); |
|---|
| 4386 | 5253 | |
|---|
| 4387 | 5254 | /* Enable only Rx rings, Tx rings were enabled by the FW when the |
|---|
| 4388 | 5255 | * Tx queue group list was configured and the context bits were |
|---|
| 4389 | 5256 | * programmed using ice_vsi_cfg_txqs |
|---|
| 4390 | 5257 | */ |
|---|
| 4391 | | - err = ice_vsi_start_rx_rings(vsi); |
|---|
| 5258 | + err = ice_vsi_start_all_rx_rings(vsi); |
|---|
| 4392 | 5259 | if (err) |
|---|
| 4393 | 5260 | return err; |
|---|
| 4394 | 5261 | |
|---|
| .. | .. |
|---|
| 4398 | 5265 | |
|---|
| 4399 | 5266 | if (vsi->port_info && |
|---|
| 4400 | 5267 | (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) && |
|---|
| 4401 | | - vsi->netdev) { |
|---|
| 5268 | + vsi->netdev && vsi->type == ICE_VSI_PF) { |
|---|
| 4402 | 5269 | ice_print_link_msg(vsi, true); |
|---|
| 4403 | 5270 | netif_tx_start_all_queues(vsi->netdev); |
|---|
| 4404 | 5271 | netif_carrier_on(vsi->netdev); |
|---|
| 4405 | 5272 | } |
|---|
| 4406 | 5273 | |
|---|
| 4407 | | - /* clear this now, and the first stats read will be used as baseline */ |
|---|
| 4408 | | - vsi->stat_offsets_loaded = false; |
|---|
| 5274 | + /* Perform an initial read of the statistics registers now to |
|---|
| 5275 | + * set the baseline so counters are ready when interface is up |
|---|
| 5276 | + */ |
|---|
| 5277 | + ice_update_eth_stats(vsi); |
|---|
| 4409 | 5278 | |
|---|
| 4410 | | - ice_service_task_schedule(pf); |
|---|
| 5279 | + if (vsi->type == ICE_VSI_PF) |
|---|
| 5280 | + ice_service_task_schedule(pf); |
|---|
| 4411 | 5281 | |
|---|
| 4412 | | - return err; |
|---|
| 5282 | + return 0; |
|---|
| 4413 | 5283 | } |
|---|
| 4414 | 5284 | |
|---|
| 4415 | 5285 | /** |
|---|
| .. | .. |
|---|
| 4436 | 5306 | * This function fetches stats from the ring considering the atomic operations |
|---|
| 4437 | 5307 | * that needs to be performed to read u64 values in 32 bit machine. |
|---|
| 4438 | 5308 | */ |
|---|
| 4439 | | -static void ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts, |
|---|
| 4440 | | - u64 *bytes) |
|---|
| 5309 | +static void |
|---|
| 5310 | +ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts, u64 *bytes) |
|---|
| 4441 | 5311 | { |
|---|
| 4442 | 5312 | unsigned int start; |
|---|
| 4443 | 5313 | *pkts = 0; |
|---|
| .. | .. |
|---|
| 4453 | 5323 | } |
|---|
| 4454 | 5324 | |
|---|
| 4455 | 5325 | /** |
|---|
| 4456 | | - * ice_stat_update40 - read 40 bit stat from the chip and update stat values |
|---|
| 4457 | | - * @hw: ptr to the hardware info |
|---|
| 4458 | | - * @hireg: high 32 bit HW register to read from |
|---|
| 4459 | | - * @loreg: low 32 bit HW register to read from |
|---|
| 4460 | | - * @prev_stat_loaded: bool to specify if previous stats are loaded |
|---|
| 4461 | | - * @prev_stat: ptr to previous loaded stat value |
|---|
| 4462 | | - * @cur_stat: ptr to current stat value |
|---|
| 4463 | | - */ |
|---|
| 4464 | | -static void ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg, |
|---|
| 4465 | | - bool prev_stat_loaded, u64 *prev_stat, |
|---|
| 4466 | | - u64 *cur_stat) |
|---|
| 4467 | | -{ |
|---|
| 4468 | | - u64 new_data; |
|---|
| 4469 | | - |
|---|
| 4470 | | - new_data = rd32(hw, loreg); |
|---|
| 4471 | | - new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32; |
|---|
| 4472 | | - |
|---|
| 4473 | | - /* device stats are not reset at PFR, they likely will not be zeroed |
|---|
| 4474 | | - * when the driver starts. So save the first values read and use them as |
|---|
| 4475 | | - * offsets to be subtracted from the raw values in order to report stats |
|---|
| 4476 | | - * that count from zero. |
|---|
| 4477 | | - */ |
|---|
| 4478 | | - if (!prev_stat_loaded) |
|---|
| 4479 | | - *prev_stat = new_data; |
|---|
| 4480 | | - if (likely(new_data >= *prev_stat)) |
|---|
| 4481 | | - *cur_stat = new_data - *prev_stat; |
|---|
| 4482 | | - else |
|---|
| 4483 | | - /* to manage the potential roll-over */ |
|---|
| 4484 | | - *cur_stat = (new_data + BIT_ULL(40)) - *prev_stat; |
|---|
| 4485 | | - *cur_stat &= 0xFFFFFFFFFFULL; |
|---|
| 4486 | | -} |
|---|
| 4487 | | - |
|---|
| 4488 | | -/** |
|---|
| 4489 | | - * ice_stat_update32 - read 32 bit stat from the chip and update stat values |
|---|
| 4490 | | - * @hw: ptr to the hardware info |
|---|
| 4491 | | - * @reg: HW register to read from |
|---|
| 4492 | | - * @prev_stat_loaded: bool to specify if previous stats are loaded |
|---|
| 4493 | | - * @prev_stat: ptr to previous loaded stat value |
|---|
| 4494 | | - * @cur_stat: ptr to current stat value |
|---|
| 4495 | | - */ |
|---|
| 4496 | | -static void ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, |
|---|
| 4497 | | - u64 *prev_stat, u64 *cur_stat) |
|---|
| 4498 | | -{ |
|---|
| 4499 | | - u32 new_data; |
|---|
| 4500 | | - |
|---|
| 4501 | | - new_data = rd32(hw, reg); |
|---|
| 4502 | | - |
|---|
| 4503 | | - /* device stats are not reset at PFR, they likely will not be zeroed |
|---|
| 4504 | | - * when the driver starts. So save the first values read and use them as |
|---|
| 4505 | | - * offsets to be subtracted from the raw values in order to report stats |
|---|
| 4506 | | - * that count from zero. |
|---|
| 4507 | | - */ |
|---|
| 4508 | | - if (!prev_stat_loaded) |
|---|
| 4509 | | - *prev_stat = new_data; |
|---|
| 4510 | | - if (likely(new_data >= *prev_stat)) |
|---|
| 4511 | | - *cur_stat = new_data - *prev_stat; |
|---|
| 4512 | | - else |
|---|
| 4513 | | - /* to manage the potential roll-over */ |
|---|
| 4514 | | - *cur_stat = (new_data + BIT_ULL(32)) - *prev_stat; |
|---|
| 4515 | | -} |
|---|
| 4516 | | - |
|---|
| 4517 | | -/** |
|---|
| 4518 | | - * ice_update_eth_stats - Update VSI-specific ethernet statistics counters |
|---|
| 5326 | + * ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters |
|---|
| 4519 | 5327 | * @vsi: the VSI to be updated |
|---|
| 5328 | + * @rings: rings to work on |
|---|
| 5329 | + * @count: number of rings |
|---|
| 4520 | 5330 | */ |
|---|
| 4521 | | -static void ice_update_eth_stats(struct ice_vsi *vsi) |
|---|
| 5331 | +static void |
|---|
| 5332 | +ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, struct ice_ring **rings, |
|---|
| 5333 | + u16 count) |
|---|
| 4522 | 5334 | { |
|---|
| 4523 | | - struct ice_eth_stats *prev_es, *cur_es; |
|---|
| 4524 | | - struct ice_hw *hw = &vsi->back->hw; |
|---|
| 4525 | | - u16 vsi_num = vsi->vsi_num; /* HW absolute index of a VSI */ |
|---|
| 5335 | + struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats; |
|---|
| 5336 | + u16 i; |
|---|
| 4526 | 5337 | |
|---|
| 4527 | | - prev_es = &vsi->eth_stats_prev; |
|---|
| 4528 | | - cur_es = &vsi->eth_stats; |
|---|
| 5338 | + for (i = 0; i < count; i++) { |
|---|
| 5339 | + struct ice_ring *ring; |
|---|
| 5340 | + u64 pkts, bytes; |
|---|
| 4529 | 5341 | |
|---|
| 4530 | | - ice_stat_update40(hw, GLV_GORCH(vsi_num), GLV_GORCL(vsi_num), |
|---|
| 4531 | | - vsi->stat_offsets_loaded, &prev_es->rx_bytes, |
|---|
| 4532 | | - &cur_es->rx_bytes); |
|---|
| 4533 | | - |
|---|
| 4534 | | - ice_stat_update40(hw, GLV_UPRCH(vsi_num), GLV_UPRCL(vsi_num), |
|---|
| 4535 | | - vsi->stat_offsets_loaded, &prev_es->rx_unicast, |
|---|
| 4536 | | - &cur_es->rx_unicast); |
|---|
| 4537 | | - |
|---|
| 4538 | | - ice_stat_update40(hw, GLV_MPRCH(vsi_num), GLV_MPRCL(vsi_num), |
|---|
| 4539 | | - vsi->stat_offsets_loaded, &prev_es->rx_multicast, |
|---|
| 4540 | | - &cur_es->rx_multicast); |
|---|
| 4541 | | - |
|---|
| 4542 | | - ice_stat_update40(hw, GLV_BPRCH(vsi_num), GLV_BPRCL(vsi_num), |
|---|
| 4543 | | - vsi->stat_offsets_loaded, &prev_es->rx_broadcast, |
|---|
| 4544 | | - &cur_es->rx_broadcast); |
|---|
| 4545 | | - |
|---|
| 4546 | | - ice_stat_update32(hw, GLV_RDPC(vsi_num), vsi->stat_offsets_loaded, |
|---|
| 4547 | | - &prev_es->rx_discards, &cur_es->rx_discards); |
|---|
| 4548 | | - |
|---|
| 4549 | | - ice_stat_update40(hw, GLV_GOTCH(vsi_num), GLV_GOTCL(vsi_num), |
|---|
| 4550 | | - vsi->stat_offsets_loaded, &prev_es->tx_bytes, |
|---|
| 4551 | | - &cur_es->tx_bytes); |
|---|
| 4552 | | - |
|---|
| 4553 | | - ice_stat_update40(hw, GLV_UPTCH(vsi_num), GLV_UPTCL(vsi_num), |
|---|
| 4554 | | - vsi->stat_offsets_loaded, &prev_es->tx_unicast, |
|---|
| 4555 | | - &cur_es->tx_unicast); |
|---|
| 4556 | | - |
|---|
| 4557 | | - ice_stat_update40(hw, GLV_MPTCH(vsi_num), GLV_MPTCL(vsi_num), |
|---|
| 4558 | | - vsi->stat_offsets_loaded, &prev_es->tx_multicast, |
|---|
| 4559 | | - &cur_es->tx_multicast); |
|---|
| 4560 | | - |
|---|
| 4561 | | - ice_stat_update40(hw, GLV_BPTCH(vsi_num), GLV_BPTCL(vsi_num), |
|---|
| 4562 | | - vsi->stat_offsets_loaded, &prev_es->tx_broadcast, |
|---|
| 4563 | | - &cur_es->tx_broadcast); |
|---|
| 4564 | | - |
|---|
| 4565 | | - ice_stat_update32(hw, GLV_TEPC(vsi_num), vsi->stat_offsets_loaded, |
|---|
| 4566 | | - &prev_es->tx_errors, &cur_es->tx_errors); |
|---|
| 4567 | | - |
|---|
| 4568 | | - vsi->stat_offsets_loaded = true; |
|---|
| 5342 | + ring = READ_ONCE(rings[i]); |
|---|
| 5343 | + ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes); |
|---|
| 5344 | + vsi_stats->tx_packets += pkts; |
|---|
| 5345 | + vsi_stats->tx_bytes += bytes; |
|---|
| 5346 | + vsi->tx_restart += ring->tx_stats.restart_q; |
|---|
| 5347 | + vsi->tx_busy += ring->tx_stats.tx_busy; |
|---|
| 5348 | + vsi->tx_linearize += ring->tx_stats.tx_linearize; |
|---|
| 5349 | + } |
|---|
| 4569 | 5350 | } |
|---|
| 4570 | 5351 | |
|---|
| 4571 | 5352 | /** |
|---|
| .. | .. |
|---|
| 4591 | 5372 | vsi->tx_linearize = 0; |
|---|
| 4592 | 5373 | vsi->rx_buf_failed = 0; |
|---|
| 4593 | 5374 | vsi->rx_page_failed = 0; |
|---|
| 5375 | + vsi->rx_gro_dropped = 0; |
|---|
| 4594 | 5376 | |
|---|
| 4595 | 5377 | rcu_read_lock(); |
|---|
| 4596 | 5378 | |
|---|
| 4597 | 5379 | /* update Tx rings counters */ |
|---|
| 4598 | | - ice_for_each_txq(vsi, i) { |
|---|
| 4599 | | - ring = READ_ONCE(vsi->tx_rings[i]); |
|---|
| 4600 | | - ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes); |
|---|
| 4601 | | - vsi_stats->tx_packets += pkts; |
|---|
| 4602 | | - vsi_stats->tx_bytes += bytes; |
|---|
| 4603 | | - vsi->tx_restart += ring->tx_stats.restart_q; |
|---|
| 4604 | | - vsi->tx_busy += ring->tx_stats.tx_busy; |
|---|
| 4605 | | - vsi->tx_linearize += ring->tx_stats.tx_linearize; |
|---|
| 4606 | | - } |
|---|
| 5380 | + ice_update_vsi_tx_ring_stats(vsi, vsi->tx_rings, vsi->num_txq); |
|---|
| 4607 | 5381 | |
|---|
| 4608 | 5382 | /* update Rx rings counters */ |
|---|
| 4609 | 5383 | ice_for_each_rxq(vsi, i) { |
|---|
| .. | .. |
|---|
| 4613 | 5387 | vsi_stats->rx_bytes += bytes; |
|---|
| 4614 | 5388 | vsi->rx_buf_failed += ring->rx_stats.alloc_buf_failed; |
|---|
| 4615 | 5389 | vsi->rx_page_failed += ring->rx_stats.alloc_page_failed; |
|---|
| 5390 | + vsi->rx_gro_dropped += ring->rx_stats.gro_dropped; |
|---|
| 4616 | 5391 | } |
|---|
| 5392 | + |
|---|
| 5393 | + /* update XDP Tx rings counters */ |
|---|
| 5394 | + if (ice_is_xdp_ena_vsi(vsi)) |
|---|
| 5395 | + ice_update_vsi_tx_ring_stats(vsi, vsi->xdp_rings, |
|---|
| 5396 | + vsi->num_xdp_txq); |
|---|
| 4617 | 5397 | |
|---|
| 4618 | 5398 | rcu_read_unlock(); |
|---|
| 4619 | 5399 | } |
|---|
| .. | .. |
|---|
| 4622 | 5402 | * ice_update_vsi_stats - Update VSI stats counters |
|---|
| 4623 | 5403 | * @vsi: the VSI to be updated |
|---|
| 4624 | 5404 | */ |
|---|
| 4625 | | -static void ice_update_vsi_stats(struct ice_vsi *vsi) |
|---|
| 5405 | +void ice_update_vsi_stats(struct ice_vsi *vsi) |
|---|
| 4626 | 5406 | { |
|---|
| 4627 | 5407 | struct rtnl_link_stats64 *cur_ns = &vsi->net_stats; |
|---|
| 4628 | 5408 | struct ice_eth_stats *cur_es = &vsi->eth_stats; |
|---|
| .. | .. |
|---|
| 4639 | 5419 | ice_update_eth_stats(vsi); |
|---|
| 4640 | 5420 | |
|---|
| 4641 | 5421 | cur_ns->tx_errors = cur_es->tx_errors; |
|---|
| 4642 | | - cur_ns->rx_dropped = cur_es->rx_discards; |
|---|
| 5422 | + cur_ns->rx_dropped = cur_es->rx_discards + vsi->rx_gro_dropped; |
|---|
| 4643 | 5423 | cur_ns->tx_dropped = cur_es->tx_discards; |
|---|
| 4644 | 5424 | cur_ns->multicast = cur_es->rx_multicast; |
|---|
| 4645 | 5425 | |
|---|
| .. | .. |
|---|
| 4647 | 5427 | if (vsi->type == ICE_VSI_PF) { |
|---|
| 4648 | 5428 | cur_ns->rx_crc_errors = pf->stats.crc_errors; |
|---|
| 4649 | 5429 | cur_ns->rx_errors = pf->stats.crc_errors + |
|---|
| 4650 | | - pf->stats.illegal_bytes; |
|---|
| 5430 | + pf->stats.illegal_bytes + |
|---|
| 5431 | + pf->stats.rx_len_errors + |
|---|
| 5432 | + pf->stats.rx_undersize + |
|---|
| 5433 | + pf->hw_csum_rx_error + |
|---|
| 5434 | + pf->stats.rx_jabber + |
|---|
| 5435 | + pf->stats.rx_fragments + |
|---|
| 5436 | + pf->stats.rx_oversize; |
|---|
| 4651 | 5437 | cur_ns->rx_length_errors = pf->stats.rx_len_errors; |
|---|
| 5438 | + /* record drops from the port level */ |
|---|
| 5439 | + cur_ns->rx_missed_errors = pf->stats.eth.rx_discards; |
|---|
| 4652 | 5440 | } |
|---|
| 4653 | 5441 | } |
|---|
| 4654 | 5442 | |
|---|
| .. | .. |
|---|
| 4656 | 5444 | * ice_update_pf_stats - Update PF port stats counters |
|---|
| 4657 | 5445 | * @pf: PF whose stats needs to be updated |
|---|
| 4658 | 5446 | */ |
|---|
| 4659 | | -static void ice_update_pf_stats(struct ice_pf *pf) |
|---|
| 5447 | +void ice_update_pf_stats(struct ice_pf *pf) |
|---|
| 4660 | 5448 | { |
|---|
| 4661 | 5449 | struct ice_hw_port_stats *prev_ps, *cur_ps; |
|---|
| 4662 | 5450 | struct ice_hw *hw = &pf->hw; |
|---|
| 4663 | | - u8 pf_id; |
|---|
| 5451 | + u16 fd_ctr_base; |
|---|
| 5452 | + u8 port; |
|---|
| 4664 | 5453 | |
|---|
| 5454 | + port = hw->port_info->lport; |
|---|
| 4665 | 5455 | prev_ps = &pf->stats_prev; |
|---|
| 4666 | 5456 | cur_ps = &pf->stats; |
|---|
| 4667 | | - pf_id = hw->pf_id; |
|---|
| 4668 | 5457 | |
|---|
| 4669 | | - ice_stat_update40(hw, GLPRT_GORCH(pf_id), GLPRT_GORCL(pf_id), |
|---|
| 4670 | | - pf->stat_prev_loaded, &prev_ps->eth.rx_bytes, |
|---|
| 5458 | + ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded, |
|---|
| 5459 | + &prev_ps->eth.rx_bytes, |
|---|
| 4671 | 5460 | &cur_ps->eth.rx_bytes); |
|---|
| 4672 | 5461 | |
|---|
| 4673 | | - ice_stat_update40(hw, GLPRT_UPRCH(pf_id), GLPRT_UPRCL(pf_id), |
|---|
| 4674 | | - pf->stat_prev_loaded, &prev_ps->eth.rx_unicast, |
|---|
| 5462 | + ice_stat_update40(hw, GLPRT_UPRCL(port), pf->stat_prev_loaded, |
|---|
| 5463 | + &prev_ps->eth.rx_unicast, |
|---|
| 4675 | 5464 | &cur_ps->eth.rx_unicast); |
|---|
| 4676 | 5465 | |
|---|
| 4677 | | - ice_stat_update40(hw, GLPRT_MPRCH(pf_id), GLPRT_MPRCL(pf_id), |
|---|
| 4678 | | - pf->stat_prev_loaded, &prev_ps->eth.rx_multicast, |
|---|
| 5466 | + ice_stat_update40(hw, GLPRT_MPRCL(port), pf->stat_prev_loaded, |
|---|
| 5467 | + &prev_ps->eth.rx_multicast, |
|---|
| 4679 | 5468 | &cur_ps->eth.rx_multicast); |
|---|
| 4680 | 5469 | |
|---|
| 4681 | | - ice_stat_update40(hw, GLPRT_BPRCH(pf_id), GLPRT_BPRCL(pf_id), |
|---|
| 4682 | | - pf->stat_prev_loaded, &prev_ps->eth.rx_broadcast, |
|---|
| 5470 | + ice_stat_update40(hw, GLPRT_BPRCL(port), pf->stat_prev_loaded, |
|---|
| 5471 | + &prev_ps->eth.rx_broadcast, |
|---|
| 4683 | 5472 | &cur_ps->eth.rx_broadcast); |
|---|
| 4684 | 5473 | |
|---|
| 4685 | | - ice_stat_update40(hw, GLPRT_GOTCH(pf_id), GLPRT_GOTCL(pf_id), |
|---|
| 4686 | | - pf->stat_prev_loaded, &prev_ps->eth.tx_bytes, |
|---|
| 5474 | + ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded, |
|---|
| 5475 | + &prev_ps->eth.rx_discards, |
|---|
| 5476 | + &cur_ps->eth.rx_discards); |
|---|
| 5477 | + |
|---|
| 5478 | + ice_stat_update40(hw, GLPRT_GOTCL(port), pf->stat_prev_loaded, |
|---|
| 5479 | + &prev_ps->eth.tx_bytes, |
|---|
| 4687 | 5480 | &cur_ps->eth.tx_bytes); |
|---|
| 4688 | 5481 | |
|---|
| 4689 | | - ice_stat_update40(hw, GLPRT_UPTCH(pf_id), GLPRT_UPTCL(pf_id), |
|---|
| 4690 | | - pf->stat_prev_loaded, &prev_ps->eth.tx_unicast, |
|---|
| 5482 | + ice_stat_update40(hw, GLPRT_UPTCL(port), pf->stat_prev_loaded, |
|---|
| 5483 | + &prev_ps->eth.tx_unicast, |
|---|
| 4691 | 5484 | &cur_ps->eth.tx_unicast); |
|---|
| 4692 | 5485 | |
|---|
| 4693 | | - ice_stat_update40(hw, GLPRT_MPTCH(pf_id), GLPRT_MPTCL(pf_id), |
|---|
| 4694 | | - pf->stat_prev_loaded, &prev_ps->eth.tx_multicast, |
|---|
| 5486 | + ice_stat_update40(hw, GLPRT_MPTCL(port), pf->stat_prev_loaded, |
|---|
| 5487 | + &prev_ps->eth.tx_multicast, |
|---|
| 4695 | 5488 | &cur_ps->eth.tx_multicast); |
|---|
| 4696 | 5489 | |
|---|
| 4697 | | - ice_stat_update40(hw, GLPRT_BPTCH(pf_id), GLPRT_BPTCL(pf_id), |
|---|
| 4698 | | - pf->stat_prev_loaded, &prev_ps->eth.tx_broadcast, |
|---|
| 5490 | + ice_stat_update40(hw, GLPRT_BPTCL(port), pf->stat_prev_loaded, |
|---|
| 5491 | + &prev_ps->eth.tx_broadcast, |
|---|
| 4699 | 5492 | &cur_ps->eth.tx_broadcast); |
|---|
| 4700 | 5493 | |
|---|
| 4701 | | - ice_stat_update32(hw, GLPRT_TDOLD(pf_id), pf->stat_prev_loaded, |
|---|
| 5494 | + ice_stat_update32(hw, GLPRT_TDOLD(port), pf->stat_prev_loaded, |
|---|
| 4702 | 5495 | &prev_ps->tx_dropped_link_down, |
|---|
| 4703 | 5496 | &cur_ps->tx_dropped_link_down); |
|---|
| 4704 | 5497 | |
|---|
| 4705 | | - ice_stat_update40(hw, GLPRT_PRC64H(pf_id), GLPRT_PRC64L(pf_id), |
|---|
| 4706 | | - pf->stat_prev_loaded, &prev_ps->rx_size_64, |
|---|
| 4707 | | - &cur_ps->rx_size_64); |
|---|
| 5498 | + ice_stat_update40(hw, GLPRT_PRC64L(port), pf->stat_prev_loaded, |
|---|
| 5499 | + &prev_ps->rx_size_64, &cur_ps->rx_size_64); |
|---|
| 4708 | 5500 | |
|---|
| 4709 | | - ice_stat_update40(hw, GLPRT_PRC127H(pf_id), GLPRT_PRC127L(pf_id), |
|---|
| 4710 | | - pf->stat_prev_loaded, &prev_ps->rx_size_127, |
|---|
| 4711 | | - &cur_ps->rx_size_127); |
|---|
| 5501 | + ice_stat_update40(hw, GLPRT_PRC127L(port), pf->stat_prev_loaded, |
|---|
| 5502 | + &prev_ps->rx_size_127, &cur_ps->rx_size_127); |
|---|
| 4712 | 5503 | |
|---|
| 4713 | | - ice_stat_update40(hw, GLPRT_PRC255H(pf_id), GLPRT_PRC255L(pf_id), |
|---|
| 4714 | | - pf->stat_prev_loaded, &prev_ps->rx_size_255, |
|---|
| 4715 | | - &cur_ps->rx_size_255); |
|---|
| 5504 | + ice_stat_update40(hw, GLPRT_PRC255L(port), pf->stat_prev_loaded, |
|---|
| 5505 | + &prev_ps->rx_size_255, &cur_ps->rx_size_255); |
|---|
| 4716 | 5506 | |
|---|
| 4717 | | - ice_stat_update40(hw, GLPRT_PRC511H(pf_id), GLPRT_PRC511L(pf_id), |
|---|
| 4718 | | - pf->stat_prev_loaded, &prev_ps->rx_size_511, |
|---|
| 4719 | | - &cur_ps->rx_size_511); |
|---|
| 5507 | + ice_stat_update40(hw, GLPRT_PRC511L(port), pf->stat_prev_loaded, |
|---|
| 5508 | + &prev_ps->rx_size_511, &cur_ps->rx_size_511); |
|---|
| 4720 | 5509 | |
|---|
| 4721 | | - ice_stat_update40(hw, GLPRT_PRC1023H(pf_id), |
|---|
| 4722 | | - GLPRT_PRC1023L(pf_id), pf->stat_prev_loaded, |
|---|
| 5510 | + ice_stat_update40(hw, GLPRT_PRC1023L(port), pf->stat_prev_loaded, |
|---|
| 4723 | 5511 | &prev_ps->rx_size_1023, &cur_ps->rx_size_1023); |
|---|
| 4724 | 5512 | |
|---|
| 4725 | | - ice_stat_update40(hw, GLPRT_PRC1522H(pf_id), |
|---|
| 4726 | | - GLPRT_PRC1522L(pf_id), pf->stat_prev_loaded, |
|---|
| 5513 | + ice_stat_update40(hw, GLPRT_PRC1522L(port), pf->stat_prev_loaded, |
|---|
| 4727 | 5514 | &prev_ps->rx_size_1522, &cur_ps->rx_size_1522); |
|---|
| 4728 | 5515 | |
|---|
| 4729 | | - ice_stat_update40(hw, GLPRT_PRC9522H(pf_id), |
|---|
| 4730 | | - GLPRT_PRC9522L(pf_id), pf->stat_prev_loaded, |
|---|
| 5516 | + ice_stat_update40(hw, GLPRT_PRC9522L(port), pf->stat_prev_loaded, |
|---|
| 4731 | 5517 | &prev_ps->rx_size_big, &cur_ps->rx_size_big); |
|---|
| 4732 | 5518 | |
|---|
| 4733 | | - ice_stat_update40(hw, GLPRT_PTC64H(pf_id), GLPRT_PTC64L(pf_id), |
|---|
| 4734 | | - pf->stat_prev_loaded, &prev_ps->tx_size_64, |
|---|
| 4735 | | - &cur_ps->tx_size_64); |
|---|
| 5519 | + ice_stat_update40(hw, GLPRT_PTC64L(port), pf->stat_prev_loaded, |
|---|
| 5520 | + &prev_ps->tx_size_64, &cur_ps->tx_size_64); |
|---|
| 4736 | 5521 | |
|---|
| 4737 | | - ice_stat_update40(hw, GLPRT_PTC127H(pf_id), GLPRT_PTC127L(pf_id), |
|---|
| 4738 | | - pf->stat_prev_loaded, &prev_ps->tx_size_127, |
|---|
| 4739 | | - &cur_ps->tx_size_127); |
|---|
| 5522 | + ice_stat_update40(hw, GLPRT_PTC127L(port), pf->stat_prev_loaded, |
|---|
| 5523 | + &prev_ps->tx_size_127, &cur_ps->tx_size_127); |
|---|
| 4740 | 5524 | |
|---|
| 4741 | | - ice_stat_update40(hw, GLPRT_PTC255H(pf_id), GLPRT_PTC255L(pf_id), |
|---|
| 4742 | | - pf->stat_prev_loaded, &prev_ps->tx_size_255, |
|---|
| 4743 | | - &cur_ps->tx_size_255); |
|---|
| 5525 | + ice_stat_update40(hw, GLPRT_PTC255L(port), pf->stat_prev_loaded, |
|---|
| 5526 | + &prev_ps->tx_size_255, &cur_ps->tx_size_255); |
|---|
| 4744 | 5527 | |
|---|
| 4745 | | - ice_stat_update40(hw, GLPRT_PTC511H(pf_id), GLPRT_PTC511L(pf_id), |
|---|
| 4746 | | - pf->stat_prev_loaded, &prev_ps->tx_size_511, |
|---|
| 4747 | | - &cur_ps->tx_size_511); |
|---|
| 5528 | + ice_stat_update40(hw, GLPRT_PTC511L(port), pf->stat_prev_loaded, |
|---|
| 5529 | + &prev_ps->tx_size_511, &cur_ps->tx_size_511); |
|---|
| 4748 | 5530 | |
|---|
| 4749 | | - ice_stat_update40(hw, GLPRT_PTC1023H(pf_id), |
|---|
| 4750 | | - GLPRT_PTC1023L(pf_id), pf->stat_prev_loaded, |
|---|
| 5531 | + ice_stat_update40(hw, GLPRT_PTC1023L(port), pf->stat_prev_loaded, |
|---|
| 4751 | 5532 | &prev_ps->tx_size_1023, &cur_ps->tx_size_1023); |
|---|
| 4752 | 5533 | |
|---|
| 4753 | | - ice_stat_update40(hw, GLPRT_PTC1522H(pf_id), |
|---|
| 4754 | | - GLPRT_PTC1522L(pf_id), pf->stat_prev_loaded, |
|---|
| 5534 | + ice_stat_update40(hw, GLPRT_PTC1522L(port), pf->stat_prev_loaded, |
|---|
| 4755 | 5535 | &prev_ps->tx_size_1522, &cur_ps->tx_size_1522); |
|---|
| 4756 | 5536 | |
|---|
| 4757 | | - ice_stat_update40(hw, GLPRT_PTC9522H(pf_id), |
|---|
| 4758 | | - GLPRT_PTC9522L(pf_id), pf->stat_prev_loaded, |
|---|
| 5537 | + ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded, |
|---|
| 4759 | 5538 | &prev_ps->tx_size_big, &cur_ps->tx_size_big); |
|---|
| 4760 | 5539 | |
|---|
| 4761 | | - ice_stat_update32(hw, GLPRT_LXONRXC(pf_id), pf->stat_prev_loaded, |
|---|
| 5540 | + fd_ctr_base = hw->fd_ctr_base; |
|---|
| 5541 | + |
|---|
| 5542 | + ice_stat_update40(hw, |
|---|
| 5543 | + GLSTAT_FD_CNT0L(ICE_FD_SB_STAT_IDX(fd_ctr_base)), |
|---|
| 5544 | + pf->stat_prev_loaded, &prev_ps->fd_sb_match, |
|---|
| 5545 | + &cur_ps->fd_sb_match); |
|---|
| 5546 | + ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded, |
|---|
| 4762 | 5547 | &prev_ps->link_xon_rx, &cur_ps->link_xon_rx); |
|---|
| 4763 | 5548 | |
|---|
| 4764 | | - ice_stat_update32(hw, GLPRT_LXOFFRXC(pf_id), pf->stat_prev_loaded, |
|---|
| 5549 | + ice_stat_update32(hw, GLPRT_LXOFFRXC(port), pf->stat_prev_loaded, |
|---|
| 4765 | 5550 | &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx); |
|---|
| 4766 | 5551 | |
|---|
| 4767 | | - ice_stat_update32(hw, GLPRT_LXONTXC(pf_id), pf->stat_prev_loaded, |
|---|
| 5552 | + ice_stat_update32(hw, GLPRT_LXONTXC(port), pf->stat_prev_loaded, |
|---|
| 4768 | 5553 | &prev_ps->link_xon_tx, &cur_ps->link_xon_tx); |
|---|
| 4769 | 5554 | |
|---|
| 4770 | | - ice_stat_update32(hw, GLPRT_LXOFFTXC(pf_id), pf->stat_prev_loaded, |
|---|
| 5555 | + ice_stat_update32(hw, GLPRT_LXOFFTXC(port), pf->stat_prev_loaded, |
|---|
| 4771 | 5556 | &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx); |
|---|
| 4772 | 5557 | |
|---|
| 4773 | | - ice_stat_update32(hw, GLPRT_CRCERRS(pf_id), pf->stat_prev_loaded, |
|---|
| 5558 | + ice_update_dcb_stats(pf); |
|---|
| 5559 | + |
|---|
| 5560 | + ice_stat_update32(hw, GLPRT_CRCERRS(port), pf->stat_prev_loaded, |
|---|
| 4774 | 5561 | &prev_ps->crc_errors, &cur_ps->crc_errors); |
|---|
| 4775 | 5562 | |
|---|
| 4776 | | - ice_stat_update32(hw, GLPRT_ILLERRC(pf_id), pf->stat_prev_loaded, |
|---|
| 5563 | + ice_stat_update32(hw, GLPRT_ILLERRC(port), pf->stat_prev_loaded, |
|---|
| 4777 | 5564 | &prev_ps->illegal_bytes, &cur_ps->illegal_bytes); |
|---|
| 4778 | 5565 | |
|---|
| 4779 | | - ice_stat_update32(hw, GLPRT_MLFC(pf_id), pf->stat_prev_loaded, |
|---|
| 5566 | + ice_stat_update32(hw, GLPRT_MLFC(port), pf->stat_prev_loaded, |
|---|
| 4780 | 5567 | &prev_ps->mac_local_faults, |
|---|
| 4781 | 5568 | &cur_ps->mac_local_faults); |
|---|
| 4782 | 5569 | |
|---|
| 4783 | | - ice_stat_update32(hw, GLPRT_MRFC(pf_id), pf->stat_prev_loaded, |
|---|
| 5570 | + ice_stat_update32(hw, GLPRT_MRFC(port), pf->stat_prev_loaded, |
|---|
| 4784 | 5571 | &prev_ps->mac_remote_faults, |
|---|
| 4785 | 5572 | &cur_ps->mac_remote_faults); |
|---|
| 4786 | 5573 | |
|---|
| 4787 | | - ice_stat_update32(hw, GLPRT_RLEC(pf_id), pf->stat_prev_loaded, |
|---|
| 5574 | + ice_stat_update32(hw, GLPRT_RLEC(port), pf->stat_prev_loaded, |
|---|
| 4788 | 5575 | &prev_ps->rx_len_errors, &cur_ps->rx_len_errors); |
|---|
| 4789 | 5576 | |
|---|
| 4790 | | - ice_stat_update32(hw, GLPRT_RUC(pf_id), pf->stat_prev_loaded, |
|---|
| 5577 | + ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded, |
|---|
| 4791 | 5578 | &prev_ps->rx_undersize, &cur_ps->rx_undersize); |
|---|
| 4792 | 5579 | |
|---|
| 4793 | | - ice_stat_update32(hw, GLPRT_RFC(pf_id), pf->stat_prev_loaded, |
|---|
| 5580 | + ice_stat_update32(hw, GLPRT_RFC(port), pf->stat_prev_loaded, |
|---|
| 4794 | 5581 | &prev_ps->rx_fragments, &cur_ps->rx_fragments); |
|---|
| 4795 | 5582 | |
|---|
| 4796 | | - ice_stat_update32(hw, GLPRT_ROC(pf_id), pf->stat_prev_loaded, |
|---|
| 5583 | + ice_stat_update32(hw, GLPRT_ROC(port), pf->stat_prev_loaded, |
|---|
| 4797 | 5584 | &prev_ps->rx_oversize, &cur_ps->rx_oversize); |
|---|
| 4798 | 5585 | |
|---|
| 4799 | | - ice_stat_update32(hw, GLPRT_RJC(pf_id), pf->stat_prev_loaded, |
|---|
| 5586 | + ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded, |
|---|
| 4800 | 5587 | &prev_ps->rx_jabber, &cur_ps->rx_jabber); |
|---|
| 5588 | + |
|---|
| 5589 | + cur_ps->fd_sb_status = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0; |
|---|
| 4801 | 5590 | |
|---|
| 4802 | 5591 | pf->stat_prev_loaded = true; |
|---|
| 4803 | 5592 | } |
|---|
| .. | .. |
|---|
| 4816 | 5605 | |
|---|
| 4817 | 5606 | vsi_stats = &vsi->net_stats; |
|---|
| 4818 | 5607 | |
|---|
| 4819 | | - if (test_bit(__ICE_DOWN, vsi->state) || !vsi->num_txq || !vsi->num_rxq) |
|---|
| 5608 | + if (!vsi->num_txq || !vsi->num_rxq) |
|---|
| 4820 | 5609 | return; |
|---|
| 5610 | + |
|---|
| 4821 | 5611 | /* netdev packet/byte stats come from ring counter. These are obtained |
|---|
| 4822 | 5612 | * by summing up ring counters (done by ice_update_vsi_ring_stats). |
|---|
| 5613 | + * But, only call the update routine and read the registers if VSI is |
|---|
| 5614 | + * not down. |
|---|
| 4823 | 5615 | */ |
|---|
| 4824 | | - ice_update_vsi_ring_stats(vsi); |
|---|
| 5616 | + if (!test_bit(__ICE_DOWN, vsi->state)) |
|---|
| 5617 | + ice_update_vsi_ring_stats(vsi); |
|---|
| 4825 | 5618 | stats->tx_packets = vsi_stats->tx_packets; |
|---|
| 4826 | 5619 | stats->tx_bytes = vsi_stats->tx_bytes; |
|---|
| 4827 | 5620 | stats->rx_packets = vsi_stats->rx_packets; |
|---|
| .. | .. |
|---|
| 4851 | 5644 | if (!vsi->netdev) |
|---|
| 4852 | 5645 | return; |
|---|
| 4853 | 5646 | |
|---|
| 4854 | | - for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) { |
|---|
| 5647 | + ice_for_each_q_vector(vsi, q_idx) { |
|---|
| 4855 | 5648 | struct ice_q_vector *q_vector = vsi->q_vectors[q_idx]; |
|---|
| 4856 | 5649 | |
|---|
| 4857 | 5650 | if (q_vector->rx.ring || q_vector->tx.ring) |
|---|
| .. | .. |
|---|
| 4865 | 5658 | */ |
|---|
| 4866 | 5659 | int ice_down(struct ice_vsi *vsi) |
|---|
| 4867 | 5660 | { |
|---|
| 4868 | | - int i, err; |
|---|
| 5661 | + int i, tx_err, rx_err, link_err = 0; |
|---|
| 4869 | 5662 | |
|---|
| 4870 | 5663 | /* Caller of this function is expected to set the |
|---|
| 4871 | 5664 | * vsi->state __ICE_DOWN bit |
|---|
| .. | .. |
|---|
| 4876 | 5669 | } |
|---|
| 4877 | 5670 | |
|---|
| 4878 | 5671 | ice_vsi_dis_irq(vsi); |
|---|
| 4879 | | - err = ice_vsi_stop_tx_rx_rings(vsi); |
|---|
| 5672 | + |
|---|
| 5673 | + tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0); |
|---|
| 5674 | + if (tx_err) |
|---|
| 5675 | + netdev_err(vsi->netdev, "Failed stop Tx rings, VSI %d error %d\n", |
|---|
| 5676 | + vsi->vsi_num, tx_err); |
|---|
| 5677 | + if (!tx_err && ice_is_xdp_ena_vsi(vsi)) { |
|---|
| 5678 | + tx_err = ice_vsi_stop_xdp_tx_rings(vsi); |
|---|
| 5679 | + if (tx_err) |
|---|
| 5680 | + netdev_err(vsi->netdev, "Failed stop XDP rings, VSI %d error %d\n", |
|---|
| 5681 | + vsi->vsi_num, tx_err); |
|---|
| 5682 | + } |
|---|
| 5683 | + |
|---|
| 5684 | + rx_err = ice_vsi_stop_all_rx_rings(vsi); |
|---|
| 5685 | + if (rx_err) |
|---|
| 5686 | + netdev_err(vsi->netdev, "Failed stop Rx rings, VSI %d error %d\n", |
|---|
| 5687 | + vsi->vsi_num, rx_err); |
|---|
| 5688 | + |
|---|
| 4880 | 5689 | ice_napi_disable_all(vsi); |
|---|
| 5690 | + |
|---|
| 5691 | + if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) { |
|---|
| 5692 | + link_err = ice_force_phys_link_state(vsi, false); |
|---|
| 5693 | + if (link_err) |
|---|
| 5694 | + netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n", |
|---|
| 5695 | + vsi->vsi_num, link_err); |
|---|
| 5696 | + } |
|---|
| 4881 | 5697 | |
|---|
| 4882 | 5698 | ice_for_each_txq(vsi, i) |
|---|
| 4883 | 5699 | ice_clean_tx_ring(vsi->tx_rings[i]); |
|---|
| .. | .. |
|---|
| 4885 | 5701 | ice_for_each_rxq(vsi, i) |
|---|
| 4886 | 5702 | ice_clean_rx_ring(vsi->rx_rings[i]); |
|---|
| 4887 | 5703 | |
|---|
| 4888 | | - if (err) |
|---|
| 5704 | + if (tx_err || rx_err || link_err) { |
|---|
| 4889 | 5705 | netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n", |
|---|
| 4890 | 5706 | vsi->vsi_num, vsi->vsw->sw_id); |
|---|
| 4891 | | - return err; |
|---|
| 5707 | + return -EIO; |
|---|
| 5708 | + } |
|---|
| 5709 | + |
|---|
| 5710 | + return 0; |
|---|
| 4892 | 5711 | } |
|---|
| 4893 | 5712 | |
|---|
| 4894 | 5713 | /** |
|---|
| .. | .. |
|---|
| 4897 | 5716 | * |
|---|
| 4898 | 5717 | * Return 0 on success, negative on failure |
|---|
| 4899 | 5718 | */ |
|---|
| 4900 | | -static int ice_vsi_setup_tx_rings(struct ice_vsi *vsi) |
|---|
| 5719 | +int ice_vsi_setup_tx_rings(struct ice_vsi *vsi) |
|---|
| 4901 | 5720 | { |
|---|
| 4902 | 5721 | int i, err = 0; |
|---|
| 4903 | 5722 | |
|---|
| 4904 | 5723 | if (!vsi->num_txq) { |
|---|
| 4905 | | - dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Tx queues\n", |
|---|
| 5724 | + dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Tx queues\n", |
|---|
| 4906 | 5725 | vsi->vsi_num); |
|---|
| 4907 | 5726 | return -EINVAL; |
|---|
| 4908 | 5727 | } |
|---|
| 4909 | 5728 | |
|---|
| 4910 | 5729 | ice_for_each_txq(vsi, i) { |
|---|
| 4911 | | - err = ice_setup_tx_ring(vsi->tx_rings[i]); |
|---|
| 5730 | + struct ice_ring *ring = vsi->tx_rings[i]; |
|---|
| 5731 | + |
|---|
| 5732 | + if (!ring) |
|---|
| 5733 | + return -EINVAL; |
|---|
| 5734 | + |
|---|
| 5735 | + ring->netdev = vsi->netdev; |
|---|
| 5736 | + err = ice_setup_tx_ring(ring); |
|---|
| 4912 | 5737 | if (err) |
|---|
| 4913 | 5738 | break; |
|---|
| 4914 | 5739 | } |
|---|
| .. | .. |
|---|
| 4922 | 5747 | * |
|---|
| 4923 | 5748 | * Return 0 on success, negative on failure |
|---|
| 4924 | 5749 | */ |
|---|
| 4925 | | -static int ice_vsi_setup_rx_rings(struct ice_vsi *vsi) |
|---|
| 5750 | +int ice_vsi_setup_rx_rings(struct ice_vsi *vsi) |
|---|
| 4926 | 5751 | { |
|---|
| 4927 | 5752 | int i, err = 0; |
|---|
| 4928 | 5753 | |
|---|
| 4929 | 5754 | if (!vsi->num_rxq) { |
|---|
| 4930 | | - dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Rx queues\n", |
|---|
| 5755 | + dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Rx queues\n", |
|---|
| 4931 | 5756 | vsi->vsi_num); |
|---|
| 4932 | 5757 | return -EINVAL; |
|---|
| 4933 | 5758 | } |
|---|
| 4934 | 5759 | |
|---|
| 4935 | 5760 | ice_for_each_rxq(vsi, i) { |
|---|
| 4936 | | - err = ice_setup_rx_ring(vsi->rx_rings[i]); |
|---|
| 5761 | + struct ice_ring *ring = vsi->rx_rings[i]; |
|---|
| 5762 | + |
|---|
| 5763 | + if (!ring) |
|---|
| 5764 | + return -EINVAL; |
|---|
| 5765 | + |
|---|
| 5766 | + ring->netdev = vsi->netdev; |
|---|
| 5767 | + err = ice_setup_rx_ring(ring); |
|---|
| 4937 | 5768 | if (err) |
|---|
| 4938 | 5769 | break; |
|---|
| 4939 | 5770 | } |
|---|
| .. | .. |
|---|
| 4942 | 5773 | } |
|---|
| 4943 | 5774 | |
|---|
| 4944 | 5775 | /** |
|---|
| 4945 | | - * ice_vsi_req_irq - Request IRQ from the OS |
|---|
| 4946 | | - * @vsi: The VSI IRQ is being requested for |
|---|
| 4947 | | - * @basename: name for the vector |
|---|
| 5776 | + * ice_vsi_open_ctrl - open control VSI for use |
|---|
| 5777 | + * @vsi: the VSI to open |
|---|
| 4948 | 5778 | * |
|---|
| 4949 | | - * Return 0 on success and a negative value on error |
|---|
| 5779 | + * Initialization of the Control VSI |
|---|
| 5780 | + * |
|---|
| 5781 | + * Returns 0 on success, negative value on error |
|---|
| 4950 | 5782 | */ |
|---|
| 4951 | | -static int ice_vsi_req_irq(struct ice_vsi *vsi, char *basename) |
|---|
| 5783 | +int ice_vsi_open_ctrl(struct ice_vsi *vsi) |
|---|
| 4952 | 5784 | { |
|---|
| 5785 | + char int_name[ICE_INT_NAME_STR_LEN]; |
|---|
| 4953 | 5786 | struct ice_pf *pf = vsi->back; |
|---|
| 4954 | | - int err = -EINVAL; |
|---|
| 5787 | + struct device *dev; |
|---|
| 5788 | + int err; |
|---|
| 4955 | 5789 | |
|---|
| 4956 | | - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) |
|---|
| 4957 | | - err = ice_vsi_req_irq_msix(vsi, basename); |
|---|
| 5790 | + dev = ice_pf_to_dev(pf); |
|---|
| 5791 | + /* allocate descriptors */ |
|---|
| 5792 | + err = ice_vsi_setup_tx_rings(vsi); |
|---|
| 5793 | + if (err) |
|---|
| 5794 | + goto err_setup_tx; |
|---|
| 5795 | + |
|---|
| 5796 | + err = ice_vsi_setup_rx_rings(vsi); |
|---|
| 5797 | + if (err) |
|---|
| 5798 | + goto err_setup_rx; |
|---|
| 5799 | + |
|---|
| 5800 | + err = ice_vsi_cfg(vsi); |
|---|
| 5801 | + if (err) |
|---|
| 5802 | + goto err_setup_rx; |
|---|
| 5803 | + |
|---|
| 5804 | + snprintf(int_name, sizeof(int_name) - 1, "%s-%s:ctrl", |
|---|
| 5805 | + dev_driver_string(dev), dev_name(dev)); |
|---|
| 5806 | + err = ice_vsi_req_irq_msix(vsi, int_name); |
|---|
| 5807 | + if (err) |
|---|
| 5808 | + goto err_setup_rx; |
|---|
| 5809 | + |
|---|
| 5810 | + ice_vsi_cfg_msix(vsi); |
|---|
| 5811 | + |
|---|
| 5812 | + err = ice_vsi_start_all_rx_rings(vsi); |
|---|
| 5813 | + if (err) |
|---|
| 5814 | + goto err_up_complete; |
|---|
| 5815 | + |
|---|
| 5816 | + clear_bit(__ICE_DOWN, vsi->state); |
|---|
| 5817 | + ice_vsi_ena_irq(vsi); |
|---|
| 5818 | + |
|---|
| 5819 | + return 0; |
|---|
| 5820 | + |
|---|
| 5821 | +err_up_complete: |
|---|
| 5822 | + ice_down(vsi); |
|---|
| 5823 | +err_setup_rx: |
|---|
| 5824 | + ice_vsi_free_rx_rings(vsi); |
|---|
| 5825 | +err_setup_tx: |
|---|
| 5826 | + ice_vsi_free_tx_rings(vsi); |
|---|
| 4958 | 5827 | |
|---|
| 4959 | 5828 | return err; |
|---|
| 4960 | | -} |
|---|
| 4961 | | - |
|---|
| 4962 | | -/** |
|---|
| 4963 | | - * ice_vsi_free_tx_rings - Free Tx resources for VSI queues |
|---|
| 4964 | | - * @vsi: the VSI having resources freed |
|---|
| 4965 | | - */ |
|---|
| 4966 | | -static void ice_vsi_free_tx_rings(struct ice_vsi *vsi) |
|---|
| 4967 | | -{ |
|---|
| 4968 | | - int i; |
|---|
| 4969 | | - |
|---|
| 4970 | | - if (!vsi->tx_rings) |
|---|
| 4971 | | - return; |
|---|
| 4972 | | - |
|---|
| 4973 | | - ice_for_each_txq(vsi, i) |
|---|
| 4974 | | - if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) |
|---|
| 4975 | | - ice_free_tx_ring(vsi->tx_rings[i]); |
|---|
| 4976 | | -} |
|---|
| 4977 | | - |
|---|
| 4978 | | -/** |
|---|
| 4979 | | - * ice_vsi_free_rx_rings - Free Rx resources for VSI queues |
|---|
| 4980 | | - * @vsi: the VSI having resources freed |
|---|
| 4981 | | - */ |
|---|
| 4982 | | -static void ice_vsi_free_rx_rings(struct ice_vsi *vsi) |
|---|
| 4983 | | -{ |
|---|
| 4984 | | - int i; |
|---|
| 4985 | | - |
|---|
| 4986 | | - if (!vsi->rx_rings) |
|---|
| 4987 | | - return; |
|---|
| 4988 | | - |
|---|
| 4989 | | - ice_for_each_rxq(vsi, i) |
|---|
| 4990 | | - if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc) |
|---|
| 4991 | | - ice_free_rx_ring(vsi->rx_rings[i]); |
|---|
| 4992 | 5829 | } |
|---|
| 4993 | 5830 | |
|---|
| 4994 | 5831 | /** |
|---|
| .. | .. |
|---|
| 5019 | 5856 | goto err_setup_rx; |
|---|
| 5020 | 5857 | |
|---|
| 5021 | 5858 | snprintf(int_name, sizeof(int_name) - 1, "%s-%s", |
|---|
| 5022 | | - dev_driver_string(&pf->pdev->dev), vsi->netdev->name); |
|---|
| 5023 | | - err = ice_vsi_req_irq(vsi, int_name); |
|---|
| 5859 | + dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name); |
|---|
| 5860 | + err = ice_vsi_req_irq_msix(vsi, int_name); |
|---|
| 5024 | 5861 | if (err) |
|---|
| 5025 | 5862 | goto err_setup_rx; |
|---|
| 5026 | 5863 | |
|---|
| .. | .. |
|---|
| 5052 | 5889 | } |
|---|
| 5053 | 5890 | |
|---|
| 5054 | 5891 | /** |
|---|
| 5055 | | - * ice_vsi_close - Shut down a VSI |
|---|
| 5056 | | - * @vsi: the VSI being shut down |
|---|
| 5892 | + * ice_vsi_release_all - Delete all VSIs |
|---|
| 5893 | + * @pf: PF from which all VSIs are being removed |
|---|
| 5057 | 5894 | */ |
|---|
| 5058 | | -static void ice_vsi_close(struct ice_vsi *vsi) |
|---|
| 5895 | +static void ice_vsi_release_all(struct ice_pf *pf) |
|---|
| 5059 | 5896 | { |
|---|
| 5060 | | - if (!test_and_set_bit(__ICE_DOWN, vsi->state)) |
|---|
| 5061 | | - ice_down(vsi); |
|---|
| 5897 | + int err, i; |
|---|
| 5062 | 5898 | |
|---|
| 5063 | | - ice_vsi_free_irq(vsi); |
|---|
| 5064 | | - ice_vsi_free_tx_rings(vsi); |
|---|
| 5065 | | - ice_vsi_free_rx_rings(vsi); |
|---|
| 5066 | | -} |
|---|
| 5899 | + if (!pf->vsi) |
|---|
| 5900 | + return; |
|---|
| 5067 | 5901 | |
|---|
| 5068 | | -/** |
|---|
| 5069 | | - * ice_rss_clean - Delete RSS related VSI structures that hold user inputs |
|---|
| 5070 | | - * @vsi: the VSI being removed |
|---|
| 5071 | | - */ |
|---|
| 5072 | | -static void ice_rss_clean(struct ice_vsi *vsi) |
|---|
| 5073 | | -{ |
|---|
| 5074 | | - struct ice_pf *pf; |
|---|
| 5902 | + ice_for_each_vsi(pf, i) { |
|---|
| 5903 | + if (!pf->vsi[i]) |
|---|
| 5904 | + continue; |
|---|
| 5075 | 5905 | |
|---|
| 5076 | | - pf = vsi->back; |
|---|
| 5077 | | - |
|---|
| 5078 | | - if (vsi->rss_hkey_user) |
|---|
| 5079 | | - devm_kfree(&pf->pdev->dev, vsi->rss_hkey_user); |
|---|
| 5080 | | - if (vsi->rss_lut_user) |
|---|
| 5081 | | - devm_kfree(&pf->pdev->dev, vsi->rss_lut_user); |
|---|
| 5082 | | -} |
|---|
| 5083 | | - |
|---|
| 5084 | | -/** |
|---|
| 5085 | | - * ice_vsi_release - Delete a VSI and free its resources |
|---|
| 5086 | | - * @vsi: the VSI being removed |
|---|
| 5087 | | - * |
|---|
| 5088 | | - * Returns 0 on success or < 0 on error |
|---|
| 5089 | | - */ |
|---|
| 5090 | | -static int ice_vsi_release(struct ice_vsi *vsi) |
|---|
| 5091 | | -{ |
|---|
| 5092 | | - struct ice_pf *pf; |
|---|
| 5093 | | - |
|---|
| 5094 | | - if (!vsi->back) |
|---|
| 5095 | | - return -ENODEV; |
|---|
| 5096 | | - pf = vsi->back; |
|---|
| 5097 | | - |
|---|
| 5098 | | - if (vsi->netdev) { |
|---|
| 5099 | | - unregister_netdev(vsi->netdev); |
|---|
| 5100 | | - free_netdev(vsi->netdev); |
|---|
| 5101 | | - vsi->netdev = NULL; |
|---|
| 5906 | + err = ice_vsi_release(pf->vsi[i]); |
|---|
| 5907 | + if (err) |
|---|
| 5908 | + dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n", |
|---|
| 5909 | + i, err, pf->vsi[i]->vsi_num); |
|---|
| 5102 | 5910 | } |
|---|
| 5911 | +} |
|---|
| 5103 | 5912 | |
|---|
| 5104 | | - if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) |
|---|
| 5105 | | - ice_rss_clean(vsi); |
|---|
| 5913 | +/** |
|---|
| 5914 | + * ice_vsi_rebuild_by_type - Rebuild VSI of a given type |
|---|
| 5915 | + * @pf: pointer to the PF instance |
|---|
| 5916 | + * @type: VSI type to rebuild |
|---|
| 5917 | + * |
|---|
| 5918 | + * Iterates through the pf->vsi array and rebuilds VSIs of the requested type |
|---|
| 5919 | + */ |
|---|
| 5920 | +static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type) |
|---|
| 5921 | +{ |
|---|
| 5922 | + struct device *dev = ice_pf_to_dev(pf); |
|---|
| 5923 | + enum ice_status status; |
|---|
| 5924 | + int i, err; |
|---|
| 5106 | 5925 | |
|---|
| 5107 | | - /* Disable VSI and free resources */ |
|---|
| 5108 | | - ice_vsi_dis_irq(vsi); |
|---|
| 5109 | | - ice_vsi_close(vsi); |
|---|
| 5926 | + ice_for_each_vsi(pf, i) { |
|---|
| 5927 | + struct ice_vsi *vsi = pf->vsi[i]; |
|---|
| 5110 | 5928 | |
|---|
| 5111 | | - /* reclaim interrupt vectors back to PF */ |
|---|
| 5112 | | - ice_free_res(vsi->back->irq_tracker, vsi->base_vector, vsi->idx); |
|---|
| 5113 | | - pf->num_avail_msix += vsi->num_q_vectors; |
|---|
| 5929 | + if (!vsi || vsi->type != type) |
|---|
| 5930 | + continue; |
|---|
| 5114 | 5931 | |
|---|
| 5115 | | - ice_remove_vsi_fltr(&pf->hw, vsi->vsi_num); |
|---|
| 5116 | | - ice_vsi_delete(vsi); |
|---|
| 5117 | | - ice_vsi_free_q_vectors(vsi); |
|---|
| 5118 | | - ice_vsi_clear_rings(vsi); |
|---|
| 5932 | + /* rebuild the VSI */ |
|---|
| 5933 | + err = ice_vsi_rebuild(vsi, true); |
|---|
| 5934 | + if (err) { |
|---|
| 5935 | + dev_err(dev, "rebuild VSI failed, err %d, VSI index %d, type %s\n", |
|---|
| 5936 | + err, vsi->idx, ice_vsi_type_str(type)); |
|---|
| 5937 | + return err; |
|---|
| 5938 | + } |
|---|
| 5119 | 5939 | |
|---|
| 5120 | | - ice_vsi_put_qs(vsi); |
|---|
| 5121 | | - pf->q_left_tx += vsi->alloc_txq; |
|---|
| 5122 | | - pf->q_left_rx += vsi->alloc_rxq; |
|---|
| 5940 | + /* replay filters for the VSI */ |
|---|
| 5941 | + status = ice_replay_vsi(&pf->hw, vsi->idx); |
|---|
| 5942 | + if (status) { |
|---|
| 5943 | + dev_err(dev, "replay VSI failed, status %s, VSI index %d, type %s\n", |
|---|
| 5944 | + ice_stat_str(status), vsi->idx, |
|---|
| 5945 | + ice_vsi_type_str(type)); |
|---|
| 5946 | + return -EIO; |
|---|
| 5947 | + } |
|---|
| 5123 | 5948 | |
|---|
| 5124 | | - ice_vsi_clear(vsi); |
|---|
| 5949 | + /* Re-map HW VSI number, using VSI handle that has been |
|---|
| 5950 | + * previously validated in ice_replay_vsi() call above |
|---|
| 5951 | + */ |
|---|
| 5952 | + vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx); |
|---|
| 5953 | + |
|---|
| 5954 | + /* enable the VSI */ |
|---|
| 5955 | + err = ice_ena_vsi(vsi, false); |
|---|
| 5956 | + if (err) { |
|---|
| 5957 | + dev_err(dev, "enable VSI failed, err %d, VSI index %d, type %s\n", |
|---|
| 5958 | + err, vsi->idx, ice_vsi_type_str(type)); |
|---|
| 5959 | + return err; |
|---|
| 5960 | + } |
|---|
| 5961 | + |
|---|
| 5962 | + dev_info(dev, "VSI rebuilt. VSI index %d, type %s\n", vsi->idx, |
|---|
| 5963 | + ice_vsi_type_str(type)); |
|---|
| 5964 | + } |
|---|
| 5125 | 5965 | |
|---|
| 5126 | 5966 | return 0; |
|---|
| 5127 | 5967 | } |
|---|
| 5128 | 5968 | |
|---|
| 5129 | 5969 | /** |
|---|
| 5130 | | - * ice_dis_vsi - pause a VSI |
|---|
| 5131 | | - * @vsi: the VSI being paused |
|---|
| 5970 | + * ice_update_pf_netdev_link - Update PF netdev link status |
|---|
| 5971 | + * @pf: pointer to the PF instance |
|---|
| 5132 | 5972 | */ |
|---|
| 5133 | | -static void ice_dis_vsi(struct ice_vsi *vsi) |
|---|
| 5973 | +static void ice_update_pf_netdev_link(struct ice_pf *pf) |
|---|
| 5134 | 5974 | { |
|---|
| 5135 | | - if (test_bit(__ICE_DOWN, vsi->state)) |
|---|
| 5136 | | - return; |
|---|
| 5975 | + bool link_up; |
|---|
| 5976 | + int i; |
|---|
| 5137 | 5977 | |
|---|
| 5138 | | - set_bit(__ICE_NEEDS_RESTART, vsi->state); |
|---|
| 5978 | + ice_for_each_vsi(pf, i) { |
|---|
| 5979 | + struct ice_vsi *vsi = pf->vsi[i]; |
|---|
| 5139 | 5980 | |
|---|
| 5140 | | - if (vsi->netdev && netif_running(vsi->netdev) && |
|---|
| 5141 | | - vsi->type == ICE_VSI_PF) |
|---|
| 5142 | | - vsi->netdev->netdev_ops->ndo_stop(vsi->netdev); |
|---|
| 5981 | + if (!vsi || vsi->type != ICE_VSI_PF) |
|---|
| 5982 | + return; |
|---|
| 5143 | 5983 | |
|---|
| 5144 | | - ice_vsi_close(vsi); |
|---|
| 5145 | | -} |
|---|
| 5146 | | - |
|---|
| 5147 | | -/** |
|---|
| 5148 | | - * ice_ena_vsi - resume a VSI |
|---|
| 5149 | | - * @vsi: the VSI being resume |
|---|
| 5150 | | - */ |
|---|
| 5151 | | -static void ice_ena_vsi(struct ice_vsi *vsi) |
|---|
| 5152 | | -{ |
|---|
| 5153 | | - if (!test_and_clear_bit(__ICE_NEEDS_RESTART, vsi->state)) |
|---|
| 5154 | | - return; |
|---|
| 5155 | | - |
|---|
| 5156 | | - if (vsi->netdev && netif_running(vsi->netdev)) |
|---|
| 5157 | | - vsi->netdev->netdev_ops->ndo_open(vsi->netdev); |
|---|
| 5158 | | - else if (ice_vsi_open(vsi)) |
|---|
| 5159 | | - /* this clears the DOWN bit */ |
|---|
| 5160 | | - dev_dbg(&vsi->back->pdev->dev, "Failed open VSI 0x%04X on switch 0x%04X\n", |
|---|
| 5161 | | - vsi->vsi_num, vsi->vsw->sw_id); |
|---|
| 5162 | | -} |
|---|
| 5163 | | - |
|---|
| 5164 | | -/** |
|---|
| 5165 | | - * ice_pf_dis_all_vsi - Pause all VSIs on a PF |
|---|
| 5166 | | - * @pf: the PF |
|---|
| 5167 | | - */ |
|---|
| 5168 | | -static void ice_pf_dis_all_vsi(struct ice_pf *pf) |
|---|
| 5169 | | -{ |
|---|
| 5170 | | - int v; |
|---|
| 5171 | | - |
|---|
| 5172 | | - ice_for_each_vsi(pf, v) |
|---|
| 5173 | | - if (pf->vsi[v]) |
|---|
| 5174 | | - ice_dis_vsi(pf->vsi[v]); |
|---|
| 5175 | | -} |
|---|
| 5176 | | - |
|---|
| 5177 | | -/** |
|---|
| 5178 | | - * ice_pf_ena_all_vsi - Resume all VSIs on a PF |
|---|
| 5179 | | - * @pf: the PF |
|---|
| 5180 | | - */ |
|---|
| 5181 | | -static void ice_pf_ena_all_vsi(struct ice_pf *pf) |
|---|
| 5182 | | -{ |
|---|
| 5183 | | - int v; |
|---|
| 5184 | | - |
|---|
| 5185 | | - ice_for_each_vsi(pf, v) |
|---|
| 5186 | | - if (pf->vsi[v]) |
|---|
| 5187 | | - ice_ena_vsi(pf->vsi[v]); |
|---|
| 5984 | + ice_get_link_status(pf->vsi[i]->port_info, &link_up); |
|---|
| 5985 | + if (link_up) { |
|---|
| 5986 | + netif_carrier_on(pf->vsi[i]->netdev); |
|---|
| 5987 | + netif_tx_wake_all_queues(pf->vsi[i]->netdev); |
|---|
| 5988 | + } else { |
|---|
| 5989 | + netif_carrier_off(pf->vsi[i]->netdev); |
|---|
| 5990 | + netif_tx_stop_all_queues(pf->vsi[i]->netdev); |
|---|
| 5991 | + } |
|---|
| 5992 | + } |
|---|
| 5188 | 5993 | } |
|---|
| 5189 | 5994 | |
|---|
| 5190 | 5995 | /** |
|---|
| 5191 | 5996 | * ice_rebuild - rebuild after reset |
|---|
| 5192 | | - * @pf: pf to rebuild |
|---|
| 5997 | + * @pf: PF to rebuild |
|---|
| 5998 | + * @reset_type: type of reset |
|---|
| 5999 | + * |
|---|
| 6000 | + * Do not rebuild VF VSI in this flow because that is already handled via |
|---|
| 6001 | + * ice_reset_all_vfs(). This is because requirements for resetting a VF after a |
|---|
| 6002 | + * PFR/CORER/GLOBER/etc. are different than the normal flow. Also, we don't want |
|---|
| 6003 | + * to reset/rebuild all the VF VSI twice. |
|---|
| 5193 | 6004 | */ |
|---|
| 5194 | | -static void ice_rebuild(struct ice_pf *pf) |
|---|
| 6005 | +static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) |
|---|
| 5195 | 6006 | { |
|---|
| 5196 | | - struct device *dev = &pf->pdev->dev; |
|---|
| 6007 | + struct device *dev = ice_pf_to_dev(pf); |
|---|
| 5197 | 6008 | struct ice_hw *hw = &pf->hw; |
|---|
| 5198 | 6009 | enum ice_status ret; |
|---|
| 5199 | 6010 | int err; |
|---|
| .. | .. |
|---|
| 5201 | 6012 | if (test_bit(__ICE_DOWN, pf->state)) |
|---|
| 5202 | 6013 | goto clear_recovery; |
|---|
| 5203 | 6014 | |
|---|
| 5204 | | - dev_dbg(dev, "rebuilding pf\n"); |
|---|
| 6015 | + dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type); |
|---|
| 5205 | 6016 | |
|---|
| 5206 | 6017 | ret = ice_init_all_ctrlq(hw); |
|---|
| 5207 | 6018 | if (ret) { |
|---|
| 5208 | | - dev_err(dev, "control queues init failed %d\n", ret); |
|---|
| 5209 | | - goto fail_reset; |
|---|
| 6019 | + dev_err(dev, "control queues init failed %s\n", |
|---|
| 6020 | + ice_stat_str(ret)); |
|---|
| 6021 | + goto err_init_ctrlq; |
|---|
| 6022 | + } |
|---|
| 6023 | + |
|---|
| 6024 | + /* if DDP was previously loaded successfully */ |
|---|
| 6025 | + if (!ice_is_safe_mode(pf)) { |
|---|
| 6026 | + /* reload the SW DB of filter tables */ |
|---|
| 6027 | + if (reset_type == ICE_RESET_PFR) |
|---|
| 6028 | + ice_fill_blk_tbls(hw); |
|---|
| 6029 | + else |
|---|
| 6030 | + /* Reload DDP Package after CORER/GLOBR reset */ |
|---|
| 6031 | + ice_load_pkg(NULL, pf); |
|---|
| 5210 | 6032 | } |
|---|
| 5211 | 6033 | |
|---|
| 5212 | 6034 | ret = ice_clear_pf_cfg(hw); |
|---|
| 5213 | 6035 | if (ret) { |
|---|
| 5214 | | - dev_err(dev, "clear PF configuration failed %d\n", ret); |
|---|
| 5215 | | - goto fail_reset; |
|---|
| 6036 | + dev_err(dev, "clear PF configuration failed %s\n", |
|---|
| 6037 | + ice_stat_str(ret)); |
|---|
| 6038 | + goto err_init_ctrlq; |
|---|
| 5216 | 6039 | } |
|---|
| 6040 | + |
|---|
| 6041 | + if (pf->first_sw->dflt_vsi_ena) |
|---|
| 6042 | + dev_info(dev, "Clearing default VSI, re-enable after reset completes\n"); |
|---|
| 6043 | + /* clear the default VSI configuration if it exists */ |
|---|
| 6044 | + pf->first_sw->dflt_vsi = NULL; |
|---|
| 6045 | + pf->first_sw->dflt_vsi_ena = false; |
|---|
| 5217 | 6046 | |
|---|
| 5218 | 6047 | ice_clear_pxe_mode(hw); |
|---|
| 5219 | 6048 | |
|---|
| 5220 | 6049 | ret = ice_get_caps(hw); |
|---|
| 5221 | 6050 | if (ret) { |
|---|
| 5222 | | - dev_err(dev, "ice_get_caps failed %d\n", ret); |
|---|
| 5223 | | - goto fail_reset; |
|---|
| 6051 | + dev_err(dev, "ice_get_caps failed %s\n", ice_stat_str(ret)); |
|---|
| 6052 | + goto err_init_ctrlq; |
|---|
| 5224 | 6053 | } |
|---|
| 5225 | 6054 | |
|---|
| 5226 | | - /* basic nic switch setup */ |
|---|
| 5227 | | - err = ice_setup_pf_sw(pf); |
|---|
| 5228 | | - if (err) { |
|---|
| 5229 | | - dev_err(dev, "ice_setup_pf_sw failed\n"); |
|---|
| 5230 | | - goto fail_reset; |
|---|
| 6055 | + ret = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL); |
|---|
| 6056 | + if (ret) { |
|---|
| 6057 | + dev_err(dev, "set_mac_cfg failed %s\n", ice_stat_str(ret)); |
|---|
| 6058 | + goto err_init_ctrlq; |
|---|
| 5231 | 6059 | } |
|---|
| 6060 | + |
|---|
| 6061 | + err = ice_sched_init_port(hw->port_info); |
|---|
| 6062 | + if (err) |
|---|
| 6063 | + goto err_sched_init_port; |
|---|
| 5232 | 6064 | |
|---|
| 5233 | 6065 | /* start misc vector */ |
|---|
| 5234 | | - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) { |
|---|
| 5235 | | - err = ice_req_irq_msix_misc(pf); |
|---|
| 5236 | | - if (err) { |
|---|
| 5237 | | - dev_err(dev, "misc vector setup failed: %d\n", err); |
|---|
| 5238 | | - goto fail_reset; |
|---|
| 6066 | + err = ice_req_irq_msix_misc(pf); |
|---|
| 6067 | + if (err) { |
|---|
| 6068 | + dev_err(dev, "misc vector setup failed: %d\n", err); |
|---|
| 6069 | + goto err_sched_init_port; |
|---|
| 6070 | + } |
|---|
| 6071 | + |
|---|
| 6072 | + if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) { |
|---|
| 6073 | + wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M); |
|---|
| 6074 | + if (!rd32(hw, PFQF_FD_SIZE)) { |
|---|
| 6075 | + u16 unused, guar, b_effort; |
|---|
| 6076 | + |
|---|
| 6077 | + guar = hw->func_caps.fd_fltr_guar; |
|---|
| 6078 | + b_effort = hw->func_caps.fd_fltr_best_effort; |
|---|
| 6079 | + |
|---|
| 6080 | + /* force guaranteed filter pool for PF */ |
|---|
| 6081 | + ice_alloc_fd_guar_item(hw, &unused, guar); |
|---|
| 6082 | + /* force shared filter pool for PF */ |
|---|
| 6083 | + ice_alloc_fd_shrd_item(hw, &unused, b_effort); |
|---|
| 5239 | 6084 | } |
|---|
| 5240 | 6085 | } |
|---|
| 5241 | 6086 | |
|---|
| 5242 | | - /* restart the VSIs that were rebuilt and running before the reset */ |
|---|
| 5243 | | - ice_pf_ena_all_vsi(pf); |
|---|
| 6087 | + if (test_bit(ICE_FLAG_DCB_ENA, pf->flags)) |
|---|
| 6088 | + ice_dcb_rebuild(pf); |
|---|
| 5244 | 6089 | |
|---|
| 6090 | + /* rebuild PF VSI */ |
|---|
| 6091 | + err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF); |
|---|
| 6092 | + if (err) { |
|---|
| 6093 | + dev_err(dev, "PF VSI rebuild failed: %d\n", err); |
|---|
| 6094 | + goto err_vsi_rebuild; |
|---|
| 6095 | + } |
|---|
| 6096 | + |
|---|
| 6097 | + /* If Flow Director is active */ |
|---|
| 6098 | + if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) { |
|---|
| 6099 | + err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL); |
|---|
| 6100 | + if (err) { |
|---|
| 6101 | + dev_err(dev, "control VSI rebuild failed: %d\n", err); |
|---|
| 6102 | + goto err_vsi_rebuild; |
|---|
| 6103 | + } |
|---|
| 6104 | + |
|---|
| 6105 | + /* replay HW Flow Director recipes */ |
|---|
| 6106 | + if (hw->fdir_prof) |
|---|
| 6107 | + ice_fdir_replay_flows(hw); |
|---|
| 6108 | + |
|---|
| 6109 | + /* replay Flow Director filters */ |
|---|
| 6110 | + ice_fdir_replay_fltrs(pf); |
|---|
| 6111 | + |
|---|
| 6112 | + ice_rebuild_arfs(pf); |
|---|
| 6113 | + } |
|---|
| 6114 | + |
|---|
| 6115 | + ice_update_pf_netdev_link(pf); |
|---|
| 6116 | + |
|---|
| 6117 | + /* tell the firmware we are up */ |
|---|
| 6118 | + ret = ice_send_version(pf); |
|---|
| 6119 | + if (ret) { |
|---|
| 6120 | + dev_err(dev, "Rebuild failed due to error sending driver version: %s\n", |
|---|
| 6121 | + ice_stat_str(ret)); |
|---|
| 6122 | + goto err_vsi_rebuild; |
|---|
| 6123 | + } |
|---|
| 6124 | + |
|---|
| 6125 | + ice_replay_post(hw); |
|---|
| 6126 | + |
|---|
| 6127 | + /* if we get here, reset flow is successful */ |
|---|
| 6128 | + clear_bit(__ICE_RESET_FAILED, pf->state); |
|---|
| 5245 | 6129 | return; |
|---|
| 5246 | 6130 | |
|---|
| 5247 | | -fail_reset: |
|---|
| 6131 | +err_vsi_rebuild: |
|---|
| 6132 | +err_sched_init_port: |
|---|
| 6133 | + ice_sched_cleanup_all(hw); |
|---|
| 6134 | +err_init_ctrlq: |
|---|
| 5248 | 6135 | ice_shutdown_all_ctrlq(hw); |
|---|
| 5249 | 6136 | set_bit(__ICE_RESET_FAILED, pf->state); |
|---|
| 5250 | 6137 | clear_recovery: |
|---|
| 5251 | | - set_bit(__ICE_RESET_RECOVERY_PENDING, pf->state); |
|---|
| 6138 | + /* set this bit in PF state to control service task scheduling */ |
|---|
| 6139 | + set_bit(__ICE_NEEDS_RESTART, pf->state); |
|---|
| 6140 | + dev_err(dev, "Rebuild failed, unload and reload driver\n"); |
|---|
| 6141 | +} |
|---|
| 6142 | + |
|---|
| 6143 | +/** |
|---|
| 6144 | + * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP |
|---|
| 6145 | + * @vsi: Pointer to VSI structure |
|---|
| 6146 | + */ |
|---|
| 6147 | +static int ice_max_xdp_frame_size(struct ice_vsi *vsi) |
|---|
| 6148 | +{ |
|---|
| 6149 | + if (PAGE_SIZE >= 8192 || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) |
|---|
| 6150 | + return ICE_RXBUF_2048 - XDP_PACKET_HEADROOM; |
|---|
| 6151 | + else |
|---|
| 6152 | + return ICE_RXBUF_3072; |
|---|
| 5252 | 6153 | } |
|---|
| 5253 | 6154 | |
|---|
| 5254 | 6155 | /** |
|---|
| .. | .. |
|---|
| 5265 | 6166 | struct ice_pf *pf = vsi->back; |
|---|
| 5266 | 6167 | u8 count = 0; |
|---|
| 5267 | 6168 | |
|---|
| 5268 | | - if (new_mtu == netdev->mtu) { |
|---|
| 5269 | | - netdev_warn(netdev, "mtu is already %u\n", netdev->mtu); |
|---|
| 6169 | + if (new_mtu == (int)netdev->mtu) { |
|---|
| 6170 | + netdev_warn(netdev, "MTU is already %u\n", netdev->mtu); |
|---|
| 5270 | 6171 | return 0; |
|---|
| 5271 | 6172 | } |
|---|
| 5272 | 6173 | |
|---|
| 5273 | | - if (new_mtu < netdev->min_mtu) { |
|---|
| 5274 | | - netdev_err(netdev, "new mtu invalid. min_mtu is %d\n", |
|---|
| 6174 | + if (ice_is_xdp_ena_vsi(vsi)) { |
|---|
| 6175 | + int frame_size = ice_max_xdp_frame_size(vsi); |
|---|
| 6176 | + |
|---|
| 6177 | + if (new_mtu + ICE_ETH_PKT_HDR_PAD > frame_size) { |
|---|
| 6178 | + netdev_err(netdev, "max MTU for XDP usage is %d\n", |
|---|
| 6179 | + frame_size - ICE_ETH_PKT_HDR_PAD); |
|---|
| 6180 | + return -EINVAL; |
|---|
| 6181 | + } |
|---|
| 6182 | + } |
|---|
| 6183 | + |
|---|
| 6184 | + if (new_mtu < (int)netdev->min_mtu) { |
|---|
| 6185 | + netdev_err(netdev, "new MTU invalid. min_mtu is %d\n", |
|---|
| 5275 | 6186 | netdev->min_mtu); |
|---|
| 5276 | 6187 | return -EINVAL; |
|---|
| 5277 | | - } else if (new_mtu > netdev->max_mtu) { |
|---|
| 5278 | | - netdev_err(netdev, "new mtu invalid. max_mtu is %d\n", |
|---|
| 6188 | + } else if (new_mtu > (int)netdev->max_mtu) { |
|---|
| 6189 | + netdev_err(netdev, "new MTU invalid. max_mtu is %d\n", |
|---|
| 5279 | 6190 | netdev->min_mtu); |
|---|
| 5280 | 6191 | return -EINVAL; |
|---|
| 5281 | 6192 | } |
|---|
| 5282 | 6193 | /* if a reset is in progress, wait for some time for it to complete */ |
|---|
| 5283 | 6194 | do { |
|---|
| 5284 | | - if (ice_is_reset_recovery_pending(pf->state)) { |
|---|
| 6195 | + if (ice_is_reset_in_progress(pf->state)) { |
|---|
| 5285 | 6196 | count++; |
|---|
| 5286 | 6197 | usleep_range(1000, 2000); |
|---|
| 5287 | 6198 | } else { |
|---|
| .. | .. |
|---|
| 5291 | 6202 | } while (count < 100); |
|---|
| 5292 | 6203 | |
|---|
| 5293 | 6204 | if (count == 100) { |
|---|
| 5294 | | - netdev_err(netdev, "can't change mtu. Device is busy\n"); |
|---|
| 6205 | + netdev_err(netdev, "can't change MTU. Device is busy\n"); |
|---|
| 5295 | 6206 | return -EBUSY; |
|---|
| 5296 | 6207 | } |
|---|
| 5297 | 6208 | |
|---|
| 5298 | | - netdev->mtu = new_mtu; |
|---|
| 6209 | + netdev->mtu = (unsigned int)new_mtu; |
|---|
| 5299 | 6210 | |
|---|
| 5300 | 6211 | /* if VSI is up, bring it down and then back up */ |
|---|
| 5301 | 6212 | if (!test_and_set_bit(__ICE_DOWN, vsi->state)) { |
|---|
| .. | .. |
|---|
| 5303 | 6214 | |
|---|
| 5304 | 6215 | err = ice_down(vsi); |
|---|
| 5305 | 6216 | if (err) { |
|---|
| 5306 | | - netdev_err(netdev, "change mtu if_up err %d\n", err); |
|---|
| 6217 | + netdev_err(netdev, "change MTU if_up err %d\n", err); |
|---|
| 5307 | 6218 | return err; |
|---|
| 5308 | 6219 | } |
|---|
| 5309 | 6220 | |
|---|
| 5310 | 6221 | err = ice_up(vsi); |
|---|
| 5311 | 6222 | if (err) { |
|---|
| 5312 | | - netdev_err(netdev, "change mtu if_up err %d\n", err); |
|---|
| 6223 | + netdev_err(netdev, "change MTU if_up err %d\n", err); |
|---|
| 5313 | 6224 | return err; |
|---|
| 5314 | 6225 | } |
|---|
| 5315 | 6226 | } |
|---|
| 5316 | 6227 | |
|---|
| 5317 | | - netdev_dbg(netdev, "changed mtu to %d\n", new_mtu); |
|---|
| 6228 | + netdev_dbg(netdev, "changed MTU to %d\n", new_mtu); |
|---|
| 5318 | 6229 | return 0; |
|---|
| 6230 | +} |
|---|
| 6231 | + |
|---|
| 6232 | +/** |
|---|
| 6233 | + * ice_aq_str - convert AQ err code to a string |
|---|
| 6234 | + * @aq_err: the AQ error code to convert |
|---|
| 6235 | + */ |
|---|
| 6236 | +const char *ice_aq_str(enum ice_aq_err aq_err) |
|---|
| 6237 | +{ |
|---|
| 6238 | + switch (aq_err) { |
|---|
| 6239 | + case ICE_AQ_RC_OK: |
|---|
| 6240 | + return "OK"; |
|---|
| 6241 | + case ICE_AQ_RC_EPERM: |
|---|
| 6242 | + return "ICE_AQ_RC_EPERM"; |
|---|
| 6243 | + case ICE_AQ_RC_ENOENT: |
|---|
| 6244 | + return "ICE_AQ_RC_ENOENT"; |
|---|
| 6245 | + case ICE_AQ_RC_ENOMEM: |
|---|
| 6246 | + return "ICE_AQ_RC_ENOMEM"; |
|---|
| 6247 | + case ICE_AQ_RC_EBUSY: |
|---|
| 6248 | + return "ICE_AQ_RC_EBUSY"; |
|---|
| 6249 | + case ICE_AQ_RC_EEXIST: |
|---|
| 6250 | + return "ICE_AQ_RC_EEXIST"; |
|---|
| 6251 | + case ICE_AQ_RC_EINVAL: |
|---|
| 6252 | + return "ICE_AQ_RC_EINVAL"; |
|---|
| 6253 | + case ICE_AQ_RC_ENOSPC: |
|---|
| 6254 | + return "ICE_AQ_RC_ENOSPC"; |
|---|
| 6255 | + case ICE_AQ_RC_ENOSYS: |
|---|
| 6256 | + return "ICE_AQ_RC_ENOSYS"; |
|---|
| 6257 | + case ICE_AQ_RC_EMODE: |
|---|
| 6258 | + return "ICE_AQ_RC_EMODE"; |
|---|
| 6259 | + case ICE_AQ_RC_ENOSEC: |
|---|
| 6260 | + return "ICE_AQ_RC_ENOSEC"; |
|---|
| 6261 | + case ICE_AQ_RC_EBADSIG: |
|---|
| 6262 | + return "ICE_AQ_RC_EBADSIG"; |
|---|
| 6263 | + case ICE_AQ_RC_ESVN: |
|---|
| 6264 | + return "ICE_AQ_RC_ESVN"; |
|---|
| 6265 | + case ICE_AQ_RC_EBADMAN: |
|---|
| 6266 | + return "ICE_AQ_RC_EBADMAN"; |
|---|
| 6267 | + case ICE_AQ_RC_EBADBUF: |
|---|
| 6268 | + return "ICE_AQ_RC_EBADBUF"; |
|---|
| 6269 | + } |
|---|
| 6270 | + |
|---|
| 6271 | + return "ICE_AQ_RC_UNKNOWN"; |
|---|
| 6272 | +} |
|---|
| 6273 | + |
|---|
| 6274 | +/** |
|---|
| 6275 | + * ice_stat_str - convert status err code to a string |
|---|
| 6276 | + * @stat_err: the status error code to convert |
|---|
| 6277 | + */ |
|---|
| 6278 | +const char *ice_stat_str(enum ice_status stat_err) |
|---|
| 6279 | +{ |
|---|
| 6280 | + switch (stat_err) { |
|---|
| 6281 | + case ICE_SUCCESS: |
|---|
| 6282 | + return "OK"; |
|---|
| 6283 | + case ICE_ERR_PARAM: |
|---|
| 6284 | + return "ICE_ERR_PARAM"; |
|---|
| 6285 | + case ICE_ERR_NOT_IMPL: |
|---|
| 6286 | + return "ICE_ERR_NOT_IMPL"; |
|---|
| 6287 | + case ICE_ERR_NOT_READY: |
|---|
| 6288 | + return "ICE_ERR_NOT_READY"; |
|---|
| 6289 | + case ICE_ERR_NOT_SUPPORTED: |
|---|
| 6290 | + return "ICE_ERR_NOT_SUPPORTED"; |
|---|
| 6291 | + case ICE_ERR_BAD_PTR: |
|---|
| 6292 | + return "ICE_ERR_BAD_PTR"; |
|---|
| 6293 | + case ICE_ERR_INVAL_SIZE: |
|---|
| 6294 | + return "ICE_ERR_INVAL_SIZE"; |
|---|
| 6295 | + case ICE_ERR_DEVICE_NOT_SUPPORTED: |
|---|
| 6296 | + return "ICE_ERR_DEVICE_NOT_SUPPORTED"; |
|---|
| 6297 | + case ICE_ERR_RESET_FAILED: |
|---|
| 6298 | + return "ICE_ERR_RESET_FAILED"; |
|---|
| 6299 | + case ICE_ERR_FW_API_VER: |
|---|
| 6300 | + return "ICE_ERR_FW_API_VER"; |
|---|
| 6301 | + case ICE_ERR_NO_MEMORY: |
|---|
| 6302 | + return "ICE_ERR_NO_MEMORY"; |
|---|
| 6303 | + case ICE_ERR_CFG: |
|---|
| 6304 | + return "ICE_ERR_CFG"; |
|---|
| 6305 | + case ICE_ERR_OUT_OF_RANGE: |
|---|
| 6306 | + return "ICE_ERR_OUT_OF_RANGE"; |
|---|
| 6307 | + case ICE_ERR_ALREADY_EXISTS: |
|---|
| 6308 | + return "ICE_ERR_ALREADY_EXISTS"; |
|---|
| 6309 | + case ICE_ERR_NVM_CHECKSUM: |
|---|
| 6310 | + return "ICE_ERR_NVM_CHECKSUM"; |
|---|
| 6311 | + case ICE_ERR_BUF_TOO_SHORT: |
|---|
| 6312 | + return "ICE_ERR_BUF_TOO_SHORT"; |
|---|
| 6313 | + case ICE_ERR_NVM_BLANK_MODE: |
|---|
| 6314 | + return "ICE_ERR_NVM_BLANK_MODE"; |
|---|
| 6315 | + case ICE_ERR_IN_USE: |
|---|
| 6316 | + return "ICE_ERR_IN_USE"; |
|---|
| 6317 | + case ICE_ERR_MAX_LIMIT: |
|---|
| 6318 | + return "ICE_ERR_MAX_LIMIT"; |
|---|
| 6319 | + case ICE_ERR_RESET_ONGOING: |
|---|
| 6320 | + return "ICE_ERR_RESET_ONGOING"; |
|---|
| 6321 | + case ICE_ERR_HW_TABLE: |
|---|
| 6322 | + return "ICE_ERR_HW_TABLE"; |
|---|
| 6323 | + case ICE_ERR_DOES_NOT_EXIST: |
|---|
| 6324 | + return "ICE_ERR_DOES_NOT_EXIST"; |
|---|
| 6325 | + case ICE_ERR_FW_DDP_MISMATCH: |
|---|
| 6326 | + return "ICE_ERR_FW_DDP_MISMATCH"; |
|---|
| 6327 | + case ICE_ERR_AQ_ERROR: |
|---|
| 6328 | + return "ICE_ERR_AQ_ERROR"; |
|---|
| 6329 | + case ICE_ERR_AQ_TIMEOUT: |
|---|
| 6330 | + return "ICE_ERR_AQ_TIMEOUT"; |
|---|
| 6331 | + case ICE_ERR_AQ_FULL: |
|---|
| 6332 | + return "ICE_ERR_AQ_FULL"; |
|---|
| 6333 | + case ICE_ERR_AQ_NO_WORK: |
|---|
| 6334 | + return "ICE_ERR_AQ_NO_WORK"; |
|---|
| 6335 | + case ICE_ERR_AQ_EMPTY: |
|---|
| 6336 | + return "ICE_ERR_AQ_EMPTY"; |
|---|
| 6337 | + case ICE_ERR_AQ_FW_CRITICAL: |
|---|
| 6338 | + return "ICE_ERR_AQ_FW_CRITICAL"; |
|---|
| 6339 | + } |
|---|
| 6340 | + |
|---|
| 6341 | + return "ICE_ERR_UNKNOWN"; |
|---|
| 5319 | 6342 | } |
|---|
| 5320 | 6343 | |
|---|
| 5321 | 6344 | /** |
|---|
| .. | .. |
|---|
| 5332 | 6355 | struct ice_pf *pf = vsi->back; |
|---|
| 5333 | 6356 | struct ice_hw *hw = &pf->hw; |
|---|
| 5334 | 6357 | enum ice_status status; |
|---|
| 6358 | + struct device *dev; |
|---|
| 5335 | 6359 | |
|---|
| 6360 | + dev = ice_pf_to_dev(pf); |
|---|
| 5336 | 6361 | if (seed) { |
|---|
| 5337 | 6362 | struct ice_aqc_get_set_rss_keys *buf = |
|---|
| 5338 | 6363 | (struct ice_aqc_get_set_rss_keys *)seed; |
|---|
| 5339 | 6364 | |
|---|
| 5340 | | - status = ice_aq_set_rss_key(hw, vsi->vsi_num, buf); |
|---|
| 6365 | + status = ice_aq_set_rss_key(hw, vsi->idx, buf); |
|---|
| 5341 | 6366 | |
|---|
| 5342 | 6367 | if (status) { |
|---|
| 5343 | | - dev_err(&pf->pdev->dev, |
|---|
| 5344 | | - "Cannot set RSS key, err %d aq_err %d\n", |
|---|
| 5345 | | - status, hw->adminq.rq_last_status); |
|---|
| 6368 | + dev_err(dev, "Cannot set RSS key, err %s aq_err %s\n", |
|---|
| 6369 | + ice_stat_str(status), |
|---|
| 6370 | + ice_aq_str(hw->adminq.sq_last_status)); |
|---|
| 5346 | 6371 | return -EIO; |
|---|
| 5347 | 6372 | } |
|---|
| 5348 | 6373 | } |
|---|
| 5349 | 6374 | |
|---|
| 5350 | 6375 | if (lut) { |
|---|
| 5351 | | - status = ice_aq_set_rss_lut(hw, vsi->vsi_num, |
|---|
| 5352 | | - vsi->rss_lut_type, lut, lut_size); |
|---|
| 6376 | + status = ice_aq_set_rss_lut(hw, vsi->idx, vsi->rss_lut_type, |
|---|
| 6377 | + lut, lut_size); |
|---|
| 5353 | 6378 | if (status) { |
|---|
| 5354 | | - dev_err(&pf->pdev->dev, |
|---|
| 5355 | | - "Cannot set RSS lut, err %d aq_err %d\n", |
|---|
| 5356 | | - status, hw->adminq.rq_last_status); |
|---|
| 6379 | + dev_err(dev, "Cannot set RSS lut, err %s aq_err %s\n", |
|---|
| 6380 | + ice_stat_str(status), |
|---|
| 6381 | + ice_aq_str(hw->adminq.sq_last_status)); |
|---|
| 5357 | 6382 | return -EIO; |
|---|
| 5358 | 6383 | } |
|---|
| 5359 | 6384 | } |
|---|
| .. | .. |
|---|
| 5375 | 6400 | struct ice_pf *pf = vsi->back; |
|---|
| 5376 | 6401 | struct ice_hw *hw = &pf->hw; |
|---|
| 5377 | 6402 | enum ice_status status; |
|---|
| 6403 | + struct device *dev; |
|---|
| 5378 | 6404 | |
|---|
| 6405 | + dev = ice_pf_to_dev(pf); |
|---|
| 5379 | 6406 | if (seed) { |
|---|
| 5380 | 6407 | struct ice_aqc_get_set_rss_keys *buf = |
|---|
| 5381 | 6408 | (struct ice_aqc_get_set_rss_keys *)seed; |
|---|
| 5382 | 6409 | |
|---|
| 5383 | | - status = ice_aq_get_rss_key(hw, vsi->vsi_num, buf); |
|---|
| 6410 | + status = ice_aq_get_rss_key(hw, vsi->idx, buf); |
|---|
| 5384 | 6411 | if (status) { |
|---|
| 5385 | | - dev_err(&pf->pdev->dev, |
|---|
| 5386 | | - "Cannot get RSS key, err %d aq_err %d\n", |
|---|
| 5387 | | - status, hw->adminq.rq_last_status); |
|---|
| 6412 | + dev_err(dev, "Cannot get RSS key, err %s aq_err %s\n", |
|---|
| 6413 | + ice_stat_str(status), |
|---|
| 6414 | + ice_aq_str(hw->adminq.sq_last_status)); |
|---|
| 5388 | 6415 | return -EIO; |
|---|
| 5389 | 6416 | } |
|---|
| 5390 | 6417 | } |
|---|
| 5391 | 6418 | |
|---|
| 5392 | 6419 | if (lut) { |
|---|
| 5393 | | - status = ice_aq_get_rss_lut(hw, vsi->vsi_num, |
|---|
| 5394 | | - vsi->rss_lut_type, lut, lut_size); |
|---|
| 6420 | + status = ice_aq_get_rss_lut(hw, vsi->idx, vsi->rss_lut_type, |
|---|
| 6421 | + lut, lut_size); |
|---|
| 5395 | 6422 | if (status) { |
|---|
| 5396 | | - dev_err(&pf->pdev->dev, |
|---|
| 5397 | | - "Cannot get RSS lut, err %d aq_err %d\n", |
|---|
| 5398 | | - status, hw->adminq.rq_last_status); |
|---|
| 6423 | + dev_err(dev, "Cannot get RSS lut, err %s aq_err %s\n", |
|---|
| 6424 | + ice_stat_str(status), |
|---|
| 6425 | + ice_aq_str(hw->adminq.sq_last_status)); |
|---|
| 5399 | 6426 | return -EIO; |
|---|
| 5400 | 6427 | } |
|---|
| 5401 | 6428 | } |
|---|
| .. | .. |
|---|
| 5404 | 6431 | } |
|---|
| 5405 | 6432 | |
|---|
| 5406 | 6433 | /** |
|---|
| 6434 | + * ice_bridge_getlink - Get the hardware bridge mode |
|---|
| 6435 | + * @skb: skb buff |
|---|
| 6436 | + * @pid: process ID |
|---|
| 6437 | + * @seq: RTNL message seq |
|---|
| 6438 | + * @dev: the netdev being configured |
|---|
| 6439 | + * @filter_mask: filter mask passed in |
|---|
| 6440 | + * @nlflags: netlink flags passed in |
|---|
| 6441 | + * |
|---|
| 6442 | + * Return the bridge mode (VEB/VEPA) |
|---|
| 6443 | + */ |
|---|
| 6444 | +static int |
|---|
| 6445 | +ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, |
|---|
| 6446 | + struct net_device *dev, u32 filter_mask, int nlflags) |
|---|
| 6447 | +{ |
|---|
| 6448 | + struct ice_netdev_priv *np = netdev_priv(dev); |
|---|
| 6449 | + struct ice_vsi *vsi = np->vsi; |
|---|
| 6450 | + struct ice_pf *pf = vsi->back; |
|---|
| 6451 | + u16 bmode; |
|---|
| 6452 | + |
|---|
| 6453 | + bmode = pf->first_sw->bridge_mode; |
|---|
| 6454 | + |
|---|
| 6455 | + return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags, |
|---|
| 6456 | + filter_mask, NULL); |
|---|
| 6457 | +} |
|---|
| 6458 | + |
|---|
| 6459 | +/** |
|---|
| 6460 | + * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA) |
|---|
| 6461 | + * @vsi: Pointer to VSI structure |
|---|
| 6462 | + * @bmode: Hardware bridge mode (VEB/VEPA) |
|---|
| 6463 | + * |
|---|
| 6464 | + * Returns 0 on success, negative on failure |
|---|
| 6465 | + */ |
|---|
| 6466 | +static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode) |
|---|
| 6467 | +{ |
|---|
| 6468 | + struct ice_aqc_vsi_props *vsi_props; |
|---|
| 6469 | + struct ice_hw *hw = &vsi->back->hw; |
|---|
| 6470 | + struct ice_vsi_ctx *ctxt; |
|---|
| 6471 | + enum ice_status status; |
|---|
| 6472 | + int ret = 0; |
|---|
| 6473 | + |
|---|
| 6474 | + vsi_props = &vsi->info; |
|---|
| 6475 | + |
|---|
| 6476 | + ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); |
|---|
| 6477 | + if (!ctxt) |
|---|
| 6478 | + return -ENOMEM; |
|---|
| 6479 | + |
|---|
| 6480 | + ctxt->info = vsi->info; |
|---|
| 6481 | + |
|---|
| 6482 | + if (bmode == BRIDGE_MODE_VEB) |
|---|
| 6483 | + /* change from VEPA to VEB mode */ |
|---|
| 6484 | + ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB; |
|---|
| 6485 | + else |
|---|
| 6486 | + /* change from VEB to VEPA mode */ |
|---|
| 6487 | + ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB; |
|---|
| 6488 | + ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID); |
|---|
| 6489 | + |
|---|
| 6490 | + status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); |
|---|
| 6491 | + if (status) { |
|---|
| 6492 | + dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %s aq_err %s\n", |
|---|
| 6493 | + bmode, ice_stat_str(status), |
|---|
| 6494 | + ice_aq_str(hw->adminq.sq_last_status)); |
|---|
| 6495 | + ret = -EIO; |
|---|
| 6496 | + goto out; |
|---|
| 6497 | + } |
|---|
| 6498 | + /* Update sw flags for book keeping */ |
|---|
| 6499 | + vsi_props->sw_flags = ctxt->info.sw_flags; |
|---|
| 6500 | + |
|---|
| 6501 | +out: |
|---|
| 6502 | + kfree(ctxt); |
|---|
| 6503 | + return ret; |
|---|
| 6504 | +} |
|---|
| 6505 | + |
|---|
| 6506 | +/** |
|---|
| 6507 | + * ice_bridge_setlink - Set the hardware bridge mode |
|---|
| 6508 | + * @dev: the netdev being configured |
|---|
| 6509 | + * @nlh: RTNL message |
|---|
| 6510 | + * @flags: bridge setlink flags |
|---|
| 6511 | + * @extack: netlink extended ack |
|---|
| 6512 | + * |
|---|
| 6513 | + * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is |
|---|
| 6514 | + * hooked up to. Iterates through the PF VSI list and sets the loopback mode (if |
|---|
| 6515 | + * not already set for all VSIs connected to this switch. And also update the |
|---|
| 6516 | + * unicast switch filter rules for the corresponding switch of the netdev. |
|---|
| 6517 | + */ |
|---|
| 6518 | +static int |
|---|
| 6519 | +ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, |
|---|
| 6520 | + u16 __always_unused flags, |
|---|
| 6521 | + struct netlink_ext_ack __always_unused *extack) |
|---|
| 6522 | +{ |
|---|
| 6523 | + struct ice_netdev_priv *np = netdev_priv(dev); |
|---|
| 6524 | + struct ice_pf *pf = np->vsi->back; |
|---|
| 6525 | + struct nlattr *attr, *br_spec; |
|---|
| 6526 | + struct ice_hw *hw = &pf->hw; |
|---|
| 6527 | + enum ice_status status; |
|---|
| 6528 | + struct ice_sw *pf_sw; |
|---|
| 6529 | + int rem, v, err = 0; |
|---|
| 6530 | + |
|---|
| 6531 | + pf_sw = pf->first_sw; |
|---|
| 6532 | + /* find the attribute in the netlink message */ |
|---|
| 6533 | + br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); |
|---|
| 6534 | + |
|---|
| 6535 | + nla_for_each_nested(attr, br_spec, rem) { |
|---|
| 6536 | + __u16 mode; |
|---|
| 6537 | + |
|---|
| 6538 | + if (nla_type(attr) != IFLA_BRIDGE_MODE) |
|---|
| 6539 | + continue; |
|---|
| 6540 | + mode = nla_get_u16(attr); |
|---|
| 6541 | + if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB) |
|---|
| 6542 | + return -EINVAL; |
|---|
| 6543 | + /* Continue if bridge mode is not being flipped */ |
|---|
| 6544 | + if (mode == pf_sw->bridge_mode) |
|---|
| 6545 | + continue; |
|---|
| 6546 | + /* Iterates through the PF VSI list and update the loopback |
|---|
| 6547 | + * mode of the VSI |
|---|
| 6548 | + */ |
|---|
| 6549 | + ice_for_each_vsi(pf, v) { |
|---|
| 6550 | + if (!pf->vsi[v]) |
|---|
| 6551 | + continue; |
|---|
| 6552 | + err = ice_vsi_update_bridge_mode(pf->vsi[v], mode); |
|---|
| 6553 | + if (err) |
|---|
| 6554 | + return err; |
|---|
| 6555 | + } |
|---|
| 6556 | + |
|---|
| 6557 | + hw->evb_veb = (mode == BRIDGE_MODE_VEB); |
|---|
| 6558 | + /* Update the unicast switch filter rules for the corresponding |
|---|
| 6559 | + * switch of the netdev |
|---|
| 6560 | + */ |
|---|
| 6561 | + status = ice_update_sw_rule_bridge_mode(hw); |
|---|
| 6562 | + if (status) { |
|---|
| 6563 | + netdev_err(dev, "switch rule update failed, mode = %d err %s aq_err %s\n", |
|---|
| 6564 | + mode, ice_stat_str(status), |
|---|
| 6565 | + ice_aq_str(hw->adminq.sq_last_status)); |
|---|
| 6566 | + /* revert hw->evb_veb */ |
|---|
| 6567 | + hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB); |
|---|
| 6568 | + return -EIO; |
|---|
| 6569 | + } |
|---|
| 6570 | + |
|---|
| 6571 | + pf_sw->bridge_mode = mode; |
|---|
| 6572 | + } |
|---|
| 6573 | + |
|---|
| 6574 | + return 0; |
|---|
| 6575 | +} |
|---|
| 6576 | + |
|---|
| 6577 | +/** |
|---|
| 6578 | + * ice_tx_timeout - Respond to a Tx Hang |
|---|
| 6579 | + * @netdev: network interface device structure |
|---|
| 6580 | + * @txqueue: Tx queue |
|---|
| 6581 | + */ |
|---|
| 6582 | +static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue) |
|---|
| 6583 | +{ |
|---|
| 6584 | + struct ice_netdev_priv *np = netdev_priv(netdev); |
|---|
| 6585 | + struct ice_ring *tx_ring = NULL; |
|---|
| 6586 | + struct ice_vsi *vsi = np->vsi; |
|---|
| 6587 | + struct ice_pf *pf = vsi->back; |
|---|
| 6588 | + u32 i; |
|---|
| 6589 | + |
|---|
| 6590 | + pf->tx_timeout_count++; |
|---|
| 6591 | + |
|---|
| 6592 | + /* Check if PFC is enabled for the TC to which the queue belongs |
|---|
| 6593 | + * to. If yes then Tx timeout is not caused by a hung queue, no |
|---|
| 6594 | + * need to reset and rebuild |
|---|
| 6595 | + */ |
|---|
| 6596 | + if (ice_is_pfc_causing_hung_q(pf, txqueue)) { |
|---|
| 6597 | + dev_info(ice_pf_to_dev(pf), "Fake Tx hang detected on queue %u, timeout caused by PFC storm\n", |
|---|
| 6598 | + txqueue); |
|---|
| 6599 | + return; |
|---|
| 6600 | + } |
|---|
| 6601 | + |
|---|
| 6602 | + /* now that we have an index, find the tx_ring struct */ |
|---|
| 6603 | + for (i = 0; i < vsi->num_txq; i++) |
|---|
| 6604 | + if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) |
|---|
| 6605 | + if (txqueue == vsi->tx_rings[i]->q_index) { |
|---|
| 6606 | + tx_ring = vsi->tx_rings[i]; |
|---|
| 6607 | + break; |
|---|
| 6608 | + } |
|---|
| 6609 | + |
|---|
| 6610 | + /* Reset recovery level if enough time has elapsed after last timeout. |
|---|
| 6611 | + * Also ensure no new reset action happens before next timeout period. |
|---|
| 6612 | + */ |
|---|
| 6613 | + if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20))) |
|---|
| 6614 | + pf->tx_timeout_recovery_level = 1; |
|---|
| 6615 | + else if (time_before(jiffies, (pf->tx_timeout_last_recovery + |
|---|
| 6616 | + netdev->watchdog_timeo))) |
|---|
| 6617 | + return; |
|---|
| 6618 | + |
|---|
| 6619 | + if (tx_ring) { |
|---|
| 6620 | + struct ice_hw *hw = &pf->hw; |
|---|
| 6621 | + u32 head, val = 0; |
|---|
| 6622 | + |
|---|
| 6623 | + head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])) & |
|---|
| 6624 | + QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S; |
|---|
| 6625 | + /* Read interrupt register */ |
|---|
| 6626 | + val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx)); |
|---|
| 6627 | + |
|---|
| 6628 | + netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %u, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n", |
|---|
| 6629 | + vsi->vsi_num, txqueue, tx_ring->next_to_clean, |
|---|
| 6630 | + head, tx_ring->next_to_use, val); |
|---|
| 6631 | + } |
|---|
| 6632 | + |
|---|
| 6633 | + pf->tx_timeout_last_recovery = jiffies; |
|---|
| 6634 | + netdev_info(netdev, "tx_timeout recovery level %d, txqueue %u\n", |
|---|
| 6635 | + pf->tx_timeout_recovery_level, txqueue); |
|---|
| 6636 | + |
|---|
| 6637 | + switch (pf->tx_timeout_recovery_level) { |
|---|
| 6638 | + case 1: |
|---|
| 6639 | + set_bit(__ICE_PFR_REQ, pf->state); |
|---|
| 6640 | + break; |
|---|
| 6641 | + case 2: |
|---|
| 6642 | + set_bit(__ICE_CORER_REQ, pf->state); |
|---|
| 6643 | + break; |
|---|
| 6644 | + case 3: |
|---|
| 6645 | + set_bit(__ICE_GLOBR_REQ, pf->state); |
|---|
| 6646 | + break; |
|---|
| 6647 | + default: |
|---|
| 6648 | + netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n"); |
|---|
| 6649 | + set_bit(__ICE_DOWN, pf->state); |
|---|
| 6650 | + set_bit(__ICE_NEEDS_RESTART, vsi->state); |
|---|
| 6651 | + set_bit(__ICE_SERVICE_DIS, pf->state); |
|---|
| 6652 | + break; |
|---|
| 6653 | + } |
|---|
| 6654 | + |
|---|
| 6655 | + ice_service_task_schedule(pf); |
|---|
| 6656 | + pf->tx_timeout_recovery_level++; |
|---|
| 6657 | +} |
|---|
| 6658 | + |
|---|
| 6659 | +/** |
|---|
| 5407 | 6660 | * ice_open - Called when a network interface becomes active |
|---|
| 5408 | 6661 | * @netdev: network interface device structure |
|---|
| 5409 | 6662 | * |
|---|
| 5410 | 6663 | * The open entry point is called when a network interface is made |
|---|
| 5411 | | - * active by the system (IFF_UP). At this point all resources needed |
|---|
| 6664 | + * active by the system (IFF_UP). At this point all resources needed |
|---|
| 5412 | 6665 | * for transmit and receive operations are allocated, the interrupt |
|---|
| 5413 | 6666 | * handler is registered with the OS, the netdev watchdog is enabled, |
|---|
| 5414 | 6667 | * and the stack is notified that the interface is ready. |
|---|
| 5415 | 6668 | * |
|---|
| 5416 | 6669 | * Returns 0 on success, negative value on failure |
|---|
| 5417 | 6670 | */ |
|---|
| 5418 | | -static int ice_open(struct net_device *netdev) |
|---|
| 6671 | +int ice_open(struct net_device *netdev) |
|---|
| 6672 | +{ |
|---|
| 6673 | + struct ice_netdev_priv *np = netdev_priv(netdev); |
|---|
| 6674 | + struct ice_pf *pf = np->vsi->back; |
|---|
| 6675 | + |
|---|
| 6676 | + if (ice_is_reset_in_progress(pf->state)) { |
|---|
| 6677 | + netdev_err(netdev, "can't open net device while reset is in progress"); |
|---|
| 6678 | + return -EBUSY; |
|---|
| 6679 | + } |
|---|
| 6680 | + |
|---|
| 6681 | + return ice_open_internal(netdev); |
|---|
| 6682 | +} |
|---|
| 6683 | + |
|---|
| 6684 | +/** |
|---|
| 6685 | + * ice_open_internal - Called when a network interface becomes active |
|---|
| 6686 | + * @netdev: network interface device structure |
|---|
| 6687 | + * |
|---|
| 6688 | + * Internal ice_open implementation. Should not be used directly except for ice_open and reset |
|---|
| 6689 | + * handling routine |
|---|
| 6690 | + * |
|---|
| 6691 | + * Returns 0 on success, negative value on failure |
|---|
| 6692 | + */ |
|---|
| 6693 | +int ice_open_internal(struct net_device *netdev) |
|---|
| 5419 | 6694 | { |
|---|
| 5420 | 6695 | struct ice_netdev_priv *np = netdev_priv(netdev); |
|---|
| 5421 | 6696 | struct ice_vsi *vsi = np->vsi; |
|---|
| 6697 | + struct ice_pf *pf = vsi->back; |
|---|
| 6698 | + struct ice_port_info *pi; |
|---|
| 5422 | 6699 | int err; |
|---|
| 6700 | + |
|---|
| 6701 | + if (test_bit(__ICE_NEEDS_RESTART, pf->state)) { |
|---|
| 6702 | + netdev_err(netdev, "driver needs to be unloaded and reloaded\n"); |
|---|
| 6703 | + return -EIO; |
|---|
| 6704 | + } |
|---|
| 6705 | + |
|---|
| 6706 | + if (test_bit(__ICE_DOWN, pf->state)) { |
|---|
| 6707 | + netdev_err(netdev, "device is not ready yet\n"); |
|---|
| 6708 | + return -EBUSY; |
|---|
| 6709 | + } |
|---|
| 5423 | 6710 | |
|---|
| 5424 | 6711 | netif_carrier_off(netdev); |
|---|
| 5425 | 6712 | |
|---|
| 5426 | | - err = ice_vsi_open(vsi); |
|---|
| 6713 | + pi = vsi->port_info; |
|---|
| 6714 | + err = ice_update_link_info(pi); |
|---|
| 6715 | + if (err) { |
|---|
| 6716 | + netdev_err(netdev, "Failed to get link info, error %d\n", |
|---|
| 6717 | + err); |
|---|
| 6718 | + return err; |
|---|
| 6719 | + } |
|---|
| 5427 | 6720 | |
|---|
| 6721 | + /* Set PHY if there is media, otherwise, turn off PHY */ |
|---|
| 6722 | + if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) { |
|---|
| 6723 | + clear_bit(ICE_FLAG_NO_MEDIA, pf->flags); |
|---|
| 6724 | + if (!test_bit(__ICE_PHY_INIT_COMPLETE, pf->state)) { |
|---|
| 6725 | + err = ice_init_phy_user_cfg(pi); |
|---|
| 6726 | + if (err) { |
|---|
| 6727 | + netdev_err(netdev, "Failed to initialize PHY settings, error %d\n", |
|---|
| 6728 | + err); |
|---|
| 6729 | + return err; |
|---|
| 6730 | + } |
|---|
| 6731 | + } |
|---|
| 6732 | + |
|---|
| 6733 | + err = ice_configure_phy(vsi); |
|---|
| 6734 | + if (err) { |
|---|
| 6735 | + netdev_err(netdev, "Failed to set physical link up, error %d\n", |
|---|
| 6736 | + err); |
|---|
| 6737 | + return err; |
|---|
| 6738 | + } |
|---|
| 6739 | + } else { |
|---|
| 6740 | + set_bit(ICE_FLAG_NO_MEDIA, pf->flags); |
|---|
| 6741 | + err = ice_aq_set_link_restart_an(pi, false, NULL); |
|---|
| 6742 | + if (err) { |
|---|
| 6743 | + netdev_err(netdev, "Failed to set PHY state, VSI %d error %d\n", |
|---|
| 6744 | + vsi->vsi_num, err); |
|---|
| 6745 | + return err; |
|---|
| 6746 | + } |
|---|
| 6747 | + } |
|---|
| 6748 | + |
|---|
| 6749 | + err = ice_vsi_open(vsi); |
|---|
| 5428 | 6750 | if (err) |
|---|
| 5429 | 6751 | netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n", |
|---|
| 5430 | 6752 | vsi->vsi_num, vsi->vsw->sw_id); |
|---|
| 6753 | + |
|---|
| 6754 | + /* Update existing tunnels information */ |
|---|
| 6755 | + udp_tunnel_get_rx_info(netdev); |
|---|
| 6756 | + |
|---|
| 5431 | 6757 | return err; |
|---|
| 5432 | 6758 | } |
|---|
| 5433 | 6759 | |
|---|
| .. | .. |
|---|
| 5436 | 6762 | * @netdev: network interface device structure |
|---|
| 5437 | 6763 | * |
|---|
| 5438 | 6764 | * The stop entry point is called when an interface is de-activated by the OS, |
|---|
| 5439 | | - * and the netdevice enters the DOWN state. The hardware is still under the |
|---|
| 6765 | + * and the netdevice enters the DOWN state. The hardware is still under the |
|---|
| 5440 | 6766 | * driver's control, but the netdev interface is disabled. |
|---|
| 5441 | 6767 | * |
|---|
| 5442 | 6768 | * Returns success only - not allowed to fail |
|---|
| 5443 | 6769 | */ |
|---|
| 5444 | | -static int ice_stop(struct net_device *netdev) |
|---|
| 6770 | +int ice_stop(struct net_device *netdev) |
|---|
| 5445 | 6771 | { |
|---|
| 5446 | 6772 | struct ice_netdev_priv *np = netdev_priv(netdev); |
|---|
| 5447 | 6773 | struct ice_vsi *vsi = np->vsi; |
|---|
| 6774 | + struct ice_pf *pf = vsi->back; |
|---|
| 6775 | + |
|---|
| 6776 | + if (ice_is_reset_in_progress(pf->state)) { |
|---|
| 6777 | + netdev_err(netdev, "can't stop net device while reset is in progress"); |
|---|
| 6778 | + return -EBUSY; |
|---|
| 6779 | + } |
|---|
| 5448 | 6780 | |
|---|
| 5449 | 6781 | ice_vsi_close(vsi); |
|---|
| 5450 | 6782 | |
|---|
| .. | .. |
|---|
| 5462 | 6794 | struct net_device __always_unused *netdev, |
|---|
| 5463 | 6795 | netdev_features_t features) |
|---|
| 5464 | 6796 | { |
|---|
| 6797 | + bool gso = skb_is_gso(skb); |
|---|
| 5465 | 6798 | size_t len; |
|---|
| 5466 | 6799 | |
|---|
| 5467 | 6800 | /* No point in doing any of this if neither checksum nor GSO are |
|---|
| 5468 | | - * being requested for this frame. We can rule out both by just |
|---|
| 6801 | + * being requested for this frame. We can rule out both by just |
|---|
| 5469 | 6802 | * checking for CHECKSUM_PARTIAL |
|---|
| 5470 | 6803 | */ |
|---|
| 5471 | 6804 | if (skb->ip_summed != CHECKSUM_PARTIAL) |
|---|
| 5472 | 6805 | return features; |
|---|
| 5473 | 6806 | |
|---|
| 5474 | 6807 | /* We cannot support GSO if the MSS is going to be less than |
|---|
| 5475 | | - * 64 bytes. If it is then we need to drop support for GSO. |
|---|
| 6808 | + * 64 bytes. If it is then we need to drop support for GSO. |
|---|
| 5476 | 6809 | */ |
|---|
| 5477 | | - if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64)) |
|---|
| 6810 | + if (gso && (skb_shinfo(skb)->gso_size < ICE_TXD_CTX_MIN_MSS)) |
|---|
| 5478 | 6811 | features &= ~NETIF_F_GSO_MASK; |
|---|
| 5479 | 6812 | |
|---|
| 5480 | | - len = skb_network_header(skb) - skb->data; |
|---|
| 5481 | | - if (len & ~(ICE_TXD_MACLEN_MAX)) |
|---|
| 6813 | + len = skb_network_offset(skb); |
|---|
| 6814 | + if (len > ICE_TXD_MACLEN_MAX || len & 0x1) |
|---|
| 5482 | 6815 | goto out_rm_features; |
|---|
| 5483 | 6816 | |
|---|
| 5484 | | - len = skb_transport_header(skb) - skb_network_header(skb); |
|---|
| 5485 | | - if (len & ~(ICE_TXD_IPLEN_MAX)) |
|---|
| 6817 | + len = skb_network_header_len(skb); |
|---|
| 6818 | + if (len > ICE_TXD_IPLEN_MAX || len & 0x1) |
|---|
| 5486 | 6819 | goto out_rm_features; |
|---|
| 5487 | 6820 | |
|---|
| 5488 | 6821 | if (skb->encapsulation) { |
|---|
| 5489 | | - len = skb_inner_network_header(skb) - skb_transport_header(skb); |
|---|
| 5490 | | - if (len & ~(ICE_TXD_L4LEN_MAX)) |
|---|
| 5491 | | - goto out_rm_features; |
|---|
| 6822 | + /* this must work for VXLAN frames AND IPIP/SIT frames, and in |
|---|
| 6823 | + * the case of IPIP frames, the transport header pointer is |
|---|
| 6824 | + * after the inner header! So check to make sure that this |
|---|
| 6825 | + * is a GRE or UDP_TUNNEL frame before doing that math. |
|---|
| 6826 | + */ |
|---|
| 6827 | + if (gso && (skb_shinfo(skb)->gso_type & |
|---|
| 6828 | + (SKB_GSO_GRE | SKB_GSO_UDP_TUNNEL))) { |
|---|
| 6829 | + len = skb_inner_network_header(skb) - |
|---|
| 6830 | + skb_transport_header(skb); |
|---|
| 6831 | + if (len > ICE_TXD_L4LEN_MAX || len & 0x1) |
|---|
| 6832 | + goto out_rm_features; |
|---|
| 6833 | + } |
|---|
| 5492 | 6834 | |
|---|
| 5493 | | - len = skb_inner_transport_header(skb) - |
|---|
| 5494 | | - skb_inner_network_header(skb); |
|---|
| 5495 | | - if (len & ~(ICE_TXD_IPLEN_MAX)) |
|---|
| 6835 | + len = skb_inner_network_header_len(skb); |
|---|
| 6836 | + if (len > ICE_TXD_IPLEN_MAX || len & 0x1) |
|---|
| 5496 | 6837 | goto out_rm_features; |
|---|
| 5497 | 6838 | } |
|---|
| 5498 | 6839 | |
|---|
| .. | .. |
|---|
| 5500 | 6841 | out_rm_features: |
|---|
| 5501 | 6842 | return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); |
|---|
| 5502 | 6843 | } |
|---|
| 6844 | + |
|---|
/* Reduced netdev ops table: no VF, bridge, tunnel, or RFS callbacks, and
 * ndo_bpf points at ice_xdp_safe_mode rather than ice_xdp.
 * NOTE(review): presumably installed instead of ice_netdev_ops when the
 * driver runs without a loaded DDP package ("safe mode") - confirm at the
 * netdev registration site.
 */
static const struct net_device_ops ice_netdev_safe_mode_ops = {
	.ndo_open = ice_open,
	.ndo_stop = ice_stop,
	.ndo_start_xmit = ice_start_xmit,
	.ndo_set_mac_address = ice_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = ice_change_mtu,
	.ndo_get_stats64 = ice_get_stats64,
	.ndo_tx_timeout = ice_tx_timeout,
	.ndo_bpf = ice_xdp_safe_mode,
};
|---|
| 5503 | 6856 | |
|---|
| 5504 | 6857 | static const struct net_device_ops ice_netdev_ops = { |
|---|
| 5505 | 6858 | .ndo_open = ice_open, |
|---|
| .. | .. |
|---|
| 5511 | 6864 | .ndo_validate_addr = eth_validate_addr, |
|---|
| 5512 | 6865 | .ndo_change_mtu = ice_change_mtu, |
|---|
| 5513 | 6866 | .ndo_get_stats64 = ice_get_stats64, |
|---|
| 6867 | + .ndo_set_tx_maxrate = ice_set_tx_maxrate, |
|---|
| 6868 | + .ndo_set_vf_spoofchk = ice_set_vf_spoofchk, |
|---|
| 6869 | + .ndo_set_vf_mac = ice_set_vf_mac, |
|---|
| 6870 | + .ndo_get_vf_config = ice_get_vf_cfg, |
|---|
| 6871 | + .ndo_set_vf_trust = ice_set_vf_trust, |
|---|
| 6872 | + .ndo_set_vf_vlan = ice_set_vf_port_vlan, |
|---|
| 6873 | + .ndo_set_vf_link_state = ice_set_vf_link_state, |
|---|
| 6874 | + .ndo_get_vf_stats = ice_get_vf_stats, |
|---|
| 5514 | 6875 | .ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid, |
|---|
| 5515 | 6876 | .ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid, |
|---|
| 5516 | 6877 | .ndo_set_features = ice_set_features, |
|---|
| 6878 | + .ndo_bridge_getlink = ice_bridge_getlink, |
|---|
| 6879 | + .ndo_bridge_setlink = ice_bridge_setlink, |
|---|
| 5517 | 6880 | .ndo_fdb_add = ice_fdb_add, |
|---|
| 5518 | 6881 | .ndo_fdb_del = ice_fdb_del, |
|---|
| 6882 | +#ifdef CONFIG_RFS_ACCEL |
|---|
| 6883 | + .ndo_rx_flow_steer = ice_rx_flow_steer, |
|---|
| 6884 | +#endif |
|---|
| 6885 | + .ndo_tx_timeout = ice_tx_timeout, |
|---|
| 6886 | + .ndo_bpf = ice_xdp, |
|---|
| 6887 | + .ndo_xdp_xmit = ice_xdp_xmit, |
|---|
| 6888 | + .ndo_xsk_wakeup = ice_xsk_wakeup, |
|---|
| 6889 | + .ndo_udp_tunnel_add = udp_tunnel_nic_add_port, |
|---|
| 6890 | + .ndo_udp_tunnel_del = udp_tunnel_nic_del_port, |
|---|
| 5519 | 6891 | }; |
|---|