hc
2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/drivers/net/ethernet/intel/ice/ice_common.c
....@@ -4,41 +4,58 @@
44 #include "ice_common.h"
55 #include "ice_sched.h"
66 #include "ice_adminq_cmd.h"
7
+#include "ice_flow.h"
78
8
-#define ICE_PF_RESET_WAIT_COUNT 200
9
-
10
-#define ICE_NIC_FLX_ENTRY(hw, mdid, idx) \
11
- wr32((hw), GLFLXP_RXDID_FLX_WRD_##idx(ICE_RXDID_FLEX_NIC), \
12
- ((ICE_RX_OPC_MDID << \
13
- GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_S) & \
14
- GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_M) | \
15
- (((mdid) << GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_S) & \
16
- GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_M))
17
-
18
-#define ICE_NIC_FLX_FLG_ENTRY(hw, flg_0, flg_1, flg_2, flg_3, idx) \
19
- wr32((hw), GLFLXP_RXDID_FLAGS(ICE_RXDID_FLEX_NIC, idx), \
20
- (((flg_0) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S) & \
21
- GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M) | \
22
- (((flg_1) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_S) & \
23
- GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_M) | \
24
- (((flg_2) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_S) & \
25
- GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_M) | \
26
- (((flg_3) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_S) & \
27
- GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_M))
9
+#define ICE_PF_RESET_WAIT_COUNT 300
2810
2911 /**
3012 * ice_set_mac_type - Sets MAC type
3113 * @hw: pointer to the HW structure
3214 *
3315 * This function sets the MAC type of the adapter based on the
34
- * vendor ID and device ID stored in the hw structure.
16
+ * vendor ID and device ID stored in the HW structure.
3517 */
3618 static enum ice_status ice_set_mac_type(struct ice_hw *hw)
3719 {
3820 if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
3921 return ICE_ERR_DEVICE_NOT_SUPPORTED;
4022
41
- hw->mac_type = ICE_MAC_GENERIC;
23
+ switch (hw->device_id) {
24
+ case ICE_DEV_ID_E810C_BACKPLANE:
25
+ case ICE_DEV_ID_E810C_QSFP:
26
+ case ICE_DEV_ID_E810C_SFP:
27
+ case ICE_DEV_ID_E810_XXV_BACKPLANE:
28
+ case ICE_DEV_ID_E810_XXV_QSFP:
29
+ case ICE_DEV_ID_E810_XXV_SFP:
30
+ hw->mac_type = ICE_MAC_E810;
31
+ break;
32
+ case ICE_DEV_ID_E823C_10G_BASE_T:
33
+ case ICE_DEV_ID_E823C_BACKPLANE:
34
+ case ICE_DEV_ID_E823C_QSFP:
35
+ case ICE_DEV_ID_E823C_SFP:
36
+ case ICE_DEV_ID_E823C_SGMII:
37
+ case ICE_DEV_ID_E822C_10G_BASE_T:
38
+ case ICE_DEV_ID_E822C_BACKPLANE:
39
+ case ICE_DEV_ID_E822C_QSFP:
40
+ case ICE_DEV_ID_E822C_SFP:
41
+ case ICE_DEV_ID_E822C_SGMII:
42
+ case ICE_DEV_ID_E822L_10G_BASE_T:
43
+ case ICE_DEV_ID_E822L_BACKPLANE:
44
+ case ICE_DEV_ID_E822L_SFP:
45
+ case ICE_DEV_ID_E822L_SGMII:
46
+ case ICE_DEV_ID_E823L_10G_BASE_T:
47
+ case ICE_DEV_ID_E823L_1GBE:
48
+ case ICE_DEV_ID_E823L_BACKPLANE:
49
+ case ICE_DEV_ID_E823L_QSFP:
50
+ case ICE_DEV_ID_E823L_SFP:
51
+ hw->mac_type = ICE_MAC_GENERIC;
52
+ break;
53
+ default:
54
+ hw->mac_type = ICE_MAC_UNKNOWN;
55
+ break;
56
+ }
57
+
58
+ ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
4259 return 0;
4360 }
4461
....@@ -60,7 +77,7 @@
6077
6178 /**
6279 * ice_aq_manage_mac_read - manage MAC address read command
63
- * @hw: pointer to the hw struct
80
+ * @hw: pointer to the HW struct
6481 * @buf: a virtual buffer to hold the manage MAC read response
6582 * @buf_size: Size of the virtual buffer
6683 * @cd: pointer to command details structure or NULL
....@@ -70,7 +87,8 @@
7087 * is returned in user specified buffer. Please interpret user specified
7188 * buffer as "manage_mac_read" response.
7289 * Response such as various MAC addresses are stored in HW struct (port.mac)
73
- * ice_aq_discover_caps is expected to be called before this function is called.
90
+ * ice_discover_dev_caps is expected to be called before this function is
91
+ * called.
7492 */
7593 static enum ice_status
7694 ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
....@@ -125,7 +143,7 @@
125143 *
126144 * Returns the various PHY capabilities supported on the Port (0x0600)
127145 */
128
-static enum ice_status
146
+enum ice_status
129147 ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
130148 struct ice_aqc_get_phy_caps_data *pcaps,
131149 struct ice_sq_cd *cd)
....@@ -134,11 +152,13 @@
134152 u16 pcaps_size = sizeof(*pcaps);
135153 struct ice_aq_desc desc;
136154 enum ice_status status;
155
+ struct ice_hw *hw;
137156
138157 cmd = &desc.params.get_phy;
139158
140159 if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
141160 return ICE_ERR_PARAM;
161
+ hw = pi->hw;
142162
143163 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);
144164
....@@ -146,12 +166,91 @@
146166 cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM);
147167
148168 cmd->param0 |= cpu_to_le16(report_mode);
149
- status = ice_aq_send_cmd(pi->hw, &desc, pcaps, pcaps_size, cd);
169
+ status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd);
150170
151
- if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP)
171
+ ice_debug(hw, ICE_DBG_LINK, "get phy caps - report_mode = 0x%x\n",
172
+ report_mode);
173
+ ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
174
+ (unsigned long long)le64_to_cpu(pcaps->phy_type_low));
175
+ ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
176
+ (unsigned long long)le64_to_cpu(pcaps->phy_type_high));
177
+ ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", pcaps->caps);
178
+ ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n",
179
+ pcaps->low_power_ctrl_an);
180
+ ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", pcaps->eee_cap);
181
+ ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n",
182
+ pcaps->eeer_value);
183
+ ice_debug(hw, ICE_DBG_LINK, " link_fec_options = 0x%x\n",
184
+ pcaps->link_fec_options);
185
+ ice_debug(hw, ICE_DBG_LINK, " module_compliance_enforcement = 0x%x\n",
186
+ pcaps->module_compliance_enforcement);
187
+ ice_debug(hw, ICE_DBG_LINK, " extended_compliance_code = 0x%x\n",
188
+ pcaps->extended_compliance_code);
189
+ ice_debug(hw, ICE_DBG_LINK, " module_type[0] = 0x%x\n",
190
+ pcaps->module_type[0]);
191
+ ice_debug(hw, ICE_DBG_LINK, " module_type[1] = 0x%x\n",
192
+ pcaps->module_type[1]);
193
+ ice_debug(hw, ICE_DBG_LINK, " module_type[2] = 0x%x\n",
194
+ pcaps->module_type[2]);
195
+
196
+ if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) {
152197 pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
198
+ pi->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high);
199
+ memcpy(pi->phy.link_info.module_type, &pcaps->module_type,
200
+ sizeof(pi->phy.link_info.module_type));
201
+ }
153202
154203 return status;
204
+}
205
+
206
+/**
207
+ * ice_aq_get_link_topo_handle - get link topology node return status
208
+ * @pi: port information structure
209
+ * @node_type: requested node type
210
+ * @cd: pointer to command details structure or NULL
211
+ *
212
+ * Get link topology node return status for specified node type (0x06E0)
213
+ *
214
+ * Node type cage can be used to determine if cage is present. If AQC
215
+ * returns error (ENOENT), then no cage present. If no cage present, then
216
+ * connection type is backplane or BASE-T.
217
+ */
218
+static enum ice_status
219
+ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
220
+ struct ice_sq_cd *cd)
221
+{
222
+ struct ice_aqc_get_link_topo *cmd;
223
+ struct ice_aq_desc desc;
224
+
225
+ cmd = &desc.params.get_link_topo;
226
+
227
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
228
+
229
+ cmd->addr.node_type_ctx = (ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
230
+ ICE_AQC_LINK_TOPO_NODE_CTX_S);
231
+
232
+ /* set node type */
233
+ cmd->addr.node_type_ctx |= (ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type);
234
+
235
+ return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
236
+}
237
+
238
+/**
239
+ * ice_is_media_cage_present
240
+ * @pi: port information structure
241
+ *
242
+ * Returns true if media cage is present, else false. If no cage, then
243
+ * media type is backplane or BASE-T.
244
+ */
245
+static bool ice_is_media_cage_present(struct ice_port_info *pi)
246
+{
247
+ /* Node type cage can be used to determine if cage is present. If AQC
248
+ * returns error (ENOENT), then no cage present. If no cage present then
249
+ * connection type is backplane or BASE-T.
250
+ */
251
+ return !ice_aq_get_link_topo_handle(pi,
252
+ ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE,
253
+ NULL);
155254 }
156255
157256 /**
....@@ -166,8 +265,23 @@
166265 return ICE_MEDIA_UNKNOWN;
167266
168267 hw_link_info = &pi->phy.link_info;
268
+ if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
269
+ /* If more than one media type is selected, report unknown */
270
+ return ICE_MEDIA_UNKNOWN;
169271
170272 if (hw_link_info->phy_type_low) {
273
+ /* 1G SGMII is a special case where some DA cable PHYs
274
+ * may show this as an option when it really shouldn't
275
+ * be since SGMII is meant to be between a MAC and a PHY
276
+ * in a backplane. Try to detect this case and handle it
277
+ */
278
+ if (hw_link_info->phy_type_low == ICE_PHY_TYPE_LOW_1G_SGMII &&
279
+ (hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
280
+ ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
281
+ hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
282
+ ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
283
+ return ICE_MEDIA_DA;
284
+
171285 switch (hw_link_info->phy_type_low) {
172286 case ICE_PHY_TYPE_LOW_1000BASE_SX:
173287 case ICE_PHY_TYPE_LOW_1000BASE_LX:
....@@ -176,9 +290,25 @@
176290 case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
177291 case ICE_PHY_TYPE_LOW_25GBASE_SR:
178292 case ICE_PHY_TYPE_LOW_25GBASE_LR:
179
- case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
180293 case ICE_PHY_TYPE_LOW_40GBASE_SR4:
181294 case ICE_PHY_TYPE_LOW_40GBASE_LR4:
295
+ case ICE_PHY_TYPE_LOW_50GBASE_SR2:
296
+ case ICE_PHY_TYPE_LOW_50GBASE_LR2:
297
+ case ICE_PHY_TYPE_LOW_50GBASE_SR:
298
+ case ICE_PHY_TYPE_LOW_50GBASE_FR:
299
+ case ICE_PHY_TYPE_LOW_50GBASE_LR:
300
+ case ICE_PHY_TYPE_LOW_100GBASE_SR4:
301
+ case ICE_PHY_TYPE_LOW_100GBASE_LR4:
302
+ case ICE_PHY_TYPE_LOW_100GBASE_SR2:
303
+ case ICE_PHY_TYPE_LOW_100GBASE_DR:
304
+ case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
305
+ case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
306
+ case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
307
+ case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
308
+ case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
309
+ case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
310
+ case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
311
+ case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
182312 return ICE_MEDIA_FIBER;
183313 case ICE_PHY_TYPE_LOW_100BASE_TX:
184314 case ICE_PHY_TYPE_LOW_1000BASE_T:
....@@ -192,7 +322,22 @@
192322 case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
193323 case ICE_PHY_TYPE_LOW_25GBASE_CR1:
194324 case ICE_PHY_TYPE_LOW_40GBASE_CR4:
325
+ case ICE_PHY_TYPE_LOW_50GBASE_CR2:
326
+ case ICE_PHY_TYPE_LOW_50GBASE_CP:
327
+ case ICE_PHY_TYPE_LOW_100GBASE_CR4:
328
+ case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
329
+ case ICE_PHY_TYPE_LOW_100GBASE_CP2:
195330 return ICE_MEDIA_DA;
331
+ case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
332
+ case ICE_PHY_TYPE_LOW_40G_XLAUI:
333
+ case ICE_PHY_TYPE_LOW_50G_LAUI2:
334
+ case ICE_PHY_TYPE_LOW_50G_AUI2:
335
+ case ICE_PHY_TYPE_LOW_50G_AUI1:
336
+ case ICE_PHY_TYPE_LOW_100G_AUI4:
337
+ case ICE_PHY_TYPE_LOW_100G_CAUI4:
338
+ if (ice_is_media_cage_present(pi))
339
+ return ICE_MEDIA_DA;
340
+ fallthrough;
196341 case ICE_PHY_TYPE_LOW_1000BASE_KX:
197342 case ICE_PHY_TYPE_LOW_2500BASE_KX:
198343 case ICE_PHY_TYPE_LOW_2500BASE_X:
....@@ -202,10 +347,26 @@
202347 case ICE_PHY_TYPE_LOW_25GBASE_KR1:
203348 case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
204349 case ICE_PHY_TYPE_LOW_40GBASE_KR4:
350
+ case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
351
+ case ICE_PHY_TYPE_LOW_50GBASE_KR2:
352
+ case ICE_PHY_TYPE_LOW_100GBASE_KR4:
353
+ case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
205354 return ICE_MEDIA_BACKPLANE;
206355 }
356
+ } else {
357
+ switch (hw_link_info->phy_type_high) {
358
+ case ICE_PHY_TYPE_HIGH_100G_AUI2:
359
+ case ICE_PHY_TYPE_HIGH_100G_CAUI2:
360
+ if (ice_is_media_cage_present(pi))
361
+ return ICE_MEDIA_DA;
362
+ fallthrough;
363
+ case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
364
+ return ICE_MEDIA_BACKPLANE;
365
+ case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
366
+ case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
367
+ return ICE_MEDIA_FIBER;
368
+ }
207369 }
208
-
209370 return ICE_MEDIA_UNKNOWN;
210371 }
211372
....@@ -222,21 +383,23 @@
222383 ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
223384 struct ice_link_status *link, struct ice_sq_cd *cd)
224385 {
225
- struct ice_link_status *hw_link_info_old, *hw_link_info;
226386 struct ice_aqc_get_link_status_data link_data = { 0 };
227387 struct ice_aqc_get_link_status *resp;
388
+ struct ice_link_status *li_old, *li;
228389 enum ice_media_type *hw_media_type;
229390 struct ice_fc_info *hw_fc_info;
230391 bool tx_pause, rx_pause;
231392 struct ice_aq_desc desc;
232393 enum ice_status status;
394
+ struct ice_hw *hw;
233395 u16 cmd_flags;
234396
235397 if (!pi)
236398 return ICE_ERR_PARAM;
237
- hw_link_info_old = &pi->phy.link_info_old;
399
+ hw = pi->hw;
400
+ li_old = &pi->phy.link_info_old;
238401 hw_media_type = &pi->phy.media_type;
239
- hw_link_info = &pi->phy.link_info;
402
+ li = &pi->phy.link_info;
240403 hw_fc_info = &pi->fc;
241404
242405 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
....@@ -245,24 +408,27 @@
245408 resp->cmd_flags = cpu_to_le16(cmd_flags);
246409 resp->lport_num = pi->lport;
247410
248
- status = ice_aq_send_cmd(pi->hw, &desc, &link_data, sizeof(link_data),
249
- cd);
411
+ status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd);
250412
251413 if (status)
252414 return status;
253415
254416 /* save off old link status information */
255
- *hw_link_info_old = *hw_link_info;
417
+ *li_old = *li;
256418
257419 /* update current link status information */
258
- hw_link_info->link_speed = le16_to_cpu(link_data.link_speed);
259
- hw_link_info->phy_type_low = le64_to_cpu(link_data.phy_type_low);
420
+ li->link_speed = le16_to_cpu(link_data.link_speed);
421
+ li->phy_type_low = le64_to_cpu(link_data.phy_type_low);
422
+ li->phy_type_high = le64_to_cpu(link_data.phy_type_high);
260423 *hw_media_type = ice_get_media_type(pi);
261
- hw_link_info->link_info = link_data.link_info;
262
- hw_link_info->an_info = link_data.an_info;
263
- hw_link_info->ext_info = link_data.ext_info;
264
- hw_link_info->max_frame_size = le16_to_cpu(link_data.max_frame_size);
265
- hw_link_info->pacing = link_data.cfg & ICE_AQ_CFG_PACING_M;
424
+ li->link_info = link_data.link_info;
425
+ li->an_info = link_data.an_info;
426
+ li->ext_info = link_data.ext_info;
427
+ li->max_frame_size = le16_to_cpu(link_data.max_frame_size);
428
+ li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
429
+ li->topo_media_conflict = link_data.topo_media_conflict;
430
+ li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
431
+ ICE_AQ_CFG_PACING_TYPE_M);
266432
267433 /* update fc info */
268434 tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
....@@ -276,53 +442,107 @@
276442 else
277443 hw_fc_info->current_mode = ICE_FC_NONE;
278444
279
- hw_link_info->lse_ena =
280
- !!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));
445
+ li->lse_ena = !!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));
446
+
447
+ ice_debug(hw, ICE_DBG_LINK, "get link info\n");
448
+ ice_debug(hw, ICE_DBG_LINK, " link_speed = 0x%x\n", li->link_speed);
449
+ ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
450
+ (unsigned long long)li->phy_type_low);
451
+ ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
452
+ (unsigned long long)li->phy_type_high);
453
+ ice_debug(hw, ICE_DBG_LINK, " media_type = 0x%x\n", *hw_media_type);
454
+ ice_debug(hw, ICE_DBG_LINK, " link_info = 0x%x\n", li->link_info);
455
+ ice_debug(hw, ICE_DBG_LINK, " an_info = 0x%x\n", li->an_info);
456
+ ice_debug(hw, ICE_DBG_LINK, " ext_info = 0x%x\n", li->ext_info);
457
+ ice_debug(hw, ICE_DBG_LINK, " fec_info = 0x%x\n", li->fec_info);
458
+ ice_debug(hw, ICE_DBG_LINK, " lse_ena = 0x%x\n", li->lse_ena);
459
+ ice_debug(hw, ICE_DBG_LINK, " max_frame = 0x%x\n",
460
+ li->max_frame_size);
461
+ ice_debug(hw, ICE_DBG_LINK, " pacing = 0x%x\n", li->pacing);
281462
282463 /* save link status information */
283464 if (link)
284
- *link = *hw_link_info;
465
+ *link = *li;
285466
286467 /* flag cleared so calling functions don't call AQ again */
287468 pi->phy.get_link_info = false;
288469
289
- return status;
470
+ return 0;
290471 }
291472
292473 /**
293
- * ice_init_flex_parser - initialize rx flex parser
294
- * @hw: pointer to the hardware structure
474
+ * ice_fill_tx_timer_and_fc_thresh
475
+ * @hw: pointer to the HW struct
476
+ * @cmd: pointer to MAC cfg structure
295477 *
296
- * Function to initialize flex descriptors
478
+ * Add Tx timer and FC refresh threshold info to Set MAC Config AQ command
479
+ * descriptor
297480 */
298
-static void ice_init_flex_parser(struct ice_hw *hw)
481
+static void
482
+ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
483
+ struct ice_aqc_set_mac_cfg *cmd)
299484 {
300
- u8 idx = 0;
485
+ u16 fc_thres_val, tx_timer_val;
486
+ u32 val;
301487
302
- ICE_NIC_FLX_ENTRY(hw, ICE_RX_MDID_HASH_LOW, 0);
303
- ICE_NIC_FLX_ENTRY(hw, ICE_RX_MDID_HASH_HIGH, 1);
304
- ICE_NIC_FLX_ENTRY(hw, ICE_RX_MDID_FLOW_ID_LOWER, 2);
305
- ICE_NIC_FLX_ENTRY(hw, ICE_RX_MDID_FLOW_ID_HIGH, 3);
306
- ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_PKT_FRG, ICE_RXFLG_UDP_GRE,
307
- ICE_RXFLG_PKT_DSI, ICE_RXFLG_FIN, idx++);
308
- ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_SYN, ICE_RXFLG_RST,
309
- ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx++);
310
- ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI,
311
- ICE_RXFLG_EVLAN_x8100, ICE_RXFLG_EVLAN_x9100,
312
- idx++);
313
- ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_VLAN_x8100, ICE_RXFLG_TNL_VLAN,
314
- ICE_RXFLG_TNL_MAC, ICE_RXFLG_TNL0, idx++);
315
- ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_TNL1, ICE_RXFLG_TNL2,
316
- ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx);
488
+ /* We read back the transmit timer and FC threshold value of
489
+ * LFC. Thus, we will use index =
490
+ * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
491
+ *
492
+ * Also, because we are operating on transmit timer and FC
493
+ * threshold of LFC, we don't turn on any bit in tx_tmr_priority
494
+ */
495
+#define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX
496
+
497
+ /* Retrieve the transmit timer */
498
+ val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC));
499
+ tx_timer_val = val &
500
+ PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M;
501
+ cmd->tx_tmr_value = cpu_to_le16(tx_timer_val);
502
+
503
+ /* Retrieve the FC threshold */
504
+ val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC));
505
+ fc_thres_val = val & PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M;
506
+
507
+ cmd->fc_refresh_threshold = cpu_to_le16(fc_thres_val);
508
+}
509
+
510
+/**
511
+ * ice_aq_set_mac_cfg
512
+ * @hw: pointer to the HW struct
513
+ * @max_frame_size: Maximum Frame Size to be supported
514
+ * @cd: pointer to command details structure or NULL
515
+ *
516
+ * Set MAC configuration (0x0603)
517
+ */
518
+enum ice_status
519
+ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
520
+{
521
+ struct ice_aqc_set_mac_cfg *cmd;
522
+ struct ice_aq_desc desc;
523
+
524
+ cmd = &desc.params.set_mac_cfg;
525
+
526
+ if (max_frame_size == 0)
527
+ return ICE_ERR_PARAM;
528
+
529
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);
530
+
531
+ cmd->max_frame_size = cpu_to_le16(max_frame_size);
532
+
533
+ ice_fill_tx_timer_and_fc_thresh(hw, cmd);
534
+
535
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
317536 }
318537
319538 /**
320539 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
321
- * @hw: pointer to the hw struct
540
+ * @hw: pointer to the HW struct
322541 */
323542 static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
324543 {
325544 struct ice_switch_info *sw;
545
+ enum ice_status status;
326546
327547 hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
328548 sizeof(*hw->switch_info), GFP_KERNEL);
....@@ -333,47 +553,284 @@
333553
334554 INIT_LIST_HEAD(&sw->vsi_list_map_head);
335555
336
- mutex_init(&sw->mac_list_lock);
337
- INIT_LIST_HEAD(&sw->mac_list_head);
338
-
339
- mutex_init(&sw->vlan_list_lock);
340
- INIT_LIST_HEAD(&sw->vlan_list_head);
341
-
342
- mutex_init(&sw->eth_m_list_lock);
343
- INIT_LIST_HEAD(&sw->eth_m_list_head);
344
-
345
- mutex_init(&sw->promisc_list_lock);
346
- INIT_LIST_HEAD(&sw->promisc_list_head);
347
-
348
- mutex_init(&sw->mac_vlan_list_lock);
349
- INIT_LIST_HEAD(&sw->mac_vlan_list_head);
350
-
556
+ status = ice_init_def_sw_recp(hw);
557
+ if (status) {
558
+ devm_kfree(ice_hw_to_dev(hw), hw->switch_info);
559
+ return status;
560
+ }
351561 return 0;
352562 }
353563
354564 /**
355565 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
356
- * @hw: pointer to the hw struct
566
+ * @hw: pointer to the HW struct
357567 */
358568 static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
359569 {
360570 struct ice_switch_info *sw = hw->switch_info;
361571 struct ice_vsi_list_map_info *v_pos_map;
362572 struct ice_vsi_list_map_info *v_tmp_map;
573
+ struct ice_sw_recipe *recps;
574
+ u8 i;
363575
364576 list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
365577 list_entry) {
366578 list_del(&v_pos_map->list_entry);
367579 devm_kfree(ice_hw_to_dev(hw), v_pos_map);
368580 }
581
+ recps = hw->switch_info->recp_list;
582
+ for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
583
+ struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;
369584
370
- mutex_destroy(&sw->mac_list_lock);
371
- mutex_destroy(&sw->vlan_list_lock);
372
- mutex_destroy(&sw->eth_m_list_lock);
373
- mutex_destroy(&sw->promisc_list_lock);
374
- mutex_destroy(&sw->mac_vlan_list_lock);
375
-
585
+ recps[i].root_rid = i;
586
+ mutex_destroy(&recps[i].filt_rule_lock);
587
+ list_for_each_entry_safe(lst_itr, tmp_entry,
588
+ &recps[i].filt_rules, list_entry) {
589
+ list_del(&lst_itr->list_entry);
590
+ devm_kfree(ice_hw_to_dev(hw), lst_itr);
591
+ }
592
+ }
593
+ ice_rm_all_sw_replay_rule_info(hw);
594
+ devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
376595 devm_kfree(ice_hw_to_dev(hw), sw);
596
+}
597
+
598
+/**
599
+ * ice_get_fw_log_cfg - get FW logging configuration
600
+ * @hw: pointer to the HW struct
601
+ */
602
+static enum ice_status ice_get_fw_log_cfg(struct ice_hw *hw)
603
+{
604
+ struct ice_aq_desc desc;
605
+ enum ice_status status;
606
+ __le16 *config;
607
+ u16 size;
608
+
609
+ size = sizeof(*config) * ICE_AQC_FW_LOG_ID_MAX;
610
+ config = devm_kzalloc(ice_hw_to_dev(hw), size, GFP_KERNEL);
611
+ if (!config)
612
+ return ICE_ERR_NO_MEMORY;
613
+
614
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging_info);
615
+
616
+ status = ice_aq_send_cmd(hw, &desc, config, size, NULL);
617
+ if (!status) {
618
+ u16 i;
619
+
620
+ /* Save FW logging information into the HW structure */
621
+ for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
622
+ u16 v, m, flgs;
623
+
624
+ v = le16_to_cpu(config[i]);
625
+ m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
626
+ flgs = (v & ICE_AQC_FW_LOG_EN_M) >> ICE_AQC_FW_LOG_EN_S;
627
+
628
+ if (m < ICE_AQC_FW_LOG_ID_MAX)
629
+ hw->fw_log.evnts[m].cur = flgs;
630
+ }
631
+ }
632
+
633
+ devm_kfree(ice_hw_to_dev(hw), config);
634
+
635
+ return status;
636
+}
637
+
638
+/**
639
+ * ice_cfg_fw_log - configure FW logging
640
+ * @hw: pointer to the HW struct
641
+ * @enable: enable certain FW logging events if true, disable all if false
642
+ *
643
+ * This function enables/disables the FW logging via Rx CQ events and a UART
644
+ * port based on predetermined configurations. FW logging via the Rx CQ can be
645
+ * enabled/disabled for individual PF's. However, FW logging via the UART can
646
+ * only be enabled/disabled for all PFs on the same device.
647
+ *
648
+ * To enable overall FW logging, the "cq_en" and "uart_en" enable bits in
649
+ * hw->fw_log need to be set accordingly, e.g. based on user-provided input,
650
+ * before initializing the device.
651
+ *
652
+ * When re/configuring FW logging, callers need to update the "cfg" elements of
653
+ * the hw->fw_log.evnts array with the desired logging event configurations for
654
+ * modules of interest. When disabling FW logging completely, the callers can
655
+ * just pass false in the "enable" parameter. On completion, the function will
656
+ * update the "cur" element of the hw->fw_log.evnts array with the resulting
657
+ * logging event configurations of the modules that are being re/configured. FW
658
+ * logging modules that are not part of a reconfiguration operation retain their
659
+ * previous states.
660
+ *
661
+ * Before resetting the device, it is recommended that the driver disables FW
662
+ * logging before shutting down the control queue. When disabling FW logging
663
+ * ("enable" = false), the latest configurations of FW logging events stored in
664
+ * hw->fw_log.evnts[] are not overridden to allow them to be reconfigured after
665
+ * a device reset.
666
+ *
667
+ * When enabling FW logging to emit log messages via the Rx CQ during the
668
+ * device's initialization phase, a mechanism alternative to interrupt handlers
669
+ * needs to be used to extract FW log messages from the Rx CQ periodically and
670
+ * to prevent the Rx CQ from being full and stalling other types of control
671
+ * messages from FW to SW. Interrupts are typically disabled during the device's
672
+ * initialization phase.
673
+ */
674
+static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
675
+{
676
+ struct ice_aqc_fw_logging *cmd;
677
+ enum ice_status status = 0;
678
+ u16 i, chgs = 0, len = 0;
679
+ struct ice_aq_desc desc;
680
+ __le16 *data = NULL;
681
+ u8 actv_evnts = 0;
682
+ void *buf = NULL;
683
+
684
+ if (!hw->fw_log.cq_en && !hw->fw_log.uart_en)
685
+ return 0;
686
+
687
+ /* Disable FW logging only when the control queue is still responsive */
688
+ if (!enable &&
689
+ (!hw->fw_log.actv_evnts || !ice_check_sq_alive(hw, &hw->adminq)))
690
+ return 0;
691
+
692
+ /* Get current FW log settings */
693
+ status = ice_get_fw_log_cfg(hw);
694
+ if (status)
695
+ return status;
696
+
697
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging);
698
+ cmd = &desc.params.fw_logging;
699
+
700
+ /* Indicate which controls are valid */
701
+ if (hw->fw_log.cq_en)
702
+ cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_AQ_VALID;
703
+
704
+ if (hw->fw_log.uart_en)
705
+ cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_UART_VALID;
706
+
707
+ if (enable) {
708
+ /* Fill in an array of entries with FW logging modules and
709
+ * logging events being reconfigured.
710
+ */
711
+ for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
712
+ u16 val;
713
+
714
+ /* Keep track of enabled event types */
715
+ actv_evnts |= hw->fw_log.evnts[i].cfg;
716
+
717
+ if (hw->fw_log.evnts[i].cfg == hw->fw_log.evnts[i].cur)
718
+ continue;
719
+
720
+ if (!data) {
721
+ data = devm_kcalloc(ice_hw_to_dev(hw),
722
+ ICE_AQC_FW_LOG_ID_MAX,
723
+ sizeof(*data),
724
+ GFP_KERNEL);
725
+ if (!data)
726
+ return ICE_ERR_NO_MEMORY;
727
+ }
728
+
729
+ val = i << ICE_AQC_FW_LOG_ID_S;
730
+ val |= hw->fw_log.evnts[i].cfg << ICE_AQC_FW_LOG_EN_S;
731
+ data[chgs++] = cpu_to_le16(val);
732
+ }
733
+
734
+ /* Only enable FW logging if at least one module is specified.
735
+ * If FW logging is currently enabled but all modules are not
736
+ * enabled to emit log messages, disable FW logging altogether.
737
+ */
738
+ if (actv_evnts) {
739
+ /* Leave if there is effectively no change */
740
+ if (!chgs)
741
+ goto out;
742
+
743
+ if (hw->fw_log.cq_en)
744
+ cmd->log_ctrl |= ICE_AQC_FW_LOG_AQ_EN;
745
+
746
+ if (hw->fw_log.uart_en)
747
+ cmd->log_ctrl |= ICE_AQC_FW_LOG_UART_EN;
748
+
749
+ buf = data;
750
+ len = sizeof(*data) * chgs;
751
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
752
+ }
753
+ }
754
+
755
+ status = ice_aq_send_cmd(hw, &desc, buf, len, NULL);
756
+ if (!status) {
757
+ /* Update the current configuration to reflect events enabled.
758
+ * hw->fw_log.cq_en and hw->fw_log.uart_en indicate if the FW
759
+ * logging mode is enabled for the device. They do not reflect
760
+ * actual modules being enabled to emit log messages. So, their
761
+ * values remain unchanged even when all modules are disabled.
762
+ */
763
+ u16 cnt = enable ? chgs : (u16)ICE_AQC_FW_LOG_ID_MAX;
764
+
765
+ hw->fw_log.actv_evnts = actv_evnts;
766
+ for (i = 0; i < cnt; i++) {
767
+ u16 v, m;
768
+
769
+ if (!enable) {
770
+ /* When disabling all FW logging events as part
771
+ * of device's de-initialization, the original
772
+ * configurations are retained, and can be used
773
+ * to reconfigure FW logging later if the device
774
+ * is re-initialized.
775
+ */
776
+ hw->fw_log.evnts[i].cur = 0;
777
+ continue;
778
+ }
779
+
780
+ v = le16_to_cpu(data[i]);
781
+ m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
782
+ hw->fw_log.evnts[m].cur = hw->fw_log.evnts[m].cfg;
783
+ }
784
+ }
785
+
786
+out:
787
+ if (data)
788
+ devm_kfree(ice_hw_to_dev(hw), data);
789
+
790
+ return status;
791
+}
792
+
793
+/**
794
+ * ice_output_fw_log
795
+ * @hw: pointer to the HW struct
796
+ * @desc: pointer to the AQ message descriptor
797
+ * @buf: pointer to the buffer accompanying the AQ message
798
+ *
799
+ * Formats a FW Log message and outputs it via the standard driver logs.
800
+ */
801
+void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
802
+{
803
+ ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg Start ]\n");
804
+ ice_debug_array(hw, ICE_DBG_FW_LOG, 16, 1, (u8 *)buf,
805
+ le16_to_cpu(desc->datalen));
806
+ ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg End ]\n");
807
+}
808
+
809
+/**
810
+ * ice_get_itr_intrl_gran
811
+ * @hw: pointer to the HW struct
812
+ *
813
+ * Determines the ITR/INTRL granularities based on the maximum aggregate
814
+ * bandwidth according to the device's configuration during power-on.
815
+ */
816
+static void ice_get_itr_intrl_gran(struct ice_hw *hw)
817
+{
818
+ u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
819
+ GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
820
+ GL_PWR_MODE_CTL_CAR_MAX_BW_S;
821
+
822
+ switch (max_agg_bw) {
823
+ case ICE_MAX_AGG_BW_200G:
824
+ case ICE_MAX_AGG_BW_100G:
825
+ case ICE_MAX_AGG_BW_50G:
826
+ hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
827
+ hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
828
+ break;
829
+ case ICE_MAX_AGG_BW_25G:
830
+ hw->itr_gran = ICE_ITR_GRAN_MAX_25;
831
+ hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
832
+ break;
833
+ }
377834 }
378835
379836 /**
....@@ -400,19 +857,24 @@
400857 if (status)
401858 return status;
402859
403
- /* set these values to minimum allowed */
404
- hw->itr_gran_200 = ICE_ITR_GRAN_MIN_200;
405
- hw->itr_gran_100 = ICE_ITR_GRAN_MIN_100;
406
- hw->itr_gran_50 = ICE_ITR_GRAN_MIN_50;
407
- hw->itr_gran_25 = ICE_ITR_GRAN_MIN_25;
860
+ ice_get_itr_intrl_gran(hw);
408861
409
- status = ice_init_all_ctrlq(hw);
862
+ status = ice_create_all_ctrlq(hw);
410863 if (status)
411864 goto err_unroll_cqinit;
865
+
866
+ /* Enable FW logging. Not fatal if this fails. */
867
+ status = ice_cfg_fw_log(hw, true);
868
+ if (status)
869
+ ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging.\n");
412870
413871 status = ice_clear_pf_cfg(hw);
414872 if (status)
415873 goto err_unroll_cqinit;
874
+
875
+ /* Set bit to enable Flow Director filters */
876
+ wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
877
+ INIT_LIST_HEAD(&hw->fdir_list_head);
416878
417879 ice_clear_pxe_mode(hw);
418880
....@@ -431,7 +893,7 @@
431893 goto err_unroll_cqinit;
432894 }
433895
434
- /* set the back pointer to hw */
896
+ /* set the back pointer to HW */
435897 hw->port_info->hw = hw;
436898
437899 /* Initialize port_info struct with switch configuration data */
....@@ -441,7 +903,7 @@
441903
442904 hw->evb_veb = true;
443905
444
- /* Query the allocated resources for tx scheduler */
906
+ /* Query the allocated resources for Tx scheduler */
445907 status = ice_sched_query_res_alloc(hw);
446908 if (status) {
447909 ice_debug(hw, ICE_DBG_SCHED,
....@@ -462,7 +924,8 @@
462924
463925 /* Initialize port_info struct with PHY capabilities */
464926 status = ice_aq_get_phy_caps(hw->port_info, false,
465
- ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL);
927
+ ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps,
928
+ NULL);
466929 devm_kfree(ice_hw_to_dev(hw), pcaps);
467930 if (status)
468931 goto err_unroll_sched;
....@@ -471,6 +934,17 @@
471934 status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
472935 if (status)
473936 goto err_unroll_sched;
937
+
938
+ /* need a valid SW entry point to build a Tx tree */
939
+ if (!hw->sw_entry_point_layer) {
940
+ ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
941
+ status = ICE_ERR_CFG;
942
+ goto err_unroll_sched;
943
+ }
944
+ INIT_LIST_HEAD(&hw->agg_list);
945
+ /* Initialize max burst size */
946
+ if (!hw->max_burst_size)
947
+ ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);
474948
475949 status = ice_init_fltr_mgmt_struct(hw);
476950 if (status)
....@@ -493,9 +967,18 @@
493967
494968 if (status)
495969 goto err_unroll_fltr_mgmt_struct;
496
-
497
- ice_init_flex_parser(hw);
498
-
970
+ /* enable jumbo frame support at MAC level */
971
+ status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
972
+ if (status)
973
+ goto err_unroll_fltr_mgmt_struct;
974
+ /* Obtain counter base index which would be used by flow director */
975
+ status = ice_alloc_fd_res_cntr(hw, &hw->fd_ctr_base);
976
+ if (status)
977
+ goto err_unroll_fltr_mgmt_struct;
978
+ status = ice_init_hw_tbls(hw);
979
+ if (status)
980
+ goto err_unroll_fltr_mgmt_struct;
981
+ mutex_init(&hw->tnl_lock);
499982 return 0;
500983
501984 err_unroll_fltr_mgmt_struct:
....@@ -505,25 +988,40 @@
505988 err_unroll_alloc:
506989 devm_kfree(ice_hw_to_dev(hw), hw->port_info);
507990 err_unroll_cqinit:
508
- ice_shutdown_all_ctrlq(hw);
991
+ ice_destroy_all_ctrlq(hw);
509992 return status;
510993 }
511994
512995 /**
513996 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
514997 * @hw: pointer to the hardware structure
998
+ *
999
+ * This should be called only during nominal operation, not as a result of
1000
+ * ice_init_hw() failing since ice_init_hw() will take care of unrolling
1001
+ * applicable initializations if it fails for any reason.
5151002 */
5161003 void ice_deinit_hw(struct ice_hw *hw)
5171004 {
1005
+ ice_free_fd_res_cntr(hw, hw->fd_ctr_base);
1006
+ ice_cleanup_fltr_mgmt_struct(hw);
1007
+
5181008 ice_sched_cleanup_all(hw);
519
- ice_shutdown_all_ctrlq(hw);
1009
+ ice_sched_clear_agg(hw);
1010
+ ice_free_seg(hw);
1011
+ ice_free_hw_tbls(hw);
1012
+ mutex_destroy(&hw->tnl_lock);
5201013
5211014 if (hw->port_info) {
5221015 devm_kfree(ice_hw_to_dev(hw), hw->port_info);
5231016 hw->port_info = NULL;
5241017 }
5251018
526
- ice_cleanup_fltr_mgmt_struct(hw);
1019
+ /* Attempt to disable FW logging before shutting down control queues */
1020
+ ice_cfg_fw_log(hw, false);
1021
+ ice_destroy_all_ctrlq(hw);
1022
+
1023
+ /* Clear VSI contexts if not already cleared */
1024
+ ice_clear_all_vsi_ctx(hw);
5271025 }
5281026
5291027 /**
....@@ -532,35 +1030,42 @@
5321030 */
5331031 enum ice_status ice_check_reset(struct ice_hw *hw)
5341032 {
535
- u32 cnt, reg = 0, grst_delay;
1033
+ u32 cnt, reg = 0, grst_timeout, uld_mask;
5361034
5371035 /* Poll for Device Active state in case a recent CORER, GLOBR,
5381036 * or EMPR has occurred. The grst delay value is in 100ms units.
5391037 * Add 1sec for outstanding AQ commands that can take a long time.
5401038 */
541
- grst_delay = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
542
- GLGEN_RSTCTL_GRSTDEL_S) + 10;
1039
+ grst_timeout = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
1040
+ GLGEN_RSTCTL_GRSTDEL_S) + 10;
5431041
544
- for (cnt = 0; cnt < grst_delay; cnt++) {
1042
+ for (cnt = 0; cnt < grst_timeout; cnt++) {
5451043 mdelay(100);
5461044 reg = rd32(hw, GLGEN_RSTAT);
5471045 if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
5481046 break;
5491047 }
5501048
551
- if (cnt == grst_delay) {
1049
+ if (cnt == grst_timeout) {
5521050 ice_debug(hw, ICE_DBG_INIT,
5531051 "Global reset polling failed to complete.\n");
5541052 return ICE_ERR_RESET_FAILED;
5551053 }
5561054
557
-#define ICE_RESET_DONE_MASK (GLNVM_ULD_CORER_DONE_M | \
558
- GLNVM_ULD_GLOBR_DONE_M)
1055
+#define ICE_RESET_DONE_MASK (GLNVM_ULD_PCIER_DONE_M |\
1056
+ GLNVM_ULD_PCIER_DONE_1_M |\
1057
+ GLNVM_ULD_CORER_DONE_M |\
1058
+ GLNVM_ULD_GLOBR_DONE_M |\
1059
+ GLNVM_ULD_POR_DONE_M |\
1060
+ GLNVM_ULD_POR_DONE_1_M |\
1061
+ GLNVM_ULD_PCIER_DONE_2_M)
1062
+
1063
+ uld_mask = ICE_RESET_DONE_MASK;
5591064
5601065 /* Device is Active; check Global Reset processes are done */
5611066 for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
562
- reg = rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK;
563
- if (reg == ICE_RESET_DONE_MASK) {
1067
+ reg = rd32(hw, GLNVM_ULD) & uld_mask;
1068
+ if (reg == uld_mask) {
5641069 ice_debug(hw, ICE_DBG_INIT,
5651070 "Global reset processes done. %d\n", cnt);
5661071 break;
....@@ -608,7 +1113,12 @@
6081113
6091114 wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));
6101115
611
- for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
1116
+ /* Wait for the PFR to complete. The wait time is the global config lock
1117
+ * timeout plus the PFR timeout which will account for a possible reset
1118
+ * that is occurring during a download package operation.
1119
+ */
1120
+ for (cnt = 0; cnt < ICE_GLOBAL_CFG_LOCK_TIMEOUT +
1121
+ ICE_PF_RESET_WAIT_COUNT; cnt++) {
6121122 reg = rd32(hw, PFGEN_CTRL);
6131123 if (!(reg & PFGEN_CTRL_PFSWR_M))
6141124 break;
....@@ -652,6 +1162,8 @@
6521162 ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
6531163 val = GLGEN_RTRIG_GLOBR_M;
6541164 break;
1165
+ default:
1166
+ return ICE_ERR_PARAM;
6551167 }
6561168
6571169 val |= rd32(hw, GLGEN_RTRIG);
....@@ -666,9 +1178,9 @@
6661178 * ice_copy_rxq_ctx_to_hw
6671179 * @hw: pointer to the hardware structure
6681180 * @ice_rxq_ctx: pointer to the rxq context
669
- * @rxq_index: the index of the rx queue
1181
+ * @rxq_index: the index of the Rx queue
6701182 *
671
- * Copies rxq context from dense structure to hw register space
1183
+ * Copies rxq context from dense structure to HW register space
6721184 */
6731185 static enum ice_status
6741186 ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
....@@ -681,7 +1193,7 @@
6811193 if (rxq_index > QRX_CTRL_MAX_INDEX)
6821194 return ICE_ERR_PARAM;
6831195
684
- /* Copy each dword separately to hw */
1196
+ /* Copy each dword separately to HW */
6851197 for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
6861198 wr32(hw, QRX_CONTEXT(i, rxq_index),
6871199 *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
....@@ -715,6 +1227,7 @@
7151227 ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena, 1, 195),
7161228 ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena, 1, 196),
7171229 ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh, 3, 198),
1230
+ ICE_CTX_STORE(ice_rlan_ctx, prefena, 1, 201),
7181231 { 0 }
7191232 };
7201233
....@@ -722,10 +1235,11 @@
7221235 * ice_write_rxq_ctx
7231236 * @hw: pointer to the hardware structure
7241237 * @rlan_ctx: pointer to the rxq context
725
- * @rxq_index: the index of the rx queue
1238
+ * @rxq_index: the index of the Rx queue
7261239 *
7271240 * Converts rxq context from sparse to dense structure and then writes
728
- * it to hw register space
1241
+ * it to HW register space and enables the hardware to prefetch descriptors
1242
+ * instead of only fetching them on demand
7291243 */
7301244 enum ice_status
7311245 ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
....@@ -733,7 +1247,12 @@
7331247 {
7341248 u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
7351249
736
- ice_set_ctx((u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
1250
+ if (!rlan_ctx)
1251
+ return ICE_ERR_BAD_PTR;
1252
+
1253
+ rlan_ctx->prefena = 1;
1254
+
1255
+ ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
7371256 return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
7381257 }
7391258
....@@ -748,6 +1267,7 @@
7481267 ICE_CTX_STORE(ice_tlan_ctx, vmvf_type, 2, 78),
7491268 ICE_CTX_STORE(ice_tlan_ctx, src_vsi, 10, 80),
7501269 ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena, 1, 90),
1270
+ ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag, 1, 91),
7511271 ICE_CTX_STORE(ice_tlan_ctx, alt_vlan, 1, 92),
7521272 ICE_CTX_STORE(ice_tlan_ctx, cpuid, 8, 93),
7531273 ICE_CTX_STORE(ice_tlan_ctx, wb_mode, 1, 101),
....@@ -766,64 +1286,21 @@
7661286 ICE_CTX_STORE(ice_tlan_ctx, drop_ena, 1, 165),
7671287 ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx, 2, 166),
7681288 ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx, 3, 168),
769
- ICE_CTX_STORE(ice_tlan_ctx, int_q_state, 110, 171),
1289
+ ICE_CTX_STORE(ice_tlan_ctx, int_q_state, 122, 171),
7701290 { 0 }
7711291 };
7721292
773
-/**
774
- * ice_debug_cq
775
- * @hw: pointer to the hardware structure
776
- * @mask: debug mask
777
- * @desc: pointer to control queue descriptor
778
- * @buf: pointer to command buffer
779
- * @buf_len: max length of buf
780
- *
781
- * Dumps debug log about control command with descriptor contents.
782
- */
783
-void ice_debug_cq(struct ice_hw *hw, u32 __maybe_unused mask, void *desc,
784
- void *buf, u16 buf_len)
785
-{
786
- struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc;
787
- u16 len;
788
-
789
-#ifndef CONFIG_DYNAMIC_DEBUG
790
- if (!(mask & hw->debug_mask))
791
- return;
792
-#endif
793
-
794
- if (!desc)
795
- return;
796
-
797
- len = le16_to_cpu(cq_desc->datalen);
798
-
799
- ice_debug(hw, mask,
800
- "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
801
- le16_to_cpu(cq_desc->opcode),
802
- le16_to_cpu(cq_desc->flags),
803
- le16_to_cpu(cq_desc->datalen), le16_to_cpu(cq_desc->retval));
804
- ice_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
805
- le32_to_cpu(cq_desc->cookie_high),
806
- le32_to_cpu(cq_desc->cookie_low));
807
- ice_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n",
808
- le32_to_cpu(cq_desc->params.generic.param0),
809
- le32_to_cpu(cq_desc->params.generic.param1));
810
- ice_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n",
811
- le32_to_cpu(cq_desc->params.generic.addr_high),
812
- le32_to_cpu(cq_desc->params.generic.addr_low));
813
- if (buf && cq_desc->datalen != 0) {
814
- ice_debug(hw, mask, "Buffer:\n");
815
- if (buf_len < len)
816
- len = buf_len;
817
-
818
- ice_debug_array(hw, mask, 16, 1, (u8 *)buf, len);
819
- }
820
-}
821
-
8221293 /* FW Admin Queue command wrappers */
1294
+
1295
+/* Software lock/mutex that is meant to be held while the Global Config Lock
1296
+ * in firmware is acquired by the software to prevent most (but not all) types
1297
+ * of AQ commands from being sent to FW
1298
+ */
1299
+DEFINE_MUTEX(ice_global_cfg_lock_sw);
8231300
8241301 /**
8251302 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
826
- * @hw: pointer to the hw struct
1303
+ * @hw: pointer to the HW struct
8271304 * @desc: descriptor describing the command
8281305 * @buf: buffer to use for indirect commands (NULL for direct commands)
8291306 * @buf_size: size of buffer for indirect commands (0 for direct commands)
....@@ -835,12 +1312,43 @@
8351312 ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
8361313 u16 buf_size, struct ice_sq_cd *cd)
8371314 {
838
- return ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd);
1315
+ struct ice_aqc_req_res *cmd = &desc->params.res_owner;
1316
+ bool lock_acquired = false;
1317
+ enum ice_status status;
1318
+
1319
+ /* When a package download is in process (i.e. when the firmware's
1320
+ * Global Configuration Lock resource is held), only the Download
1321
+ * Package, Get Version, Get Package Info List and Release Resource
1322
+ * (with resource ID set to Global Config Lock) AdminQ commands are
1323
+ * allowed; all others must block until the package download completes
1324
+ * and the Global Config Lock is released. See also
1325
+ * ice_acquire_global_cfg_lock().
1326
+ */
1327
+ switch (le16_to_cpu(desc->opcode)) {
1328
+ case ice_aqc_opc_download_pkg:
1329
+ case ice_aqc_opc_get_pkg_info_list:
1330
+ case ice_aqc_opc_get_ver:
1331
+ break;
1332
+ case ice_aqc_opc_release_res:
1333
+ if (le16_to_cpu(cmd->res_id) == ICE_AQC_RES_ID_GLBL_LOCK)
1334
+ break;
1335
+ fallthrough;
1336
+ default:
1337
+ mutex_lock(&ice_global_cfg_lock_sw);
1338
+ lock_acquired = true;
1339
+ break;
1340
+ }
1341
+
1342
+ status = ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd);
1343
+ if (lock_acquired)
1344
+ mutex_unlock(&ice_global_cfg_lock_sw);
1345
+
1346
+ return status;
8391347 }
8401348
8411349 /**
8421350 * ice_aq_get_fw_ver
843
- * @hw: pointer to the hw struct
1351
+ * @hw: pointer to the HW struct
8441352 * @cd: pointer to command details structure or NULL
8451353 *
8461354 * Get the firmware version (0x0001) from the admin queue commands
....@@ -873,8 +1381,45 @@
8731381 }
8741382
8751383 /**
1384
+ * ice_aq_send_driver_ver
1385
+ * @hw: pointer to the HW struct
1386
+ * @dv: driver's major, minor version
1387
+ * @cd: pointer to command details structure or NULL
1388
+ *
1389
+ * Send the driver version (0x0002) to the firmware
1390
+ */
1391
+enum ice_status
1392
+ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
1393
+ struct ice_sq_cd *cd)
1394
+{
1395
+ struct ice_aqc_driver_ver *cmd;
1396
+ struct ice_aq_desc desc;
1397
+ u16 len;
1398
+
1399
+ cmd = &desc.params.driver_ver;
1400
+
1401
+ if (!dv)
1402
+ return ICE_ERR_PARAM;
1403
+
1404
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);
1405
+
1406
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1407
+ cmd->major_ver = dv->major_ver;
1408
+ cmd->minor_ver = dv->minor_ver;
1409
+ cmd->build_ver = dv->build_ver;
1410
+ cmd->subbuild_ver = dv->subbuild_ver;
1411
+
1412
+ len = 0;
1413
+ while (len < sizeof(dv->driver_string) &&
1414
+ isascii(dv->driver_string[len]) && dv->driver_string[len])
1415
+ len++;
1416
+
1417
+ return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
1418
+}
1419
+
1420
+/**
8761421 * ice_aq_q_shutdown
877
- * @hw: pointer to the hw struct
1422
+ * @hw: pointer to the HW struct
8781423 * @unloading: is the driver unloading itself
8791424 *
8801425 * Tell the Firmware that we're shutting down the AdminQ and whether
....@@ -890,15 +1435,15 @@
8901435 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);
8911436
8921437 if (unloading)
893
- cmd->driver_unloading = cpu_to_le32(ICE_AQC_DRIVER_UNLOADING);
1438
+ cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING;
8941439
8951440 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
8961441 }
8971442
8981443 /**
8991444 * ice_aq_req_res
900
- * @hw: pointer to the hw struct
901
- * @res: resource id
1445
+ * @hw: pointer to the HW struct
1446
+ * @res: resource ID
9021447 * @access: access type
9031448 * @sdp_number: resource number
9041449 * @timeout: the maximum time in ms that the driver may hold the resource
....@@ -983,8 +1528,8 @@
9831528
9841529 /**
9851530 * ice_aq_release_res
986
- * @hw: pointer to the hw struct
987
- * @res: resource id
1531
+ * @hw: pointer to the HW struct
1532
+ * @res: resource ID
9881533 * @sdp_number: resource number
9891534 * @cd: pointer to command details structure or NULL
9901535 *
....@@ -1010,7 +1555,7 @@
10101555 /**
10111556 * ice_acquire_res
10121557 * @hw: pointer to the HW structure
1013
- * @res: resource id
1558
+ * @res: resource ID
10141559 * @access: access type (read or write)
10151560 * @timeout: timeout in milliseconds
10161561 *
....@@ -1072,7 +1617,7 @@
10721617 /**
10731618 * ice_release_res
10741619 * @hw: pointer to the HW structure
1075
- * @res: resource id
1620
+ * @res: resource ID
10761621 *
10771622 * This function will release a resource using the proper Admin Command.
10781623 */
....@@ -1084,7 +1629,7 @@
10841629 status = ice_aq_release_res(hw, res, 0, NULL);
10851630
10861631 /* there are some rare cases when trying to release the resource
1087
- * results in an admin Q timeout, so handle them correctly
1632
+ * results in an admin queue timeout, so handle them correctly
10881633 */
10891634 while ((status == ICE_ERR_AQ_TIMEOUT) &&
10901635 (total_delay < hw->adminq.sq_cmd_timeout)) {
....@@ -1095,133 +1640,561 @@
10951640 }
10961641
10971642 /**
1098
- * ice_parse_caps - parse function/device capabilities
1099
- * @hw: pointer to the hw struct
1100
- * @buf: pointer to a buffer containing function/device capability records
1101
- * @cap_count: number of capability records in the list
1102
- * @opc: type of capabilities list to parse
1643
+ * ice_aq_alloc_free_res - command to allocate/free resources
1644
+ * @hw: pointer to the HW struct
1645
+ * @num_entries: number of resource entries in buffer
1646
+ * @buf: Indirect buffer to hold data parameters and response
1647
+ * @buf_size: size of buffer for indirect commands
1648
+ * @opc: pass in the command opcode
1649
+ * @cd: pointer to command details structure or NULL
11031650 *
1104
- * Helper function to parse function(0x000a)/device(0x000b) capabilities list.
1651
+ * Helper function to allocate/free resources using the admin queue commands
11051652 */
1106
-static void
1107
-ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
1108
- enum ice_adminq_opc opc)
1653
+enum ice_status
1654
+ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
1655
+ struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
1656
+ enum ice_adminq_opc opc, struct ice_sq_cd *cd)
11091657 {
1110
- struct ice_aqc_list_caps_elem *cap_resp;
1111
- struct ice_hw_func_caps *func_p = NULL;
1112
- struct ice_hw_dev_caps *dev_p = NULL;
1113
- struct ice_hw_common_caps *caps;
1114
- u32 i;
1658
+ struct ice_aqc_alloc_free_res_cmd *cmd;
1659
+ struct ice_aq_desc desc;
1660
+
1661
+ cmd = &desc.params.sw_res_ctrl;
11151662
11161663 if (!buf)
1117
- return;
1664
+ return ICE_ERR_PARAM;
11181665
1119
- cap_resp = (struct ice_aqc_list_caps_elem *)buf;
1666
+ if (buf_size < (num_entries * sizeof(buf->elem[0])))
1667
+ return ICE_ERR_PARAM;
11201668
1121
- if (opc == ice_aqc_opc_list_dev_caps) {
1122
- dev_p = &hw->dev_caps;
1123
- caps = &dev_p->common_cap;
1124
- } else if (opc == ice_aqc_opc_list_func_caps) {
1125
- func_p = &hw->func_caps;
1126
- caps = &func_p->common_cap;
1127
- } else {
1128
- ice_debug(hw, ICE_DBG_INIT, "wrong opcode\n");
1129
- return;
1669
+ ice_fill_dflt_direct_cmd_desc(&desc, opc);
1670
+
1671
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1672
+
1673
+ cmd->num_entries = cpu_to_le16(num_entries);
1674
+
1675
+ return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
1676
+}
1677
+
1678
+/**
1679
+ * ice_alloc_hw_res - allocate resource
1680
+ * @hw: pointer to the HW struct
1681
+ * @type: type of resource
1682
+ * @num: number of resources to allocate
1683
+ * @btm: allocate from bottom
1684
+ * @res: pointer to array that will receive the resources
1685
+ */
1686
+enum ice_status
1687
+ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
1688
+{
1689
+ struct ice_aqc_alloc_free_res_elem *buf;
1690
+ enum ice_status status;
1691
+ u16 buf_len;
1692
+
1693
+ buf_len = struct_size(buf, elem, num);
1694
+ buf = kzalloc(buf_len, GFP_KERNEL);
1695
+ if (!buf)
1696
+ return ICE_ERR_NO_MEMORY;
1697
+
1698
+ /* Prepare buffer to allocate resource. */
1699
+ buf->num_elems = cpu_to_le16(num);
1700
+ buf->res_type = cpu_to_le16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED |
1701
+ ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX);
1702
+ if (btm)
1703
+ buf->res_type |= cpu_to_le16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM);
1704
+
1705
+ status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
1706
+ ice_aqc_opc_alloc_res, NULL);
1707
+ if (status)
1708
+ goto ice_alloc_res_exit;
1709
+
1710
+ memcpy(res, buf->elem, sizeof(*buf->elem) * num);
1711
+
1712
+ice_alloc_res_exit:
1713
+ kfree(buf);
1714
+ return status;
1715
+}
1716
+
1717
+/**
1718
+ * ice_free_hw_res - free allocated HW resource
1719
+ * @hw: pointer to the HW struct
1720
+ * @type: type of resource to free
1721
+ * @num: number of resources
1722
+ * @res: pointer to array that contains the resources to free
1723
+ */
1724
+enum ice_status ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
1725
+{
1726
+ struct ice_aqc_alloc_free_res_elem *buf;
1727
+ enum ice_status status;
1728
+ u16 buf_len;
1729
+
1730
+ buf_len = struct_size(buf, elem, num);
1731
+ buf = kzalloc(buf_len, GFP_KERNEL);
1732
+ if (!buf)
1733
+ return ICE_ERR_NO_MEMORY;
1734
+
1735
+ /* Prepare buffer to free resource. */
1736
+ buf->num_elems = cpu_to_le16(num);
1737
+ buf->res_type = cpu_to_le16(type);
1738
+ memcpy(buf->elem, res, sizeof(*buf->elem) * num);
1739
+
1740
+ status = ice_aq_alloc_free_res(hw, num, buf, buf_len,
1741
+ ice_aqc_opc_free_res, NULL);
1742
+ if (status)
1743
+ ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
1744
+
1745
+ kfree(buf);
1746
+ return status;
1747
+}
1748
+
1749
+/**
1750
+ * ice_get_num_per_func - determine number of resources per PF
1751
+ * @hw: pointer to the HW structure
1752
+ * @max: value to be evenly split between each PF
1753
+ *
1754
+ * Determine the number of valid functions by going through the bitmap returned
1755
+ * from parsing capabilities and use this to calculate the number of resources
1756
+ * per PF based on the max value passed in.
1757
+ */
1758
+static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
1759
+{
1760
+ u8 funcs;
1761
+
1762
+#define ICE_CAPS_VALID_FUNCS_M 0xFF
1763
+ funcs = hweight8(hw->dev_caps.common_cap.valid_functions &
1764
+ ICE_CAPS_VALID_FUNCS_M);
1765
+
1766
+ if (!funcs)
1767
+ return 0;
1768
+
1769
+ return max / funcs;
1770
+}
1771
+
1772
+/**
1773
+ * ice_parse_common_caps - parse common device/function capabilities
1774
+ * @hw: pointer to the HW struct
1775
+ * @caps: pointer to common capabilities structure
1776
+ * @elem: the capability element to parse
1777
+ * @prefix: message prefix for tracing capabilities
1778
+ *
1779
+ * Given a capability element, extract relevant details into the common
1780
+ * capability structure.
1781
+ *
1782
+ * Returns: true if the capability matches one of the common capability ids,
1783
+ * false otherwise.
1784
+ */
1785
+static bool
1786
+ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
1787
+ struct ice_aqc_list_caps_elem *elem, const char *prefix)
1788
+{
1789
+ u32 logical_id = le32_to_cpu(elem->logical_id);
1790
+ u32 phys_id = le32_to_cpu(elem->phys_id);
1791
+ u32 number = le32_to_cpu(elem->number);
1792
+ u16 cap = le16_to_cpu(elem->cap);
1793
+ bool found = true;
1794
+
1795
+ switch (cap) {
1796
+ case ICE_AQC_CAPS_VALID_FUNCTIONS:
1797
+ caps->valid_functions = number;
1798
+ ice_debug(hw, ICE_DBG_INIT,
1799
+ "%s: valid_functions (bitmap) = %d\n", prefix,
1800
+ caps->valid_functions);
1801
+ break;
1802
+ case ICE_AQC_CAPS_SRIOV:
1803
+ caps->sr_iov_1_1 = (number == 1);
1804
+ ice_debug(hw, ICE_DBG_INIT,
1805
+ "%s: sr_iov_1_1 = %d\n", prefix,
1806
+ caps->sr_iov_1_1);
1807
+ break;
1808
+ case ICE_AQC_CAPS_DCB:
1809
+ caps->dcb = (number == 1);
1810
+ caps->active_tc_bitmap = logical_id;
1811
+ caps->maxtc = phys_id;
1812
+ ice_debug(hw, ICE_DBG_INIT,
1813
+ "%s: dcb = %d\n", prefix, caps->dcb);
1814
+ ice_debug(hw, ICE_DBG_INIT,
1815
+ "%s: active_tc_bitmap = %d\n", prefix,
1816
+ caps->active_tc_bitmap);
1817
+ ice_debug(hw, ICE_DBG_INIT,
1818
+ "%s: maxtc = %d\n", prefix, caps->maxtc);
1819
+ break;
1820
+ case ICE_AQC_CAPS_RSS:
1821
+ caps->rss_table_size = number;
1822
+ caps->rss_table_entry_width = logical_id;
1823
+ ice_debug(hw, ICE_DBG_INIT,
1824
+ "%s: rss_table_size = %d\n", prefix,
1825
+ caps->rss_table_size);
1826
+ ice_debug(hw, ICE_DBG_INIT,
1827
+ "%s: rss_table_entry_width = %d\n", prefix,
1828
+ caps->rss_table_entry_width);
1829
+ break;
1830
+ case ICE_AQC_CAPS_RXQS:
1831
+ caps->num_rxq = number;
1832
+ caps->rxq_first_id = phys_id;
1833
+ ice_debug(hw, ICE_DBG_INIT,
1834
+ "%s: num_rxq = %d\n", prefix,
1835
+ caps->num_rxq);
1836
+ ice_debug(hw, ICE_DBG_INIT,
1837
+ "%s: rxq_first_id = %d\n", prefix,
1838
+ caps->rxq_first_id);
1839
+ break;
1840
+ case ICE_AQC_CAPS_TXQS:
1841
+ caps->num_txq = number;
1842
+ caps->txq_first_id = phys_id;
1843
+ ice_debug(hw, ICE_DBG_INIT,
1844
+ "%s: num_txq = %d\n", prefix,
1845
+ caps->num_txq);
1846
+ ice_debug(hw, ICE_DBG_INIT,
1847
+ "%s: txq_first_id = %d\n", prefix,
1848
+ caps->txq_first_id);
1849
+ break;
1850
+ case ICE_AQC_CAPS_MSIX:
1851
+ caps->num_msix_vectors = number;
1852
+ caps->msix_vector_first_id = phys_id;
1853
+ ice_debug(hw, ICE_DBG_INIT,
1854
+ "%s: num_msix_vectors = %d\n", prefix,
1855
+ caps->num_msix_vectors);
1856
+ ice_debug(hw, ICE_DBG_INIT,
1857
+ "%s: msix_vector_first_id = %d\n", prefix,
1858
+ caps->msix_vector_first_id);
1859
+ break;
1860
+ case ICE_AQC_CAPS_PENDING_NVM_VER:
1861
+ caps->nvm_update_pending_nvm = true;
1862
+ ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_nvm\n", prefix);
1863
+ break;
1864
+ case ICE_AQC_CAPS_PENDING_OROM_VER:
1865
+ caps->nvm_update_pending_orom = true;
1866
+ ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_orom\n", prefix);
1867
+ break;
1868
+ case ICE_AQC_CAPS_PENDING_NET_VER:
1869
+ caps->nvm_update_pending_netlist = true;
1870
+ ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_netlist\n", prefix);
1871
+ break;
1872
+ case ICE_AQC_CAPS_NVM_MGMT:
1873
+ caps->nvm_unified_update =
1874
+ (number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ?
1875
+ true : false;
1876
+ ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix,
1877
+ caps->nvm_unified_update);
1878
+ break;
1879
+ case ICE_AQC_CAPS_MAX_MTU:
1880
+ caps->max_mtu = number;
1881
+ ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
1882
+ prefix, caps->max_mtu);
1883
+ break;
1884
+ default:
1885
+ /* Not one of the recognized common capabilities */
1886
+ found = false;
11301887 }
11311888
1132
- for (i = 0; caps && i < cap_count; i++, cap_resp++) {
1133
- u32 logical_id = le32_to_cpu(cap_resp->logical_id);
1134
- u32 phys_id = le32_to_cpu(cap_resp->phys_id);
1135
- u32 number = le32_to_cpu(cap_resp->number);
1136
- u16 cap = le16_to_cpu(cap_resp->cap);
1889
+ return found;
1890
+}
11371891
1138
- switch (cap) {
1139
- case ICE_AQC_CAPS_VSI:
1140
- if (dev_p) {
1141
- dev_p->num_vsi_allocd_to_host = number;
1142
- ice_debug(hw, ICE_DBG_INIT,
1143
- "HW caps: Dev.VSI cnt = %d\n",
1144
- dev_p->num_vsi_allocd_to_host);
1145
- } else if (func_p) {
1146
- func_p->guaranteed_num_vsi = number;
1147
- ice_debug(hw, ICE_DBG_INIT,
1148
- "HW caps: Func.VSI cnt = %d\n",
1149
- func_p->guaranteed_num_vsi);
1150
- }
1151
- break;
1152
- case ICE_AQC_CAPS_RSS:
1153
- caps->rss_table_size = number;
1154
- caps->rss_table_entry_width = logical_id;
1155
- ice_debug(hw, ICE_DBG_INIT,
1156
- "HW caps: RSS table size = %d\n",
1157
- caps->rss_table_size);
1158
- ice_debug(hw, ICE_DBG_INIT,
1159
- "HW caps: RSS table width = %d\n",
1160
- caps->rss_table_entry_width);
1161
- break;
1162
- case ICE_AQC_CAPS_RXQS:
1163
- caps->num_rxq = number;
1164
- caps->rxq_first_id = phys_id;
1165
- ice_debug(hw, ICE_DBG_INIT,
1166
- "HW caps: Num Rx Qs = %d\n", caps->num_rxq);
1167
- ice_debug(hw, ICE_DBG_INIT,
1168
- "HW caps: Rx first queue ID = %d\n",
1169
- caps->rxq_first_id);
1170
- break;
1171
- case ICE_AQC_CAPS_TXQS:
1172
- caps->num_txq = number;
1173
- caps->txq_first_id = phys_id;
1174
- ice_debug(hw, ICE_DBG_INIT,
1175
- "HW caps: Num Tx Qs = %d\n", caps->num_txq);
1176
- ice_debug(hw, ICE_DBG_INIT,
1177
- "HW caps: Tx first queue ID = %d\n",
1178
- caps->txq_first_id);
1179
- break;
1180
- case ICE_AQC_CAPS_MSIX:
1181
- caps->num_msix_vectors = number;
1182
- caps->msix_vector_first_id = phys_id;
1183
- ice_debug(hw, ICE_DBG_INIT,
1184
- "HW caps: MSIX vector count = %d\n",
1185
- caps->num_msix_vectors);
1186
- ice_debug(hw, ICE_DBG_INIT,
1187
- "HW caps: MSIX first vector index = %d\n",
1188
- caps->msix_vector_first_id);
1189
- break;
1190
- case ICE_AQC_CAPS_MAX_MTU:
1191
- caps->max_mtu = number;
1192
- if (dev_p)
1193
- ice_debug(hw, ICE_DBG_INIT,
1194
- "HW caps: Dev.MaxMTU = %d\n",
1195
- caps->max_mtu);
1196
- else if (func_p)
1197
- ice_debug(hw, ICE_DBG_INIT,
1198
- "HW caps: func.MaxMTU = %d\n",
1199
- caps->max_mtu);
1200
- break;
1201
- default:
1202
- ice_debug(hw, ICE_DBG_INIT,
1203
- "HW caps: Unknown capability[%d]: 0x%x\n", i,
1204
- cap);
1205
- break;
1206
- }
1892
+/**
1893
+ * ice_recalc_port_limited_caps - Recalculate port limited capabilities
1894
+ * @hw: pointer to the HW structure
1895
+ * @caps: pointer to capabilities structure to fix
1896
+ *
1897
+ * Re-calculate the capabilities that are dependent on the number of physical
1898
+ * ports; i.e. some features are not supported or function differently on
1899
+ * devices with more than 4 ports.
1900
+ */
1901
+static void
1902
+ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps)
1903
+{
1904
+ /* This assumes device capabilities are always scanned before function
1905
+ * capabilities during the initialization flow.
1906
+ */
1907
+ if (hw->dev_caps.num_funcs > 4) {
1908
+ /* Max 4 TCs per port */
1909
+ caps->maxtc = 4;
1910
+ ice_debug(hw, ICE_DBG_INIT,
1911
+ "reducing maxtc to %d (based on #ports)\n",
1912
+ caps->maxtc);
12071913 }
12081914 }
12091915
12101916 /**
1211
- * ice_aq_discover_caps - query function/device capabilities
1212
- * @hw: pointer to the hw struct
1213
- * @buf: a virtual buffer to hold the capabilities
1214
- * @buf_size: Size of the virtual buffer
1215
- * @data_size: Size of the returned data, or buf size needed if AQ err==ENOMEM
1216
- * @opc: capabilities type to discover - pass in the command opcode
1917
+ * ice_parse_vf_func_caps - Parse ICE_AQC_CAPS_VF function caps
1918
+ * @hw: pointer to the HW struct
1919
+ * @func_p: pointer to function capabilities structure
1920
+ * @cap: pointer to the capability element to parse
1921
+ *
1922
+ * Extract function capabilities for ICE_AQC_CAPS_VF.
1923
+ */
1924
+static void
1925
+ice_parse_vf_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
1926
+ struct ice_aqc_list_caps_elem *cap)
1927
+{
1928
+ u32 logical_id = le32_to_cpu(cap->logical_id);
1929
+ u32 number = le32_to_cpu(cap->number);
1930
+
1931
+ func_p->num_allocd_vfs = number;
1932
+ func_p->vf_base_id = logical_id;
1933
+ ice_debug(hw, ICE_DBG_INIT, "func caps: num_allocd_vfs = %d\n",
1934
+ func_p->num_allocd_vfs);
1935
+ ice_debug(hw, ICE_DBG_INIT, "func caps: vf_base_id = %d\n",
1936
+ func_p->vf_base_id);
1937
+}
1938
+
1939
+/**
1940
+ * ice_parse_vsi_func_caps - Parse ICE_AQC_CAPS_VSI function caps
1941
+ * @hw: pointer to the HW struct
1942
+ * @func_p: pointer to function capabilities structure
1943
+ * @cap: pointer to the capability element to parse
1944
+ *
1945
+ * Extract function capabilities for ICE_AQC_CAPS_VSI.
1946
+ */
1947
+static void
1948
+ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
1949
+ struct ice_aqc_list_caps_elem *cap)
1950
+{
1951
+ func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI);
1952
+ ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n",
1953
+ le32_to_cpu(cap->number));
1954
+ ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %d\n",
1955
+ func_p->guar_num_vsi);
1956
+}
1957
+
1958
+/**
1959
+ * ice_parse_fdir_func_caps - Parse ICE_AQC_CAPS_FD function caps
1960
+ * @hw: pointer to the HW struct
1961
+ * @func_p: pointer to function capabilities structure
1962
+ *
1963
+ * Extract function capabilities for ICE_AQC_CAPS_FD.
1964
+ */
1965
+static void
1966
+ice_parse_fdir_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p)
1967
+{
1968
+ u32 reg_val, val;
1969
+
1970
+ reg_val = rd32(hw, GLQF_FD_SIZE);
1971
+ val = (reg_val & GLQF_FD_SIZE_FD_GSIZE_M) >>
1972
+ GLQF_FD_SIZE_FD_GSIZE_S;
1973
+ func_p->fd_fltr_guar =
1974
+ ice_get_num_per_func(hw, val);
1975
+ val = (reg_val & GLQF_FD_SIZE_FD_BSIZE_M) >>
1976
+ GLQF_FD_SIZE_FD_BSIZE_S;
1977
+ func_p->fd_fltr_best_effort = val;
1978
+
1979
+ ice_debug(hw, ICE_DBG_INIT,
1980
+ "func caps: fd_fltr_guar = %d\n",
1981
+ func_p->fd_fltr_guar);
1982
+ ice_debug(hw, ICE_DBG_INIT,
1983
+ "func caps: fd_fltr_best_effort = %d\n",
1984
+ func_p->fd_fltr_best_effort);
1985
+}
1986
+
1987
+/**
1988
+ * ice_parse_func_caps - Parse function capabilities
1989
+ * @hw: pointer to the HW struct
1990
+ * @func_p: pointer to function capabilities structure
1991
+ * @buf: buffer containing the function capability records
1992
+ * @cap_count: the number of capabilities
1993
+ *
1994
+ * Helper function to parse function (0x000A) capabilities list. For
1995
+ * capabilities shared between device and function, this relies on
1996
+ * ice_parse_common_caps.
1997
+ *
1998
+ * Loop through the list of provided capabilities and extract the relevant
1999
+ * data into the function capabilities structured.
2000
+ */
2001
+static void
2002
+ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2003
+ void *buf, u32 cap_count)
2004
+{
2005
+ struct ice_aqc_list_caps_elem *cap_resp;
2006
+ u32 i;
2007
+
2008
+ cap_resp = (struct ice_aqc_list_caps_elem *)buf;
2009
+
2010
+ memset(func_p, 0, sizeof(*func_p));
2011
+
2012
+ for (i = 0; i < cap_count; i++) {
2013
+ u16 cap = le16_to_cpu(cap_resp[i].cap);
2014
+ bool found;
2015
+
2016
+ found = ice_parse_common_caps(hw, &func_p->common_cap,
2017
+ &cap_resp[i], "func caps");
2018
+
2019
+ switch (cap) {
2020
+ case ICE_AQC_CAPS_VF:
2021
+ ice_parse_vf_func_caps(hw, func_p, &cap_resp[i]);
2022
+ break;
2023
+ case ICE_AQC_CAPS_VSI:
2024
+ ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
2025
+ break;
2026
+ case ICE_AQC_CAPS_FD:
2027
+ ice_parse_fdir_func_caps(hw, func_p);
2028
+ break;
2029
+ default:
2030
+ /* Don't list common capabilities as unknown */
2031
+ if (!found)
2032
+ ice_debug(hw, ICE_DBG_INIT,
2033
+ "func caps: unknown capability[%d]: 0x%x\n",
2034
+ i, cap);
2035
+ break;
2036
+ }
2037
+ }
2038
+
2039
+ ice_recalc_port_limited_caps(hw, &func_p->common_cap);
2040
+}
2041
+
2042
+/**
2043
+ * ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps
2044
+ * @hw: pointer to the HW struct
2045
+ * @dev_p: pointer to device capabilities structure
2046
+ * @cap: capability element to parse
2047
+ *
2048
+ * Parse ICE_AQC_CAPS_VALID_FUNCTIONS for device capabilities.
2049
+ */
2050
+static void
2051
+ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2052
+ struct ice_aqc_list_caps_elem *cap)
2053
+{
2054
+ u32 number = le32_to_cpu(cap->number);
2055
+
2056
+ dev_p->num_funcs = hweight32(number);
2057
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n",
2058
+ dev_p->num_funcs);
2059
+}
2060
+
2061
+/**
2062
+ * ice_parse_vf_dev_caps - Parse ICE_AQC_CAPS_VF device caps
2063
+ * @hw: pointer to the HW struct
2064
+ * @dev_p: pointer to device capabilities structure
2065
+ * @cap: capability element to parse
2066
+ *
2067
+ * Parse ICE_AQC_CAPS_VF for device capabilities.
2068
+ */
2069
+static void
2070
+ice_parse_vf_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2071
+ struct ice_aqc_list_caps_elem *cap)
2072
+{
2073
+ u32 number = le32_to_cpu(cap->number);
2074
+
2075
+ dev_p->num_vfs_exposed = number;
2076
+ ice_debug(hw, ICE_DBG_INIT, "dev_caps: num_vfs_exposed = %d\n",
2077
+ dev_p->num_vfs_exposed);
2078
+}
2079
+
2080
+/**
2081
+ * ice_parse_vsi_dev_caps - Parse ICE_AQC_CAPS_VSI device caps
2082
+ * @hw: pointer to the HW struct
2083
+ * @dev_p: pointer to device capabilities structure
2084
+ * @cap: capability element to parse
2085
+ *
2086
+ * Parse ICE_AQC_CAPS_VSI for device capabilities.
2087
+ */
2088
+static void
2089
+ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2090
+ struct ice_aqc_list_caps_elem *cap)
2091
+{
2092
+ u32 number = le32_to_cpu(cap->number);
2093
+
2094
+ dev_p->num_vsi_allocd_to_host = number;
2095
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %d\n",
2096
+ dev_p->num_vsi_allocd_to_host);
2097
+}
2098
+
2099
+/**
2100
+ * ice_parse_fdir_dev_caps - Parse ICE_AQC_CAPS_FD device caps
2101
+ * @hw: pointer to the HW struct
2102
+ * @dev_p: pointer to device capabilities structure
2103
+ * @cap: capability element to parse
2104
+ *
2105
+ * Parse ICE_AQC_CAPS_FD for device capabilities.
2106
+ */
2107
+static void
2108
+ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2109
+ struct ice_aqc_list_caps_elem *cap)
2110
+{
2111
+ u32 number = le32_to_cpu(cap->number);
2112
+
2113
+ dev_p->num_flow_director_fltr = number;
2114
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: num_flow_director_fltr = %d\n",
2115
+ dev_p->num_flow_director_fltr);
2116
+}
2117
+
2118
+/**
2119
+ * ice_parse_dev_caps - Parse device capabilities
2120
+ * @hw: pointer to the HW struct
2121
+ * @dev_p: pointer to device capabilities structure
2122
+ * @buf: buffer containing the device capability records
2123
+ * @cap_count: the number of capabilities
2124
+ *
2125
+ * Helper device to parse device (0x000B) capabilities list. For
2126
+ * capabilities shared between device and function, this relies on
2127
+ * ice_parse_common_caps.
2128
+ *
2129
+ * Loop through the list of provided capabilities and extract the relevant
2130
+ * data into the device capabilities structured.
2131
+ */
2132
+static void
2133
+ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2134
+ void *buf, u32 cap_count)
2135
+{
2136
+ struct ice_aqc_list_caps_elem *cap_resp;
2137
+ u32 i;
2138
+
2139
+ cap_resp = (struct ice_aqc_list_caps_elem *)buf;
2140
+
2141
+ memset(dev_p, 0, sizeof(*dev_p));
2142
+
2143
+ for (i = 0; i < cap_count; i++) {
2144
+ u16 cap = le16_to_cpu(cap_resp[i].cap);
2145
+ bool found;
2146
+
2147
+ found = ice_parse_common_caps(hw, &dev_p->common_cap,
2148
+ &cap_resp[i], "dev caps");
2149
+
2150
+ switch (cap) {
2151
+ case ICE_AQC_CAPS_VALID_FUNCTIONS:
2152
+ ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]);
2153
+ break;
2154
+ case ICE_AQC_CAPS_VF:
2155
+ ice_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]);
2156
+ break;
2157
+ case ICE_AQC_CAPS_VSI:
2158
+ ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
2159
+ break;
2160
+ case ICE_AQC_CAPS_FD:
2161
+ ice_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]);
2162
+ break;
2163
+ default:
2164
+ /* Don't list common capabilities as unknown */
2165
+ if (!found)
2166
+ ice_debug(hw, ICE_DBG_INIT,
2167
+ "dev caps: unknown capability[%d]: 0x%x\n",
2168
+ i, cap);
2169
+ break;
2170
+ }
2171
+ }
2172
+
2173
+ ice_recalc_port_limited_caps(hw, &dev_p->common_cap);
2174
+}
2175
+
2176
+/**
2177
+ * ice_aq_list_caps - query function/device capabilities
2178
+ * @hw: pointer to the HW struct
2179
+ * @buf: a buffer to hold the capabilities
2180
+ * @buf_size: size of the buffer
2181
+ * @cap_count: if not NULL, set to the number of capabilities reported
2182
+ * @opc: capabilities type to discover, device or function
12172183 * @cd: pointer to command details structure or NULL
12182184 *
1219
- * Get the function(0x000a)/device(0x000b) capabilities description from
1220
- * the firmware.
2185
+ * Get the function (0x000A) or device (0x000B) capabilities description from
2186
+ * firmware and store it in the buffer.
2187
+ *
2188
+ * If the cap_count pointer is not NULL, then it is set to the number of
2189
+ * capabilities firmware will report. Note that if the buffer size is too
2190
+ * small, it is possible the command will return ICE_AQ_ERR_ENOMEM. The
2191
+ * cap_count will still be updated in this case. It is recommended that the
2192
+ * buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that
2193
+ * firmware could return) to avoid this.
12212194 */
1222
-static enum ice_status
1223
-ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u16 *data_size,
1224
- enum ice_adminq_opc opc, struct ice_sq_cd *cd)
2195
+enum ice_status
2196
+ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
2197
+ enum ice_adminq_opc opc, struct ice_sq_cd *cd)
12252198 {
12262199 struct ice_aqc_list_caps *cmd;
12272200 struct ice_aq_desc desc;
....@@ -1234,13 +2207,149 @@
12342207 return ICE_ERR_PARAM;
12352208
12362209 ice_fill_dflt_direct_cmd_desc(&desc, opc);
1237
-
12382210 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
1239
- if (!status)
1240
- ice_parse_caps(hw, buf, le32_to_cpu(cmd->count), opc);
1241
- *data_size = le16_to_cpu(desc.datalen);
2211
+
2212
+ if (cap_count)
2213
+ *cap_count = le32_to_cpu(cmd->count);
12422214
12432215 return status;
2216
+}
2217
+
2218
+/**
2219
+ * ice_discover_dev_caps - Read and extract device capabilities
2220
+ * @hw: pointer to the hardware structure
2221
+ * @dev_caps: pointer to device capabilities structure
2222
+ *
2223
+ * Read the device capabilities and extract them into the dev_caps structure
2224
+ * for later use.
2225
+ */
2226
+enum ice_status
2227
+ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps)
2228
+{
2229
+ enum ice_status status;
2230
+ u32 cap_count = 0;
2231
+ void *cbuf;
2232
+
2233
+ cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
2234
+ if (!cbuf)
2235
+ return ICE_ERR_NO_MEMORY;
2236
+
2237
+ /* Although the driver doesn't know the number of capabilities the
2238
+ * device will return, we can simply send a 4KB buffer, the maximum
2239
+ * possible size that firmware can return.
2240
+ */
2241
+ cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
2242
+
2243
+ status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
2244
+ ice_aqc_opc_list_dev_caps, NULL);
2245
+ if (!status)
2246
+ ice_parse_dev_caps(hw, dev_caps, cbuf, cap_count);
2247
+ kfree(cbuf);
2248
+
2249
+ return status;
2250
+}
2251
+
2252
+/**
2253
+ * ice_discover_func_caps - Read and extract function capabilities
2254
+ * @hw: pointer to the hardware structure
2255
+ * @func_caps: pointer to function capabilities structure
2256
+ *
2257
+ * Read the function capabilities and extract them into the func_caps structure
2258
+ * for later use.
2259
+ */
2260
+static enum ice_status
2261
+ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps)
2262
+{
2263
+ enum ice_status status;
2264
+ u32 cap_count = 0;
2265
+ void *cbuf;
2266
+
2267
+ cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
2268
+ if (!cbuf)
2269
+ return ICE_ERR_NO_MEMORY;
2270
+
2271
+ /* Although the driver doesn't know the number of capabilities the
2272
+ * device will return, we can simply send a 4KB buffer, the maximum
2273
+ * possible size that firmware can return.
2274
+ */
2275
+ cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
2276
+
2277
+ status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
2278
+ ice_aqc_opc_list_func_caps, NULL);
2279
+ if (!status)
2280
+ ice_parse_func_caps(hw, func_caps, cbuf, cap_count);
2281
+ kfree(cbuf);
2282
+
2283
+ return status;
2284
+}
2285
+
2286
+/**
2287
+ * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode
2288
+ * @hw: pointer to the hardware structure
2289
+ */
2290
+void ice_set_safe_mode_caps(struct ice_hw *hw)
2291
+{
2292
+ struct ice_hw_func_caps *func_caps = &hw->func_caps;
2293
+ struct ice_hw_dev_caps *dev_caps = &hw->dev_caps;
2294
+ struct ice_hw_common_caps cached_caps;
2295
+ u32 num_funcs;
2296
+
2297
+ /* cache some func_caps values that should be restored after memset */
2298
+ cached_caps = func_caps->common_cap;
2299
+
2300
+ /* unset func capabilities */
2301
+ memset(func_caps, 0, sizeof(*func_caps));
2302
+
2303
+#define ICE_RESTORE_FUNC_CAP(name) \
2304
+ func_caps->common_cap.name = cached_caps.name
2305
+
2306
+ /* restore cached values */
2307
+ ICE_RESTORE_FUNC_CAP(valid_functions);
2308
+ ICE_RESTORE_FUNC_CAP(txq_first_id);
2309
+ ICE_RESTORE_FUNC_CAP(rxq_first_id);
2310
+ ICE_RESTORE_FUNC_CAP(msix_vector_first_id);
2311
+ ICE_RESTORE_FUNC_CAP(max_mtu);
2312
+ ICE_RESTORE_FUNC_CAP(nvm_unified_update);
2313
+ ICE_RESTORE_FUNC_CAP(nvm_update_pending_nvm);
2314
+ ICE_RESTORE_FUNC_CAP(nvm_update_pending_orom);
2315
+ ICE_RESTORE_FUNC_CAP(nvm_update_pending_netlist);
2316
+
2317
+ /* one Tx and one Rx queue in safe mode */
2318
+ func_caps->common_cap.num_rxq = 1;
2319
+ func_caps->common_cap.num_txq = 1;
2320
+
2321
+ /* two MSIX vectors, one for traffic and one for misc causes */
2322
+ func_caps->common_cap.num_msix_vectors = 2;
2323
+ func_caps->guar_num_vsi = 1;
2324
+
2325
+ /* cache some dev_caps values that should be restored after memset */
2326
+ cached_caps = dev_caps->common_cap;
2327
+ num_funcs = dev_caps->num_funcs;
2328
+
2329
+ /* unset dev capabilities */
2330
+ memset(dev_caps, 0, sizeof(*dev_caps));
2331
+
2332
+#define ICE_RESTORE_DEV_CAP(name) \
2333
+ dev_caps->common_cap.name = cached_caps.name
2334
+
2335
+ /* restore cached values */
2336
+ ICE_RESTORE_DEV_CAP(valid_functions);
2337
+ ICE_RESTORE_DEV_CAP(txq_first_id);
2338
+ ICE_RESTORE_DEV_CAP(rxq_first_id);
2339
+ ICE_RESTORE_DEV_CAP(msix_vector_first_id);
2340
+ ICE_RESTORE_DEV_CAP(max_mtu);
2341
+ ICE_RESTORE_DEV_CAP(nvm_unified_update);
2342
+ ICE_RESTORE_DEV_CAP(nvm_update_pending_nvm);
2343
+ ICE_RESTORE_DEV_CAP(nvm_update_pending_orom);
2344
+ ICE_RESTORE_DEV_CAP(nvm_update_pending_netlist);
2345
+ dev_caps->num_funcs = num_funcs;
2346
+
2347
+ /* one Tx and one Rx queue per function in safe mode */
2348
+ dev_caps->common_cap.num_rxq = num_funcs;
2349
+ dev_caps->common_cap.num_txq = num_funcs;
2350
+
2351
+ /* two MSIX vectors per function */
2352
+ dev_caps->common_cap.num_msix_vectors = 2 * num_funcs;
12442353 }
12452354
12462355 /**
....@@ -1250,49 +2359,17 @@
12502359 enum ice_status ice_get_caps(struct ice_hw *hw)
12512360 {
12522361 enum ice_status status;
1253
- u16 data_size = 0;
1254
- u16 cbuf_len;
1255
- u8 retries;
12562362
1257
- /* The driver doesn't know how many capabilities the device will return
1258
- * so the buffer size required isn't known ahead of time. The driver
1259
- * starts with cbuf_len and if this turns out to be insufficient, the
1260
- * device returns ICE_AQ_RC_ENOMEM and also the buffer size it needs.
1261
- * The driver then allocates the buffer of this size and retries the
1262
- * operation. So it follows that the retry count is 2.
1263
- */
1264
-#define ICE_GET_CAP_BUF_COUNT 40
1265
-#define ICE_GET_CAP_RETRY_COUNT 2
2363
+ status = ice_discover_dev_caps(hw, &hw->dev_caps);
2364
+ if (status)
2365
+ return status;
12662366
1267
- cbuf_len = ICE_GET_CAP_BUF_COUNT *
1268
- sizeof(struct ice_aqc_list_caps_elem);
1269
-
1270
- retries = ICE_GET_CAP_RETRY_COUNT;
1271
-
1272
- do {
1273
- void *cbuf;
1274
-
1275
- cbuf = devm_kzalloc(ice_hw_to_dev(hw), cbuf_len, GFP_KERNEL);
1276
- if (!cbuf)
1277
- return ICE_ERR_NO_MEMORY;
1278
-
1279
- status = ice_aq_discover_caps(hw, cbuf, cbuf_len, &data_size,
1280
- ice_aqc_opc_list_func_caps, NULL);
1281
- devm_kfree(ice_hw_to_dev(hw), cbuf);
1282
-
1283
- if (!status || hw->adminq.sq_last_status != ICE_AQ_RC_ENOMEM)
1284
- break;
1285
-
1286
- /* If ENOMEM is returned, try again with bigger buffer */
1287
- cbuf_len = data_size;
1288
- } while (--retries);
1289
-
1290
- return status;
2367
+ return ice_discover_func_caps(hw, &hw->func_caps);
12912368 }
12922369
12932370 /**
12942371 * ice_aq_manage_mac_write - manage MAC address write command
1295
- * @hw: pointer to the hw struct
2372
+ * @hw: pointer to the HW struct
12962373 * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
12972374 * @flags: flags to control write behavior
12982375 * @cd: pointer to command details structure or NULL
....@@ -1300,7 +2377,7 @@
13002377 * This function is used to write MAC address to the NVM (0x0108).
13012378 */
13022379 enum ice_status
1303
-ice_aq_manage_mac_write(struct ice_hw *hw, u8 *mac_addr, u8 flags,
2380
+ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
13042381 struct ice_sq_cd *cd)
13052382 {
13062383 struct ice_aqc_manage_mac_write *cmd;
....@@ -1310,17 +2387,14 @@
13102387 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);
13112388
13122389 cmd->flags = flags;
1313
-
1314
- /* Prep values for flags, sah, sal */
1315
- cmd->sah = htons(*((u16 *)mac_addr));
1316
- cmd->sal = htonl(*((u32 *)(mac_addr + 2)));
2390
+ ether_addr_copy(cmd->mac_addr, mac_addr);
13172391
13182392 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
13192393 }
13202394
13212395 /**
13222396 * ice_aq_clear_pxe_mode
1323
- * @hw: pointer to the hw struct
2397
+ * @hw: pointer to the HW struct
13242398 *
13252399 * Tell the firmware that the driver is taking over from PXE (0x0110).
13262400 */
....@@ -1336,7 +2410,7 @@
13362410
13372411 /**
13382412 * ice_clear_pxe_mode - clear pxe operations mode
1339
- * @hw: pointer to the hw struct
2413
+ * @hw: pointer to the HW struct
13402414 *
13412415 * Make sure all PXE mode settings are cleared, including things
13422416 * like descriptor fetch/write-back mode.
....@@ -1348,9 +2422,185 @@
13482422 }
13492423
13502424 /**
2425
+ * ice_get_link_speed_based_on_phy_type - returns link speed
2426
+ * @phy_type_low: lower part of phy_type
2427
+ * @phy_type_high: higher part of phy_type
2428
+ *
2429
+ * This helper function will convert an entry in PHY type structure
2430
+ * [phy_type_low, phy_type_high] to its corresponding link speed.
2431
+ * Note: In the structure of [phy_type_low, phy_type_high], there should
2432
+ * be one bit set, as this function will convert one PHY type to its
2433
+ * speed.
2434
+ * If no bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
2435
+ * If more than one bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
2436
+ */
2437
+static u16
2438
+ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
2439
+{
2440
+ u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
2441
+ u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
2442
+
2443
+ switch (phy_type_low) {
2444
+ case ICE_PHY_TYPE_LOW_100BASE_TX:
2445
+ case ICE_PHY_TYPE_LOW_100M_SGMII:
2446
+ speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
2447
+ break;
2448
+ case ICE_PHY_TYPE_LOW_1000BASE_T:
2449
+ case ICE_PHY_TYPE_LOW_1000BASE_SX:
2450
+ case ICE_PHY_TYPE_LOW_1000BASE_LX:
2451
+ case ICE_PHY_TYPE_LOW_1000BASE_KX:
2452
+ case ICE_PHY_TYPE_LOW_1G_SGMII:
2453
+ speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
2454
+ break;
2455
+ case ICE_PHY_TYPE_LOW_2500BASE_T:
2456
+ case ICE_PHY_TYPE_LOW_2500BASE_X:
2457
+ case ICE_PHY_TYPE_LOW_2500BASE_KX:
2458
+ speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
2459
+ break;
2460
+ case ICE_PHY_TYPE_LOW_5GBASE_T:
2461
+ case ICE_PHY_TYPE_LOW_5GBASE_KR:
2462
+ speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
2463
+ break;
2464
+ case ICE_PHY_TYPE_LOW_10GBASE_T:
2465
+ case ICE_PHY_TYPE_LOW_10G_SFI_DA:
2466
+ case ICE_PHY_TYPE_LOW_10GBASE_SR:
2467
+ case ICE_PHY_TYPE_LOW_10GBASE_LR:
2468
+ case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
2469
+ case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
2470
+ case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
2471
+ speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
2472
+ break;
2473
+ case ICE_PHY_TYPE_LOW_25GBASE_T:
2474
+ case ICE_PHY_TYPE_LOW_25GBASE_CR:
2475
+ case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
2476
+ case ICE_PHY_TYPE_LOW_25GBASE_CR1:
2477
+ case ICE_PHY_TYPE_LOW_25GBASE_SR:
2478
+ case ICE_PHY_TYPE_LOW_25GBASE_LR:
2479
+ case ICE_PHY_TYPE_LOW_25GBASE_KR:
2480
+ case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
2481
+ case ICE_PHY_TYPE_LOW_25GBASE_KR1:
2482
+ case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
2483
+ case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
2484
+ speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
2485
+ break;
2486
+ case ICE_PHY_TYPE_LOW_40GBASE_CR4:
2487
+ case ICE_PHY_TYPE_LOW_40GBASE_SR4:
2488
+ case ICE_PHY_TYPE_LOW_40GBASE_LR4:
2489
+ case ICE_PHY_TYPE_LOW_40GBASE_KR4:
2490
+ case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
2491
+ case ICE_PHY_TYPE_LOW_40G_XLAUI:
2492
+ speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
2493
+ break;
2494
+ case ICE_PHY_TYPE_LOW_50GBASE_CR2:
2495
+ case ICE_PHY_TYPE_LOW_50GBASE_SR2:
2496
+ case ICE_PHY_TYPE_LOW_50GBASE_LR2:
2497
+ case ICE_PHY_TYPE_LOW_50GBASE_KR2:
2498
+ case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
2499
+ case ICE_PHY_TYPE_LOW_50G_LAUI2:
2500
+ case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
2501
+ case ICE_PHY_TYPE_LOW_50G_AUI2:
2502
+ case ICE_PHY_TYPE_LOW_50GBASE_CP:
2503
+ case ICE_PHY_TYPE_LOW_50GBASE_SR:
2504
+ case ICE_PHY_TYPE_LOW_50GBASE_FR:
2505
+ case ICE_PHY_TYPE_LOW_50GBASE_LR:
2506
+ case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
2507
+ case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
2508
+ case ICE_PHY_TYPE_LOW_50G_AUI1:
2509
+ speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB;
2510
+ break;
2511
+ case ICE_PHY_TYPE_LOW_100GBASE_CR4:
2512
+ case ICE_PHY_TYPE_LOW_100GBASE_SR4:
2513
+ case ICE_PHY_TYPE_LOW_100GBASE_LR4:
2514
+ case ICE_PHY_TYPE_LOW_100GBASE_KR4:
2515
+ case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
2516
+ case ICE_PHY_TYPE_LOW_100G_CAUI4:
2517
+ case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
2518
+ case ICE_PHY_TYPE_LOW_100G_AUI4:
2519
+ case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
2520
+ case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
2521
+ case ICE_PHY_TYPE_LOW_100GBASE_CP2:
2522
+ case ICE_PHY_TYPE_LOW_100GBASE_SR2:
2523
+ case ICE_PHY_TYPE_LOW_100GBASE_DR:
2524
+ speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB;
2525
+ break;
2526
+ default:
2527
+ speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
2528
+ break;
2529
+ }
2530
+
2531
+ switch (phy_type_high) {
2532
+ case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
2533
+ case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
2534
+ case ICE_PHY_TYPE_HIGH_100G_CAUI2:
2535
+ case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
2536
+ case ICE_PHY_TYPE_HIGH_100G_AUI2:
2537
+ speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
2538
+ break;
2539
+ default:
2540
+ speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
2541
+ break;
2542
+ }
2543
+
2544
+ if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
2545
+ speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
2546
+ return ICE_AQ_LINK_SPEED_UNKNOWN;
2547
+ else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
2548
+ speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
2549
+ return ICE_AQ_LINK_SPEED_UNKNOWN;
2550
+ else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
2551
+ speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
2552
+ return speed_phy_type_low;
2553
+ else
2554
+ return speed_phy_type_high;
2555
+}
2556
+
2557
+/**
2558
+ * ice_update_phy_type
2559
+ * @phy_type_low: pointer to the lower part of phy_type
2560
+ * @phy_type_high: pointer to the higher part of phy_type
2561
+ * @link_speeds_bitmap: targeted link speeds bitmap
2562
+ *
2563
+ * Note: For the link_speeds_bitmap structure, you can check it at
2564
+ * [ice_aqc_get_link_status->link_speed]. Caller can pass in
2565
+ * link_speeds_bitmap include multiple speeds.
2566
+ *
2567
+ * Each entry in this [phy_type_low, phy_type_high] structure will
2568
+ * present a certain link speed. This helper function will turn on bits
2569
+ * in [phy_type_low, phy_type_high] structure based on the value of
2570
+ * link_speeds_bitmap input parameter.
2571
+ */
2572
+void
2573
+ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
2574
+ u16 link_speeds_bitmap)
2575
+{
2576
+ u64 pt_high;
2577
+ u64 pt_low;
2578
+ int index;
2579
+ u16 speed;
2580
+
2581
+ /* We first check with low part of phy_type */
2582
+ for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
2583
+ pt_low = BIT_ULL(index);
2584
+ speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);
2585
+
2586
+ if (link_speeds_bitmap & speed)
2587
+ *phy_type_low |= BIT_ULL(index);
2588
+ }
2589
+
2590
+ /* We then check with high part of phy_type */
2591
+ for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
2592
+ pt_high = BIT_ULL(index);
2593
+ speed = ice_get_link_speed_based_on_phy_type(0, pt_high);
2594
+
2595
+ if (link_speeds_bitmap & speed)
2596
+ *phy_type_high |= BIT_ULL(index);
2597
+ }
2598
+}
2599
+
2600
+/**
13512601 * ice_aq_set_phy_cfg
1352
- * @hw: pointer to the hw struct
1353
- * @lport: logical port number
2602
+ * @hw: pointer to the HW struct
2603
+ * @pi: port info structure of the interested logical port
13542604 * @cfg: structure with PHY configuration data to be set
13552605 * @cd: pointer to command details structure or NULL
13562606 *
....@@ -1359,86 +2609,185 @@
13592609 * mode as the PF may not have the privilege to set some of the PHY Config
13602610 * parameters. This status will be indicated by the command response (0x0601).
13612611 */
1362
-static enum ice_status
1363
-ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport,
2612
+enum ice_status
2613
+ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
13642614 struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
13652615 {
1366
- struct ice_aqc_set_phy_cfg *cmd;
13672616 struct ice_aq_desc desc;
2617
+ enum ice_status status;
13682618
13692619 if (!cfg)
13702620 return ICE_ERR_PARAM;
13712621
1372
- cmd = &desc.params.set_phy;
1373
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
1374
- cmd->lport_num = lport;
2622
+ /* Ensure that only valid bits of cfg->caps can be turned on. */
2623
+ if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
2624
+ ice_debug(hw, ICE_DBG_PHY,
2625
+ "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n",
2626
+ cfg->caps);
13752627
1376
- return ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
2628
+ cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK;
2629
+ }
2630
+
2631
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
2632
+ desc.params.set_phy.lport_num = pi->lport;
2633
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
2634
+
2635
+ ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n");
2636
+ ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
2637
+ (unsigned long long)le64_to_cpu(cfg->phy_type_low));
2638
+ ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
2639
+ (unsigned long long)le64_to_cpu(cfg->phy_type_high));
2640
+ ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", cfg->caps);
2641
+ ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n",
2642
+ cfg->low_power_ctrl_an);
2643
+ ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", cfg->eee_cap);
2644
+ ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n", cfg->eeer_value);
2645
+ ice_debug(hw, ICE_DBG_LINK, " link_fec_opt = 0x%x\n",
2646
+ cfg->link_fec_opt);
2647
+
2648
+ status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
2649
+ if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
2650
+ status = 0;
2651
+
2652
+ if (!status)
2653
+ pi->phy.curr_user_phy_cfg = *cfg;
2654
+
2655
+ return status;
13772656 }
13782657
13792658 /**
13802659 * ice_update_link_info - update status of the HW network link
13812660 * @pi: port info structure of the interested logical port
13822661 */
1383
-static enum ice_status
1384
-ice_update_link_info(struct ice_port_info *pi)
2662
+enum ice_status ice_update_link_info(struct ice_port_info *pi)
13852663 {
1386
- struct ice_aqc_get_phy_caps_data *pcaps;
1387
- struct ice_phy_info *phy_info;
2664
+ struct ice_link_status *li;
13882665 enum ice_status status;
1389
- struct ice_hw *hw;
13902666
13912667 if (!pi)
13922668 return ICE_ERR_PARAM;
13932669
1394
- hw = pi->hw;
2670
+ li = &pi->phy.link_info;
13952671
1396
- pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
1397
- if (!pcaps)
1398
- return ICE_ERR_NO_MEMORY;
1399
-
1400
- phy_info = &pi->phy;
14012672 status = ice_aq_get_link_info(pi, true, NULL, NULL);
14022673 if (status)
1403
- goto out;
2674
+ return status;
14042675
1405
- if (phy_info->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
1406
- status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG,
2676
+ if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
2677
+ struct ice_aqc_get_phy_caps_data *pcaps;
2678
+ struct ice_hw *hw;
2679
+
2680
+ hw = pi->hw;
2681
+ pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps),
2682
+ GFP_KERNEL);
2683
+ if (!pcaps)
2684
+ return ICE_ERR_NO_MEMORY;
2685
+
2686
+ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
14072687 pcaps, NULL);
1408
- if (status)
1409
- goto out;
14102688
1411
- memcpy(phy_info->link_info.module_type, &pcaps->module_type,
1412
- sizeof(phy_info->link_info.module_type));
2689
+ devm_kfree(ice_hw_to_dev(hw), pcaps);
14132690 }
1414
-out:
1415
- devm_kfree(ice_hw_to_dev(hw), pcaps);
2691
+
14162692 return status;
14172693 }
14182694
14192695 /**
1420
- * ice_set_fc
2696
+ * ice_cache_phy_user_req
14212697 * @pi: port information structure
1422
- * @aq_failures: pointer to status code, specific to ice_set_fc routine
1423
- * @atomic_restart: enable automatic link update
2698
+ * @cache_data: PHY logging data
2699
+ * @cache_mode: PHY logging mode
14242700 *
1425
- * Set the requested flow control mode.
2701
+ * Log the user request on (FC, FEC, SPEED) for later use.
2702
+ */
2703
+static void
2704
+ice_cache_phy_user_req(struct ice_port_info *pi,
2705
+ struct ice_phy_cache_mode_data cache_data,
2706
+ enum ice_phy_cache_mode cache_mode)
2707
+{
2708
+ if (!pi)
2709
+ return;
2710
+
2711
+ switch (cache_mode) {
2712
+ case ICE_FC_MODE:
2713
+ pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req;
2714
+ break;
2715
+ case ICE_SPEED_MODE:
2716
+ pi->phy.curr_user_speed_req =
2717
+ cache_data.data.curr_user_speed_req;
2718
+ break;
2719
+ case ICE_FEC_MODE:
2720
+ pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req;
2721
+ break;
2722
+ default:
2723
+ break;
2724
+ }
2725
+}
2726
+
2727
+/**
2728
+ * ice_caps_to_fc_mode
2729
+ * @caps: PHY capabilities
2730
+ *
2731
+ * Convert PHY FC capabilities to ice FC mode
2732
+ */
2733
+enum ice_fc_mode ice_caps_to_fc_mode(u8 caps)
2734
+{
2735
+ if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE &&
2736
+ caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
2737
+ return ICE_FC_FULL;
2738
+
2739
+ if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE)
2740
+ return ICE_FC_TX_PAUSE;
2741
+
2742
+ if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
2743
+ return ICE_FC_RX_PAUSE;
2744
+
2745
+ return ICE_FC_NONE;
2746
+}
2747
+
2748
+/**
2749
+ * ice_caps_to_fec_mode
2750
+ * @caps: PHY capabilities
2751
+ * @fec_options: Link FEC options
2752
+ *
2753
+ * Convert PHY FEC capabilities to ice FEC mode
2754
+ */
2755
+enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options)
2756
+{
2757
+ if (caps & ICE_AQC_PHY_EN_AUTO_FEC)
2758
+ return ICE_FEC_AUTO;
2759
+
2760
+ if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
2761
+ ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
2762
+ ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN |
2763
+ ICE_AQC_PHY_FEC_25G_KR_REQ))
2764
+ return ICE_FEC_BASER;
2765
+
2766
+ if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ |
2767
+ ICE_AQC_PHY_FEC_25G_RS_544_REQ |
2768
+ ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN))
2769
+ return ICE_FEC_RS;
2770
+
2771
+ return ICE_FEC_NONE;
2772
+}
2773
+
2774
+/**
2775
+ * ice_cfg_phy_fc - Configure PHY FC data based on FC mode
2776
+ * @pi: port information structure
2777
+ * @cfg: PHY configuration data to set FC mode
2778
+ * @req_mode: FC mode to configure
14262779 */
14272780 enum ice_status
1428
-ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool atomic_restart)
2781
+ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
2782
+ enum ice_fc_mode req_mode)
14292783 {
1430
- struct ice_aqc_set_phy_cfg_data cfg = { 0 };
1431
- struct ice_aqc_get_phy_caps_data *pcaps;
1432
- enum ice_status status;
2784
+ struct ice_phy_cache_mode_data cache_data;
14332785 u8 pause_mask = 0x0;
1434
- struct ice_hw *hw;
14352786
1436
- if (!pi)
1437
- return ICE_ERR_PARAM;
1438
- hw = pi->hw;
1439
- *aq_failures = ICE_SET_FC_AQ_FAIL_NONE;
2787
+ if (!pi || !cfg)
2788
+ return ICE_ERR_BAD_PTR;
14402789
1441
- switch (pi->fc.req_mode) {
2790
+ switch (req_mode) {
14422791 case ICE_FC_FULL:
14432792 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
14442793 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
....@@ -1453,38 +2802,70 @@
14532802 break;
14542803 }
14552804
2805
+ /* clear the old pause settings */
2806
+ cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
2807
+ ICE_AQC_PHY_EN_RX_LINK_PAUSE);
2808
+
2809
+ /* set the new capabilities */
2810
+ cfg->caps |= pause_mask;
2811
+
2812
+ /* Cache user FC request */
2813
+ cache_data.data.curr_user_fc_req = req_mode;
2814
+ ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE);
2815
+
2816
+ return 0;
2817
+}
2818
+
2819
+/**
2820
+ * ice_set_fc
2821
+ * @pi: port information structure
2822
+ * @aq_failures: pointer to status code, specific to ice_set_fc routine
2823
+ * @ena_auto_link_update: enable automatic link update
2824
+ *
2825
+ * Set the requested flow control mode.
2826
+ */
2827
+enum ice_status
2828
+ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
2829
+{
2830
+ struct ice_aqc_set_phy_cfg_data cfg = { 0 };
2831
+ struct ice_aqc_get_phy_caps_data *pcaps;
2832
+ enum ice_status status;
2833
+ struct ice_hw *hw;
2834
+
2835
+ if (!pi || !aq_failures)
2836
+ return ICE_ERR_BAD_PTR;
2837
+
2838
+ *aq_failures = 0;
2839
+ hw = pi->hw;
2840
+
14562841 pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
14572842 if (!pcaps)
14582843 return ICE_ERR_NO_MEMORY;
14592844
1460
- /* Get the current phy config */
1461
- status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
1462
- NULL);
2845
+ /* Get the current PHY config */
2846
+ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
2847
+ pcaps, NULL);
14632848 if (status) {
14642849 *aq_failures = ICE_SET_FC_AQ_FAIL_GET;
14652850 goto out;
14662851 }
14672852
1468
- /* clear the old pause settings */
1469
- cfg.caps = pcaps->caps & ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
1470
- ICE_AQC_PHY_EN_RX_LINK_PAUSE);
1471
- /* set the new capabilities */
1472
- cfg.caps |= pause_mask;
2853
+ ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
2854
+
2855
+ /* Configure the set PHY data */
2856
+ status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode);
2857
+ if (status)
2858
+ goto out;
2859
+
14732860 /* If the capabilities have changed, then set the new config */
14742861 if (cfg.caps != pcaps->caps) {
14752862 int retry_count, retry_max = 10;
14762863
14772864 /* Auto restart link so settings take effect */
1478
- if (atomic_restart)
1479
- cfg.caps |= ICE_AQ_PHY_ENA_ATOMIC_LINK;
1480
- /* Copy over all the old settings */
1481
- cfg.phy_type_low = pcaps->phy_type_low;
1482
- cfg.low_power_ctrl = pcaps->low_power_ctrl;
1483
- cfg.eee_cap = pcaps->eee_cap;
1484
- cfg.eeer_value = pcaps->eeer_value;
1485
- cfg.link_fec_opt = pcaps->link_fec_options;
2865
+ if (ena_auto_link_update)
2866
+ cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
14862867
1487
- status = ice_aq_set_phy_cfg(hw, pi->lport, &cfg, NULL);
2868
+ status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
14882869 if (status) {
14892870 *aq_failures = ICE_SET_FC_AQ_FAIL_SET;
14902871 goto out;
....@@ -1510,6 +2891,160 @@
15102891
15112892 out:
15122893 devm_kfree(ice_hw_to_dev(hw), pcaps);
2894
+ return status;
2895
+}
2896
+
2897
+/**
2898
+ * ice_phy_caps_equals_cfg
2899
+ * @phy_caps: PHY capabilities
2900
+ * @phy_cfg: PHY configuration
2901
+ *
2902
+ * Helper function to determine if PHY capabilities matches PHY
2903
+ * configuration
2904
+ */
2905
+bool
2906
+ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps,
2907
+ struct ice_aqc_set_phy_cfg_data *phy_cfg)
2908
+{
2909
+ u8 caps_mask, cfg_mask;
2910
+
2911
+ if (!phy_caps || !phy_cfg)
2912
+ return false;
2913
+
2914
+ /* These bits are not common between capabilities and configuration.
2915
+ * Do not use them to determine equality.
2916
+ */
2917
+ caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE |
2918
+ ICE_AQC_GET_PHY_EN_MOD_QUAL);
2919
+ cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
2920
+
2921
+ if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
2922
+ phy_caps->phy_type_high != phy_cfg->phy_type_high ||
2923
+ ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
2924
+ phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
2925
+ phy_caps->eee_cap != phy_cfg->eee_cap ||
2926
+ phy_caps->eeer_value != phy_cfg->eeer_value ||
2927
+ phy_caps->link_fec_options != phy_cfg->link_fec_opt)
2928
+ return false;
2929
+
2930
+ return true;
2931
+}
2932
+
2933
+/**
2934
+ * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
2935
+ * @pi: port information structure
2936
+ * @caps: PHY ability structure to copy date from
2937
+ * @cfg: PHY configuration structure to copy data to
2938
+ *
2939
+ * Helper function to copy AQC PHY get ability data to PHY set configuration
2940
+ * data structure
2941
+ */
2942
+void
2943
+ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
2944
+ struct ice_aqc_get_phy_caps_data *caps,
2945
+ struct ice_aqc_set_phy_cfg_data *cfg)
2946
+{
2947
+ if (!pi || !caps || !cfg)
2948
+ return;
2949
+
2950
+ memset(cfg, 0, sizeof(*cfg));
2951
+ cfg->phy_type_low = caps->phy_type_low;
2952
+ cfg->phy_type_high = caps->phy_type_high;
2953
+ cfg->caps = caps->caps;
2954
+ cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
2955
+ cfg->eee_cap = caps->eee_cap;
2956
+ cfg->eeer_value = caps->eeer_value;
2957
+ cfg->link_fec_opt = caps->link_fec_options;
2958
+ cfg->module_compliance_enforcement =
2959
+ caps->module_compliance_enforcement;
2960
+
2961
+ if (ice_fw_supports_link_override(pi->hw)) {
2962
+ struct ice_link_default_override_tlv tlv;
2963
+
2964
+ if (ice_get_link_default_override(&tlv, pi))
2965
+ return;
2966
+
2967
+ if (tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE)
2968
+ cfg->module_compliance_enforcement |=
2969
+ ICE_LINK_OVERRIDE_STRICT_MODE;
2970
+ }
2971
+}
2972
+
2973
+/**
2974
+ * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
2975
+ * @pi: port information structure
2976
+ * @cfg: PHY configuration data to set FEC mode
2977
+ * @fec: FEC mode to configure
2978
+ */
2979
+enum ice_status
2980
+ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
2981
+ enum ice_fec_mode fec)
2982
+{
2983
+ struct ice_aqc_get_phy_caps_data *pcaps;
2984
+ enum ice_status status;
2985
+
2986
+ if (!pi || !cfg)
2987
+ return ICE_ERR_BAD_PTR;
2988
+
2989
+ pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
2990
+ if (!pcaps)
2991
+ return ICE_ERR_NO_MEMORY;
2992
+
2993
+ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps,
2994
+ NULL);
2995
+ if (status)
2996
+ goto out;
2997
+
2998
+ cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
2999
+ cfg->link_fec_opt = pcaps->link_fec_options;
3000
+
3001
+ switch (fec) {
3002
+ case ICE_FEC_BASER:
3003
+ /* Clear RS bits, and AND BASE-R ability
3004
+ * bits and OR request bits.
3005
+ */
3006
+ cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
3007
+ ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
3008
+ cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
3009
+ ICE_AQC_PHY_FEC_25G_KR_REQ;
3010
+ break;
3011
+ case ICE_FEC_RS:
3012
+ /* Clear BASE-R bits, and AND RS ability
3013
+ * bits and OR request bits.
3014
+ */
3015
+ cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
3016
+ cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
3017
+ ICE_AQC_PHY_FEC_25G_RS_544_REQ;
3018
+ break;
3019
+ case ICE_FEC_NONE:
3020
+ /* Clear all FEC option bits. */
3021
+ cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
3022
+ break;
3023
+ case ICE_FEC_AUTO:
3024
+ /* AND auto FEC bit, and all caps bits. */
3025
+ cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
3026
+ cfg->link_fec_opt |= pcaps->link_fec_options;
3027
+ break;
3028
+ default:
3029
+ status = ICE_ERR_PARAM;
3030
+ break;
3031
+ }
3032
+
3033
+ if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(pi->hw)) {
3034
+ struct ice_link_default_override_tlv tlv;
3035
+
3036
+ status = ice_get_link_default_override(&tlv, pi);
3037
+ if (status)
3038
+ goto out;
3039
+
3040
+ if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&
3041
+ (tlv.options & ICE_LINK_OVERRIDE_EN))
3042
+ cfg->link_fec_opt = tlv.fec_options;
3043
+ }
3044
+
3045
+out:
3046
+ kfree(pcaps);
3047
+
15133048 return status;
15143049 }
15153050
....@@ -1577,7 +3112,7 @@
15773112
15783113 /**
15793114 * ice_aq_set_event_mask
1580
- * @hw: pointer to the hw struct
3115
+ * @hw: pointer to the HW struct
15813116 * @port_num: port number of the physical function
15823117 * @mask: event mask to be set
15833118 * @cd: pointer to command details structure or NULL
....@@ -1598,8 +3133,104 @@
15983133 cmd->lport_num = port_num;
15993134
16003135 cmd->event_mask = cpu_to_le16(mask);
3136
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3137
+}
3138
+
3139
+/**
3140
+ * ice_aq_set_mac_loopback
3141
+ * @hw: pointer to the HW struct
3142
+ * @ena_lpbk: Enable or Disable loopback
3143
+ * @cd: pointer to command details structure or NULL
3144
+ *
3145
+ * Enable/disable loopback on a given port
3146
+ */
3147
+enum ice_status
3148
+ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
3149
+{
3150
+ struct ice_aqc_set_mac_lb *cmd;
3151
+ struct ice_aq_desc desc;
3152
+
3153
+ cmd = &desc.params.set_mac_lb;
3154
+
3155
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
3156
+ if (ena_lpbk)
3157
+ cmd->lb_mode = ICE_AQ_MAC_LB_EN;
16013158
16023159 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3160
+}
3161
+
3162
+/**
3163
+ * ice_aq_set_port_id_led
3164
+ * @pi: pointer to the port information
3165
+ * @is_orig_mode: is this LED set to original mode (by the net-list)
3166
+ * @cd: pointer to command details structure or NULL
3167
+ *
3168
+ * Set LED value for the given port (0x06e9)
3169
+ */
3170
+enum ice_status
3171
+ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
3172
+ struct ice_sq_cd *cd)
3173
+{
3174
+ struct ice_aqc_set_port_id_led *cmd;
3175
+ struct ice_hw *hw = pi->hw;
3176
+ struct ice_aq_desc desc;
3177
+
3178
+ cmd = &desc.params.set_port_id_led;
3179
+
3180
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);
3181
+
3182
+ if (is_orig_mode)
3183
+ cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG;
3184
+ else
3185
+ cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK;
3186
+
3187
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3188
+}
3189
+
3190
+/**
3191
+ * ice_aq_sff_eeprom
3192
+ * @hw: pointer to the HW struct
3193
+ * @lport: bits [7:0] = logical port, bit [8] = logical port valid
3194
+ * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default)
3195
+ * @mem_addr: I2C offset. lower 8 bits for address, 8 upper bits zero padding.
3196
+ * @page: QSFP page
3197
+ * @set_page: set or ignore the page
3198
+ * @data: pointer to data buffer to be read/written to the I2C device.
3199
+ * @length: 1-16 for read, 1 for write.
3200
+ * @write: 0 read, 1 for write.
3201
+ * @cd: pointer to command details structure or NULL
3202
+ *
3203
+ * Read/Write SFF EEPROM (0x06EE)
3204
+ */
3205
+enum ice_status
3206
+ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
3207
+ u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
3208
+ bool write, struct ice_sq_cd *cd)
3209
+{
3210
+ struct ice_aqc_sff_eeprom *cmd;
3211
+ struct ice_aq_desc desc;
3212
+ enum ice_status status;
3213
+
3214
+ if (!data || (mem_addr & 0xff00))
3215
+ return ICE_ERR_PARAM;
3216
+
3217
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
3218
+ cmd = &desc.params.read_write_sff_param;
3219
+ desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD | ICE_AQ_FLAG_BUF);
3220
+ cmd->lport_num = (u8)(lport & 0xff);
3221
+ cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
3222
+ cmd->i2c_bus_addr = cpu_to_le16(((bus_addr >> 1) &
3223
+ ICE_AQC_SFF_I2CBUS_7BIT_M) |
3224
+ ((set_page <<
3225
+ ICE_AQC_SFF_SET_EEPROM_PAGE_S) &
3226
+ ICE_AQC_SFF_SET_EEPROM_PAGE_M));
3227
+ cmd->i2c_mem_addr = cpu_to_le16(mem_addr & 0xff);
3228
+ cmd->eeprom_page = cpu_to_le16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S);
3229
+ if (write)
3230
+ cmd->i2c_bus_addr |= cpu_to_le16(ICE_AQC_SFF_IS_WRITE);
3231
+
3232
+ status = ice_aq_send_cmd(hw, &desc, data, length, cd);
3233
+ return status;
16033234 }
16043235
16053236 /**
....@@ -1678,7 +3309,7 @@
16783309 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
16793310 break;
16803311 }
1681
- /* fall-through */
3312
+ fallthrough;
16823313 default:
16833314 status = ICE_ERR_PARAM;
16843315 goto ice_aq_get_set_rss_lut_exit;
....@@ -1695,7 +3326,7 @@
16953326 /**
16963327 * ice_aq_get_rss_lut
16973328 * @hw: pointer to the hardware structure
1698
- * @vsi_id: VSI FW index
3329
+ * @vsi_handle: software VSI handle
16993330 * @lut_type: LUT table type
17003331 * @lut: pointer to the LUT buffer provided by the caller
17013332 * @lut_size: size of the LUT buffer
....@@ -1703,17 +3334,20 @@
17033334 * get the RSS lookup table, PF or VSI type
17043335 */
17053336 enum ice_status
1706
-ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
1707
- u16 lut_size)
3337
+ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
3338
+ u8 *lut, u16 lut_size)
17083339 {
1709
- return __ice_aq_get_set_rss_lut(hw, vsi_id, lut_type, lut, lut_size, 0,
1710
- false);
3340
+ if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
3341
+ return ICE_ERR_PARAM;
3342
+
3343
+ return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3344
+ lut_type, lut, lut_size, 0, false);
17113345 }
17123346
17133347 /**
17143348 * ice_aq_set_rss_lut
17153349 * @hw: pointer to the hardware structure
1716
- * @vsi_id: VSI FW index
3350
+ * @vsi_handle: software VSI handle
17173351 * @lut_type: LUT table type
17183352 * @lut: pointer to the LUT buffer provided by the caller
17193353 * @lut_size: size of the LUT buffer
....@@ -1721,16 +3355,19 @@
17213355 * set the RSS lookup table, PF or VSI type
17223356 */
17233357 enum ice_status
1724
-ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
1725
- u16 lut_size)
3358
+ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
3359
+ u8 *lut, u16 lut_size)
17263360 {
1727
- return __ice_aq_get_set_rss_lut(hw, vsi_id, lut_type, lut, lut_size, 0,
1728
- true);
3361
+ if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
3362
+ return ICE_ERR_PARAM;
3363
+
3364
+ return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3365
+ lut_type, lut, lut_size, 0, true);
17293366 }
17303367
17313368 /**
17323369 * __ice_aq_get_set_rss_key
1733
- * @hw: pointer to the hw struct
3370
+ * @hw: pointer to the HW struct
17343371 * @vsi_id: VSI FW index
17353372 * @key: pointer to key info struct
17363373 * @set: set true to set the key, false to get the key
....@@ -1765,32 +3402,40 @@
17653402
17663403 /**
17673404 * ice_aq_get_rss_key
1768
- * @hw: pointer to the hw struct
1769
- * @vsi_id: VSI FW index
3405
+ * @hw: pointer to the HW struct
3406
+ * @vsi_handle: software VSI handle
17703407 * @key: pointer to key info struct
17713408 *
17723409 * get the RSS key per VSI
17733410 */
17743411 enum ice_status
1775
-ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_id,
3412
+ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
17763413 struct ice_aqc_get_set_rss_keys *key)
17773414 {
1778
- return __ice_aq_get_set_rss_key(hw, vsi_id, key, false);
3415
+ if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
3416
+ return ICE_ERR_PARAM;
3417
+
3418
+ return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3419
+ key, false);
17793420 }
17803421
17813422 /**
17823423 * ice_aq_set_rss_key
1783
- * @hw: pointer to the hw struct
1784
- * @vsi_id: VSI FW index
3424
+ * @hw: pointer to the HW struct
3425
+ * @vsi_handle: software VSI handle
17853426 * @keys: pointer to key info struct
17863427 *
17873428 * set the RSS key per VSI
17883429 */
17893430 enum ice_status
1790
-ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_id,
3431
+ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
17913432 struct ice_aqc_get_set_rss_keys *keys)
17923433 {
1793
- return __ice_aq_get_set_rss_key(hw, vsi_id, keys, true);
3434
+ if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
3435
+ return ICE_ERR_PARAM;
3436
+
3437
+ return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3438
+ keys, true);
17943439 }
17953440
17963441 /**
....@@ -1819,10 +3464,10 @@
18193464 struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
18203465 struct ice_sq_cd *cd)
18213466 {
1822
- u16 i, sum_header_size, sum_q_size = 0;
18233467 struct ice_aqc_add_tx_qgrp *list;
18243468 struct ice_aqc_add_txqs *cmd;
18253469 struct ice_aq_desc desc;
3470
+ u16 i, sum_size = 0;
18263471
18273472 cmd = &desc.params.add_txqs;
18283473
....@@ -1834,18 +3479,13 @@
18343479 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
18353480 return ICE_ERR_PARAM;
18363481
1837
- sum_header_size = num_qgrps *
1838
- (sizeof(*qg_list) - sizeof(*qg_list->txqs));
1839
-
1840
- list = qg_list;
1841
- for (i = 0; i < num_qgrps; i++) {
1842
- struct ice_aqc_add_txqs_perq *q = list->txqs;
1843
-
1844
- sum_q_size += list->num_txqs * sizeof(*q);
1845
- list = (struct ice_aqc_add_tx_qgrp *)(q + list->num_txqs);
3482
+ for (i = 0, list = qg_list; i < num_qgrps; i++) {
3483
+ sum_size += struct_size(list, txqs, list->num_txqs);
3484
+ list = (struct ice_aqc_add_tx_qgrp *)(list->txqs +
3485
+ list->num_txqs);
18463486 }
18473487
1848
- if (buf_size != (sum_header_size + sum_q_size))
3488
+ if (buf_size != sum_size)
18493489 return ICE_ERR_PARAM;
18503490
18513491 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
....@@ -1861,6 +3501,8 @@
18613501 * @num_qgrps: number of groups in the list
18623502 * @qg_list: the list of groups to disable
18633503 * @buf_size: the total size of the qg_list buffer in bytes
3504
+ * @rst_src: if called due to reset, specifies the reset source
3505
+ * @vmvf_num: the relative VM or VF number that is undergoing the reset
18643506 * @cd: pointer to command details structure or NULL
18653507 *
18663508 * Disable LAN Tx queue (0x0C31)
....@@ -1868,39 +3510,86 @@
18683510 static enum ice_status
18693511 ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
18703512 struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
3513
+ enum ice_disq_rst_src rst_src, u16 vmvf_num,
18713514 struct ice_sq_cd *cd)
18723515 {
3516
+ struct ice_aqc_dis_txq_item *item;
18733517 struct ice_aqc_dis_txqs *cmd;
18743518 struct ice_aq_desc desc;
3519
+ enum ice_status status;
18753520 u16 i, sz = 0;
18763521
18773522 cmd = &desc.params.dis_txqs;
18783523 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);
18793524
1880
- if (!qg_list)
3525
+ /* qg_list can be NULL only in VM/VF reset flow */
3526
+ if (!qg_list && !rst_src)
18813527 return ICE_ERR_PARAM;
18823528
18833529 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
18843530 return ICE_ERR_PARAM;
1885
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
3531
+
18863532 cmd->num_entries = num_qgrps;
18873533
1888
- for (i = 0; i < num_qgrps; ++i) {
1889
- /* Calculate the size taken up by the queue IDs in this group */
1890
- sz += qg_list[i].num_qs * sizeof(qg_list[i].q_id);
3534
+ cmd->vmvf_and_timeout = cpu_to_le16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
3535
+ ICE_AQC_Q_DIS_TIMEOUT_M);
18913536
1892
- /* Add the size of the group header */
1893
- sz += sizeof(qg_list[i]) - sizeof(qg_list[i].q_id);
3537
+ switch (rst_src) {
3538
+ case ICE_VM_RESET:
3539
+ cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
3540
+ cmd->vmvf_and_timeout |=
3541
+ cpu_to_le16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
3542
+ break;
3543
+ case ICE_VF_RESET:
3544
+ cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
3545
+ /* In this case, FW expects vmvf_num to be absolute VF ID */
3546
+ cmd->vmvf_and_timeout |=
3547
+ cpu_to_le16((vmvf_num + hw->func_caps.vf_base_id) &
3548
+ ICE_AQC_Q_DIS_VMVF_NUM_M);
3549
+ break;
3550
+ case ICE_NO_RESET:
3551
+ default:
3552
+ break;
3553
+ }
3554
+
3555
+ /* flush pipe on time out */
3556
+ cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;
3557
+ /* If no queue group info, we are in a reset flow. Issue the AQ */
3558
+ if (!qg_list)
3559
+ goto do_aq;
3560
+
3561
+ /* set RD bit to indicate that command buffer is provided by the driver
3562
+ * and it needs to be read by the firmware
3563
+ */
3564
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
3565
+
3566
+ for (i = 0, item = qg_list; i < num_qgrps; i++) {
3567
+ u16 item_size = struct_size(item, q_id, item->num_qs);
18943568
18953569 /* If the num of queues is even, add 2 bytes of padding */
1896
- if ((qg_list[i].num_qs % 2) == 0)
1897
- sz += 2;
3570
+ if ((item->num_qs % 2) == 0)
3571
+ item_size += 2;
3572
+
3573
+ sz += item_size;
3574
+
3575
+ item = (struct ice_aqc_dis_txq_item *)((u8 *)item + item_size);
18983576 }
18993577
19003578 if (buf_size != sz)
19013579 return ICE_ERR_PARAM;
19023580
1903
- return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
3581
+do_aq:
3582
+ status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
3583
+ if (status) {
3584
+ if (!qg_list)
3585
+ ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
3586
+ vmvf_num, hw->adminq.sq_last_status);
3587
+ else
3588
+ ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n",
3589
+ le16_to_cpu(qg_list[0].q_id[0]),
3590
+ hw->adminq.sq_last_status);
3591
+ }
3592
+ return status;
19043593 }
19053594
19063595 /* End of FW Admin Queue command wrappers */
....@@ -1911,8 +3600,8 @@
19113600 * @dest_ctx: the context to be written to
19123601 * @ce_info: a description of the struct to be filled
19133602 */
1914
-static void ice_write_byte(u8 *src_ctx, u8 *dest_ctx,
1915
- const struct ice_ctx_ele *ce_info)
3603
+static void
3604
+ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
19163605 {
19173606 u8 src_byte, dest_byte, mask;
19183607 u8 *from, *dest;
....@@ -1950,8 +3639,8 @@
19503639 * @dest_ctx: the context to be written to
19513640 * @ce_info: a description of the struct to be filled
19523641 */
1953
-static void ice_write_word(u8 *src_ctx, u8 *dest_ctx,
1954
- const struct ice_ctx_ele *ce_info)
3642
+static void
3643
+ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
19553644 {
19563645 u16 src_word, mask;
19573646 __le16 dest_word;
....@@ -1993,8 +3682,8 @@
19933682 * @dest_ctx: the context to be written to
19943683 * @ce_info: a description of the struct to be filled
19953684 */
1996
-static void ice_write_dword(u8 *src_ctx, u8 *dest_ctx,
1997
- const struct ice_ctx_ele *ce_info)
3685
+static void
3686
+ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
19983687 {
19993688 u32 src_dword, mask;
20003689 __le32 dest_dword;
....@@ -2044,8 +3733,8 @@
20443733 * @dest_ctx: the context to be written to
20453734 * @ce_info: a description of the struct to be filled
20463735 */
2047
-static void ice_write_qword(u8 *src_ctx, u8 *dest_ctx,
2048
- const struct ice_ctx_ele *ce_info)
3736
+static void
3737
+ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
20493738 {
20503739 u64 src_qword, mask;
20513740 __le64 dest_qword;
....@@ -2091,12 +3780,14 @@
20913780
20923781 /**
20933782 * ice_set_ctx - set context bits in packed structure
3783
+ * @hw: pointer to the hardware structure
20943784 * @src_ctx: pointer to a generic non-packed context structure
20953785 * @dest_ctx: pointer to memory for the packed structure
20963786 * @ce_info: a description of the structure to be transformed
20973787 */
20983788 enum ice_status
2099
-ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3789
+ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
3790
+ const struct ice_ctx_ele *ce_info)
21003791 {
21013792 int f;
21023793
....@@ -2105,6 +3796,12 @@
21053796 * using the correct size so that we are correct regardless
21063797 * of the endianness of the machine.
21073798 */
3799
+ if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) {
3800
+ ice_debug(hw, ICE_DBG_QCTX,
3801
+ "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n",
3802
+ f, ce_info[f].width, ce_info[f].size_of);
3803
+ continue;
3804
+ }
21083805 switch (ce_info[f].size_of) {
21093806 case sizeof(u8):
21103807 ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
....@@ -2127,24 +3824,50 @@
21273824 }
21283825
21293826 /**
3827
+ * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
3828
+ * @hw: pointer to the HW struct
3829
+ * @vsi_handle: software VSI handle
3830
+ * @tc: TC number
3831
+ * @q_handle: software queue handle
3832
+ */
3833
+struct ice_q_ctx *
3834
+ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
3835
+{
3836
+ struct ice_vsi_ctx *vsi;
3837
+ struct ice_q_ctx *q_ctx;
3838
+
3839
+ vsi = ice_get_vsi_ctx(hw, vsi_handle);
3840
+ if (!vsi)
3841
+ return NULL;
3842
+ if (q_handle >= vsi->num_lan_q_entries[tc])
3843
+ return NULL;
3844
+ if (!vsi->lan_q_ctx[tc])
3845
+ return NULL;
3846
+ q_ctx = vsi->lan_q_ctx[tc];
3847
+ return &q_ctx[q_handle];
3848
+}
3849
+
3850
+/**
21303851 * ice_ena_vsi_txq
21313852 * @pi: port information structure
2132
- * @vsi_id: VSI id
2133
- * @tc: tc number
3853
+ * @vsi_handle: software VSI handle
3854
+ * @tc: TC number
3855
+ * @q_handle: software queue handle
21343856 * @num_qgrps: Number of added queue groups
21353857 * @buf: list of queue groups to be added
21363858 * @buf_size: size of buffer for indirect command
21373859 * @cd: pointer to command details structure or NULL
21383860 *
2139
- * This function adds one lan q
3861
+ * This function adds one LAN queue
21403862 */
21413863 enum ice_status
2142
-ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_id, u8 tc, u8 num_qgrps,
2143
- struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
3864
+ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
3865
+ u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
21443866 struct ice_sq_cd *cd)
21453867 {
21463868 struct ice_aqc_txsched_elem_data node = { 0 };
21473869 struct ice_sched_node *parent;
3870
+ struct ice_q_ctx *q_ctx;
21483871 enum ice_status status;
21493872 struct ice_hw *hw;
21503873
....@@ -2156,15 +3879,27 @@
21563879
21573880 hw = pi->hw;
21583881
3882
+ if (!ice_is_vsi_valid(hw, vsi_handle))
3883
+ return ICE_ERR_PARAM;
3884
+
21593885 mutex_lock(&pi->sched_lock);
21603886
3887
+ q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
3888
+ if (!q_ctx) {
3889
+ ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
3890
+ q_handle);
3891
+ status = ICE_ERR_PARAM;
3892
+ goto ena_txq_exit;
3893
+ }
3894
+
21613895 /* find a parent node */
2162
- parent = ice_sched_get_free_qparent(pi, vsi_id, tc,
3896
+ parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
21633897 ICE_SCHED_NODE_OWNER_LAN);
21643898 if (!parent) {
21653899 status = ICE_ERR_PARAM;
21663900 goto ena_txq_exit;
21673901 }
3902
+
21683903 buf->parent_teid = parent->info.node_teid;
21693904 node.parent_teid = parent->info.node_teid;
21703905 /* Mark that the values in the "generic" section as valid. The default
....@@ -2176,20 +3911,39 @@
21763911 * Bit 5-6.
21773912 * - Bit 7 is reserved.
21783913 * Without setting the generic section as valid in valid_sections, the
2179
- * Admin Q command will fail with error code ICE_AQ_RC_EINVAL.
3914
+ * Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
21803915 */
2181
- buf->txqs[0].info.valid_sections = ICE_AQC_ELEM_VALID_GENERIC;
3916
+ buf->txqs[0].info.valid_sections =
3917
+ ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
3918
+ ICE_AQC_ELEM_VALID_EIR;
3919
+ buf->txqs[0].info.generic = 0;
3920
+ buf->txqs[0].info.cir_bw.bw_profile_idx =
3921
+ cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
3922
+ buf->txqs[0].info.cir_bw.bw_alloc =
3923
+ cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
3924
+ buf->txqs[0].info.eir_bw.bw_profile_idx =
3925
+ cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
3926
+ buf->txqs[0].info.eir_bw.bw_alloc =
3927
+ cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
21823928
2183
- /* add the lan q */
3929
+ /* add the LAN queue */
21843930 status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
2185
- if (status)
3931
+ if (status) {
3932
+ ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
3933
+ le16_to_cpu(buf->txqs[0].txq_id),
3934
+ hw->adminq.sq_last_status);
21863935 goto ena_txq_exit;
3936
+ }
21873937
21883938 node.node_teid = buf->txqs[0].q_teid;
21893939 node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
3940
+ q_ctx->q_handle = q_handle;
3941
+ q_ctx->q_teid = le32_to_cpu(node.node_teid);
21903942
2191
- /* add a leaf node into schduler tree q layer */
3943
+ /* add a leaf node into scheduler tree queue layer */
21923944 status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);
3945
+ if (!status)
3946
+ status = ice_sched_replay_q_bw(pi, q_ctx);
21933947
21943948 ena_txq_exit:
21953949 mutex_unlock(&pi->sched_lock);
....@@ -2199,23 +3953,50 @@
21993953 /**
22003954 * ice_dis_vsi_txq
22013955 * @pi: port information structure
3956
+ * @vsi_handle: software VSI handle
3957
+ * @tc: TC number
22023958 * @num_queues: number of queues
3959
+ * @q_handles: pointer to software queue handle array
22033960 * @q_ids: pointer to the q_id array
22043961 * @q_teids: pointer to queue node teids
3962
+ * @rst_src: if called due to reset, specifies the reset source
3963
+ * @vmvf_num: the relative VM or VF number that is undergoing the reset
22053964 * @cd: pointer to command details structure or NULL
22063965 *
22073966 * This function removes queues and their corresponding nodes in SW DB
22083967 */
22093968 enum ice_status
2210
-ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
2211
- u32 *q_teids, struct ice_sq_cd *cd)
3969
+ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
3970
+ u16 *q_handles, u16 *q_ids, u32 *q_teids,
3971
+ enum ice_disq_rst_src rst_src, u16 vmvf_num,
3972
+ struct ice_sq_cd *cd)
22123973 {
22133974 enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
2214
- struct ice_aqc_dis_txq_item qg_list;
2215
- u16 i;
3975
+ struct ice_aqc_dis_txq_item *qg_list;
3976
+ struct ice_q_ctx *q_ctx;
3977
+ struct ice_hw *hw;
3978
+ u16 i, buf_size;
22163979
22173980 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
22183981 return ICE_ERR_CFG;
3982
+
3983
+ hw = pi->hw;
3984
+
3985
+ if (!num_queues) {
3986
+ /* if queue is disabled already yet the disable queue command
3987
+ * has to be sent to complete the VF reset, then call
3988
+ * ice_aq_dis_lan_txq without any queue information
3989
+ */
3990
+ if (rst_src)
3991
+ return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src,
3992
+ vmvf_num, NULL);
3993
+ return ICE_ERR_CFG;
3994
+ }
3995
+
3996
+ buf_size = struct_size(qg_list, q_id, 1);
3997
+ qg_list = kzalloc(buf_size, GFP_KERNEL);
3998
+ if (!qg_list)
3999
+ return ICE_ERR_NO_MEMORY;
22194000
22204001 mutex_lock(&pi->sched_lock);
22214002
....@@ -2225,32 +4006,45 @@
22254006 node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
22264007 if (!node)
22274008 continue;
2228
- qg_list.parent_teid = node->info.parent_teid;
2229
- qg_list.num_qs = 1;
2230
- qg_list.q_id[0] = cpu_to_le16(q_ids[i]);
2231
- status = ice_aq_dis_lan_txq(pi->hw, 1, &qg_list,
2232
- sizeof(qg_list), cd);
4009
+ q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handles[i]);
4010
+ if (!q_ctx) {
4011
+ ice_debug(hw, ICE_DBG_SCHED, "invalid queue handle%d\n",
4012
+ q_handles[i]);
4013
+ continue;
4014
+ }
4015
+ if (q_ctx->q_handle != q_handles[i]) {
4016
+ ice_debug(hw, ICE_DBG_SCHED, "Err:handles %d %d\n",
4017
+ q_ctx->q_handle, q_handles[i]);
4018
+ continue;
4019
+ }
4020
+ qg_list->parent_teid = node->info.parent_teid;
4021
+ qg_list->num_qs = 1;
4022
+ qg_list->q_id[0] = cpu_to_le16(q_ids[i]);
4023
+ status = ice_aq_dis_lan_txq(hw, 1, qg_list, buf_size, rst_src,
4024
+ vmvf_num, cd);
22334025
22344026 if (status)
22354027 break;
22364028 ice_free_sched_node(pi, node);
4029
+ q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
22374030 }
22384031 mutex_unlock(&pi->sched_lock);
4032
+ kfree(qg_list);
22394033 return status;
22404034 }
22414035
22424036 /**
2243
- * ice_cfg_vsi_qs - configure the new/exisiting VSI queues
4037
+ * ice_cfg_vsi_qs - configure the new/existing VSI queues
22444038 * @pi: port information structure
2245
- * @vsi_id: VSI Id
4039
+ * @vsi_handle: software VSI handle
22464040 * @tc_bitmap: TC bitmap
22474041 * @maxqs: max queues array per TC
2248
- * @owner: lan or rdma
4042
+ * @owner: LAN or RDMA
22494043 *
22504044 * This function adds/updates the VSI queues per TC.
22514045 */
22524046 static enum ice_status
2253
-ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_id, u8 tc_bitmap,
4047
+ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
22544048 u16 *maxqs, u8 owner)
22554049 {
22564050 enum ice_status status = 0;
....@@ -2259,14 +4053,17 @@
22594053 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
22604054 return ICE_ERR_CFG;
22614055
4056
+ if (!ice_is_vsi_valid(pi->hw, vsi_handle))
4057
+ return ICE_ERR_PARAM;
4058
+
22624059 mutex_lock(&pi->sched_lock);
22634060
2264
- for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
4061
+ ice_for_each_traffic_class(i) {
22654062 /* configuration is possible only if TC node is present */
22664063 if (!ice_sched_get_tc_node(pi, i))
22674064 continue;
22684065
2269
- status = ice_sched_cfg_vsi(pi, vsi_id, i, maxqs[i], owner,
4066
+ status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
22704067 ice_is_tc_ena(tc_bitmap, i));
22714068 if (status)
22724069 break;
....@@ -2277,18 +4074,336 @@
22774074 }
22784075
22794076 /**
2280
- * ice_cfg_vsi_lan - configure VSI lan queues
4077
+ * ice_cfg_vsi_lan - configure VSI LAN queues
22814078 * @pi: port information structure
2282
- * @vsi_id: VSI Id
4079
+ * @vsi_handle: software VSI handle
22834080 * @tc_bitmap: TC bitmap
2284
- * @max_lanqs: max lan queues array per TC
4081
+ * @max_lanqs: max LAN queues array per TC
22854082 *
2286
- * This function adds/updates the VSI lan queues per TC.
4083
+ * This function adds/updates the VSI LAN queues per TC.
22874084 */
22884085 enum ice_status
2289
-ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_id, u8 tc_bitmap,
4086
+ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
22904087 u16 *max_lanqs)
22914088 {
2292
- return ice_cfg_vsi_qs(pi, vsi_id, tc_bitmap, max_lanqs,
4089
+ return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
22934090 ICE_SCHED_NODE_OWNER_LAN);
22944091 }
4092
+
4093
+/**
4094
+ * ice_replay_pre_init - replay pre initialization
4095
+ * @hw: pointer to the HW struct
4096
+ *
4097
+ * Initializes required config data for VSI, FD, ACL, and RSS before replay.
4098
+ */
4099
+static enum ice_status ice_replay_pre_init(struct ice_hw *hw)
4100
+{
4101
+ struct ice_switch_info *sw = hw->switch_info;
4102
+ u8 i;
4103
+
4104
+ /* Delete old entries from replay filter list head if there is any */
4105
+ ice_rm_all_sw_replay_rule_info(hw);
4106
+ /* In start of replay, move entries into replay_rules list, it
4107
+ * will allow adding rules entries back to filt_rules list,
4108
+ * which is operational list.
4109
+ */
4110
+ for (i = 0; i < ICE_SW_LKUP_LAST; i++)
4111
+ list_replace_init(&sw->recp_list[i].filt_rules,
4112
+ &sw->recp_list[i].filt_replay_rules);
4113
+
4114
+ return 0;
4115
+}
4116
+
4117
+/**
4118
+ * ice_replay_vsi - replay VSI configuration
4119
+ * @hw: pointer to the HW struct
4120
+ * @vsi_handle: driver VSI handle
4121
+ *
4122
+ * Restore all VSI configuration after reset. It is required to call this
4123
+ * function with main VSI first.
4124
+ */
4125
+enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
4126
+{
4127
+ enum ice_status status;
4128
+
4129
+ if (!ice_is_vsi_valid(hw, vsi_handle))
4130
+ return ICE_ERR_PARAM;
4131
+
4132
+ /* Replay pre-initialization if there is any */
4133
+ if (vsi_handle == ICE_MAIN_VSI_HANDLE) {
4134
+ status = ice_replay_pre_init(hw);
4135
+ if (status)
4136
+ return status;
4137
+ }
4138
+ /* Replay per VSI all RSS configurations */
4139
+ status = ice_replay_rss_cfg(hw, vsi_handle);
4140
+ if (status)
4141
+ return status;
4142
+ /* Replay per VSI all filters */
4143
+ status = ice_replay_vsi_all_fltr(hw, vsi_handle);
4144
+ return status;
4145
+}
4146
+
4147
/**
 * ice_replay_post - post replay configuration cleanup
 * @hw: pointer to the HW struct
 *
 * After replay completes the backed-up replay rule lists are no longer
 * needed; drop them.
 */
void ice_replay_post(struct ice_hw *hw)
{
	/* Delete old entries from replay filter list head */
	ice_rm_all_sw_replay_rule_info(hw);
}
4158
+
4159
+/**
4160
+ * ice_stat_update40 - read 40 bit stat from the chip and update stat values
4161
+ * @hw: ptr to the hardware info
4162
+ * @reg: offset of 64 bit HW register to read from
4163
+ * @prev_stat_loaded: bool to specify if previous stats are loaded
4164
+ * @prev_stat: ptr to previous loaded stat value
4165
+ * @cur_stat: ptr to current stat value
4166
+ */
4167
+void
4168
+ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
4169
+ u64 *prev_stat, u64 *cur_stat)
4170
+{
4171
+ u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);
4172
+
4173
+ /* device stats are not reset at PFR, they likely will not be zeroed
4174
+ * when the driver starts. Thus, save the value from the first read
4175
+ * without adding to the statistic value so that we report stats which
4176
+ * count up from zero.
4177
+ */
4178
+ if (!prev_stat_loaded) {
4179
+ *prev_stat = new_data;
4180
+ return;
4181
+ }
4182
+
4183
+ /* Calculate the difference between the new and old values, and then
4184
+ * add it to the software stat value.
4185
+ */
4186
+ if (new_data >= *prev_stat)
4187
+ *cur_stat += new_data - *prev_stat;
4188
+ else
4189
+ /* to manage the potential roll-over */
4190
+ *cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;
4191
+
4192
+ /* Update the previously stored value to prepare for next read */
4193
+ *prev_stat = new_data;
4194
+}
4195
+
4196
+/**
4197
+ * ice_stat_update32 - read 32 bit stat from the chip and update stat values
4198
+ * @hw: ptr to the hardware info
4199
+ * @reg: offset of HW register to read from
4200
+ * @prev_stat_loaded: bool to specify if previous stats are loaded
4201
+ * @prev_stat: ptr to previous loaded stat value
4202
+ * @cur_stat: ptr to current stat value
4203
+ */
4204
+void
4205
+ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
4206
+ u64 *prev_stat, u64 *cur_stat)
4207
+{
4208
+ u32 new_data;
4209
+
4210
+ new_data = rd32(hw, reg);
4211
+
4212
+ /* device stats are not reset at PFR, they likely will not be zeroed
4213
+ * when the driver starts. Thus, save the value from the first read
4214
+ * without adding to the statistic value so that we report stats which
4215
+ * count up from zero.
4216
+ */
4217
+ if (!prev_stat_loaded) {
4218
+ *prev_stat = new_data;
4219
+ return;
4220
+ }
4221
+
4222
+ /* Calculate the difference between the new and old values, and then
4223
+ * add it to the software stat value.
4224
+ */
4225
+ if (new_data >= *prev_stat)
4226
+ *cur_stat += new_data - *prev_stat;
4227
+ else
4228
+ /* to manage the potential roll-over */
4229
+ *cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;
4230
+
4231
+ /* Update the previously stored value to prepare for next read */
4232
+ *prev_stat = new_data;
4233
+}
4234
+
4235
+/**
4236
+ * ice_sched_query_elem - query element information from HW
4237
+ * @hw: pointer to the HW struct
4238
+ * @node_teid: node TEID to be queried
4239
+ * @buf: buffer to element information
4240
+ *
4241
+ * This function queries HW element information
4242
+ */
4243
+enum ice_status
4244
+ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
4245
+ struct ice_aqc_txsched_elem_data *buf)
4246
+{
4247
+ u16 buf_size, num_elem_ret = 0;
4248
+ enum ice_status status;
4249
+
4250
+ buf_size = sizeof(*buf);
4251
+ memset(buf, 0, buf_size);
4252
+ buf->node_teid = cpu_to_le32(node_teid);
4253
+ status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
4254
+ NULL);
4255
+ if (status || num_elem_ret != 1)
4256
+ ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
4257
+ return status;
4258
+}
4259
+
4260
+/**
4261
+ * ice_fw_supports_link_override
4262
+ * @hw: pointer to the hardware structure
4263
+ *
4264
+ * Checks if the firmware supports link override
4265
+ */
4266
+bool ice_fw_supports_link_override(struct ice_hw *hw)
4267
+{
4268
+ /* Currently, only supported for E810 devices */
4269
+ if (hw->mac_type != ICE_MAC_E810)
4270
+ return false;
4271
+
4272
+ if (hw->api_maj_ver == ICE_FW_API_LINK_OVERRIDE_MAJ) {
4273
+ if (hw->api_min_ver > ICE_FW_API_LINK_OVERRIDE_MIN)
4274
+ return true;
4275
+ if (hw->api_min_ver == ICE_FW_API_LINK_OVERRIDE_MIN &&
4276
+ hw->api_patch >= ICE_FW_API_LINK_OVERRIDE_PATCH)
4277
+ return true;
4278
+ } else if (hw->api_maj_ver > ICE_FW_API_LINK_OVERRIDE_MAJ) {
4279
+ return true;
4280
+ }
4281
+
4282
+ return false;
4283
+}
4284
+
4285
+/**
4286
+ * ice_get_link_default_override
4287
+ * @ldo: pointer to the link default override struct
4288
+ * @pi: pointer to the port info struct
4289
+ *
4290
+ * Gets the link default override for a port
4291
+ */
4292
+enum ice_status
4293
+ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
4294
+ struct ice_port_info *pi)
4295
+{
4296
+ u16 i, tlv, tlv_len, tlv_start, buf, offset;
4297
+ struct ice_hw *hw = pi->hw;
4298
+ enum ice_status status;
4299
+
4300
+ status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
4301
+ ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
4302
+ if (status) {
4303
+ ice_debug(hw, ICE_DBG_INIT,
4304
+ "Failed to read link override TLV.\n");
4305
+ return status;
4306
+ }
4307
+
4308
+ /* Each port has its own config; calculate for our port */
4309
+ tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS +
4310
+ ICE_SR_PFA_LINK_OVERRIDE_OFFSET;
4311
+
4312
+ /* link options first */
4313
+ status = ice_read_sr_word(hw, tlv_start, &buf);
4314
+ if (status) {
4315
+ ice_debug(hw, ICE_DBG_INIT,
4316
+ "Failed to read override link options.\n");
4317
+ return status;
4318
+ }
4319
+ ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M;
4320
+ ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >>
4321
+ ICE_LINK_OVERRIDE_PHY_CFG_S;
4322
+
4323
+ /* link PHY config */
4324
+ offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET;
4325
+ status = ice_read_sr_word(hw, offset, &buf);
4326
+ if (status) {
4327
+ ice_debug(hw, ICE_DBG_INIT,
4328
+ "Failed to read override phy config.\n");
4329
+ return status;
4330
+ }
4331
+ ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M;
4332
+
4333
+ /* PHY types low */
4334
+ offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET;
4335
+ for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
4336
+ status = ice_read_sr_word(hw, (offset + i), &buf);
4337
+ if (status) {
4338
+ ice_debug(hw, ICE_DBG_INIT,
4339
+ "Failed to read override link options.\n");
4340
+ return status;
4341
+ }
4342
+ /* shift 16 bits at a time to fill 64 bits */
4343
+ ldo->phy_type_low |= ((u64)buf << (i * 16));
4344
+ }
4345
+
4346
+ /* PHY types high */
4347
+ offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET +
4348
+ ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS;
4349
+ for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
4350
+ status = ice_read_sr_word(hw, (offset + i), &buf);
4351
+ if (status) {
4352
+ ice_debug(hw, ICE_DBG_INIT,
4353
+ "Failed to read override link options.\n");
4354
+ return status;
4355
+ }
4356
+ /* shift 16 bits at a time to fill 64 bits */
4357
+ ldo->phy_type_high |= ((u64)buf << (i * 16));
4358
+ }
4359
+
4360
+ return status;
4361
+}
4362
+
4363
+/**
4364
+ * ice_is_phy_caps_an_enabled - check if PHY capabilities autoneg is enabled
4365
+ * @caps: get PHY capability data
4366
+ */
4367
+bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
4368
+{
4369
+ if (caps->caps & ICE_AQC_PHY_AN_MODE ||
4370
+ caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 |
4371
+ ICE_AQC_PHY_AN_EN_CLAUSE73 |
4372
+ ICE_AQC_PHY_AN_EN_CLAUSE37))
4373
+ return true;
4374
+
4375
+ return false;
4376
+}
4377
+
4378
+/**
4379
+ * ice_aq_set_lldp_mib - Set the LLDP MIB
4380
+ * @hw: pointer to the HW struct
4381
+ * @mib_type: Local, Remote or both Local and Remote MIBs
4382
+ * @buf: pointer to the caller-supplied buffer to store the MIB block
4383
+ * @buf_size: size of the buffer (in bytes)
4384
+ * @cd: pointer to command details structure or NULL
4385
+ *
4386
+ * Set the LLDP MIB. (0x0A08)
4387
+ */
4388
+enum ice_status
4389
+ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
4390
+ struct ice_sq_cd *cd)
4391
+{
4392
+ struct ice_aqc_lldp_set_local_mib *cmd;
4393
+ struct ice_aq_desc desc;
4394
+
4395
+ cmd = &desc.params.lldp_set_mib;
4396
+
4397
+ if (buf_size == 0 || !buf)
4398
+ return ICE_ERR_PARAM;
4399
+
4400
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);
4401
+
4402
+ desc.flags |= cpu_to_le16((u16)ICE_AQ_FLAG_RD);
4403
+ desc.datalen = cpu_to_le16(buf_size);
4404
+
4405
+ cmd->type = mib_type;
4406
+ cmd->length = cpu_to_le16(buf_size);
4407
+
4408
+ return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
4409
+}