.. | .. |
---|
2 | 2 | // Copyright (c) 2016-2017 Hisilicon Limited. |
---|
3 | 3 | |
---|
4 | 4 | #include <linux/etherdevice.h> |
---|
| 5 | +#include <linux/iopoll.h> |
---|
5 | 6 | #include <net/rtnetlink.h> |
---|
6 | 7 | #include "hclgevf_cmd.h" |
---|
7 | 8 | #include "hclgevf_main.h" |
---|
.. | .. |
---|
10 | 11 | |
---|
11 | 12 | #define HCLGEVF_NAME "hclgevf" |
---|
12 | 13 | |
---|
13 | | -static int hclgevf_init_hdev(struct hclgevf_dev *hdev); |
---|
14 | | -static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev); |
---|
| 14 | +#define HCLGEVF_RESET_MAX_FAIL_CNT 5 |
---|
| 15 | + |
---|
| 16 | +static int hclgevf_reset_hdev(struct hclgevf_dev *hdev); |
---|
15 | 17 | static struct hnae3_ae_algo ae_algovf; |
---|
16 | 18 | |
---|
| 19 | +static struct workqueue_struct *hclgevf_wq; |
---|
| 20 | + |
---|
17 | 21 | static const struct pci_device_id ae_algovf_pci_tbl[] = { |
---|
18 | | - {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0}, |
---|
19 | | - {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0}, |
---|
| 22 | + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_VF), 0}, |
---|
| 23 | + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_RDMA_DCB_PFC_VF), |
---|
| 24 | + HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, |
---|
20 | 25 | /* required last entry */ |
---|
21 | 26 | {0, } |
---|
22 | 27 | }; |
---|
23 | 28 | |
---|
| 29 | +static const u8 hclgevf_hash_key[] = { |
---|
| 30 | + 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, |
---|
| 31 | + 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0, |
---|
| 32 | + 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4, |
---|
| 33 | + 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, |
---|
| 34 | + 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA |
---|
| 35 | +}; |
---|
| 36 | + |
---|
24 | 37 | MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl); |
---|
25 | 38 | |
---|
26 | | -static inline struct hclgevf_dev *hclgevf_ae_get_hdev( |
---|
27 | | - struct hnae3_handle *handle) |
---|
| 39 | +static const u32 cmdq_reg_addr_list[] = {HCLGEVF_CMDQ_TX_ADDR_L_REG, |
---|
| 40 | + HCLGEVF_CMDQ_TX_ADDR_H_REG, |
---|
| 41 | + HCLGEVF_CMDQ_TX_DEPTH_REG, |
---|
| 42 | + HCLGEVF_CMDQ_TX_TAIL_REG, |
---|
| 43 | + HCLGEVF_CMDQ_TX_HEAD_REG, |
---|
| 44 | + HCLGEVF_CMDQ_RX_ADDR_L_REG, |
---|
| 45 | + HCLGEVF_CMDQ_RX_ADDR_H_REG, |
---|
| 46 | + HCLGEVF_CMDQ_RX_DEPTH_REG, |
---|
| 47 | + HCLGEVF_CMDQ_RX_TAIL_REG, |
---|
| 48 | + HCLGEVF_CMDQ_RX_HEAD_REG, |
---|
| 49 | + HCLGEVF_VECTOR0_CMDQ_SRC_REG, |
---|
| 50 | + HCLGEVF_VECTOR0_CMDQ_STATE_REG, |
---|
| 51 | + HCLGEVF_CMDQ_INTR_EN_REG, |
---|
| 52 | + HCLGEVF_CMDQ_INTR_GEN_REG}; |
---|
| 53 | + |
---|
| 54 | +static const u32 common_reg_addr_list[] = {HCLGEVF_MISC_VECTOR_REG_BASE, |
---|
| 55 | + HCLGEVF_RST_ING, |
---|
| 56 | + HCLGEVF_GRO_EN_REG}; |
---|
| 57 | + |
---|
| 58 | +static const u32 ring_reg_addr_list[] = {HCLGEVF_RING_RX_ADDR_L_REG, |
---|
| 59 | + HCLGEVF_RING_RX_ADDR_H_REG, |
---|
| 60 | + HCLGEVF_RING_RX_BD_NUM_REG, |
---|
| 61 | + HCLGEVF_RING_RX_BD_LENGTH_REG, |
---|
| 62 | + HCLGEVF_RING_RX_MERGE_EN_REG, |
---|
| 63 | + HCLGEVF_RING_RX_TAIL_REG, |
---|
| 64 | + HCLGEVF_RING_RX_HEAD_REG, |
---|
| 65 | + HCLGEVF_RING_RX_FBD_NUM_REG, |
---|
| 66 | + HCLGEVF_RING_RX_OFFSET_REG, |
---|
| 67 | + HCLGEVF_RING_RX_FBD_OFFSET_REG, |
---|
| 68 | + HCLGEVF_RING_RX_STASH_REG, |
---|
| 69 | + HCLGEVF_RING_RX_BD_ERR_REG, |
---|
| 70 | + HCLGEVF_RING_TX_ADDR_L_REG, |
---|
| 71 | + HCLGEVF_RING_TX_ADDR_H_REG, |
---|
| 72 | + HCLGEVF_RING_TX_BD_NUM_REG, |
---|
| 73 | + HCLGEVF_RING_TX_PRIORITY_REG, |
---|
| 74 | + HCLGEVF_RING_TX_TC_REG, |
---|
| 75 | + HCLGEVF_RING_TX_MERGE_EN_REG, |
---|
| 76 | + HCLGEVF_RING_TX_TAIL_REG, |
---|
| 77 | + HCLGEVF_RING_TX_HEAD_REG, |
---|
| 78 | + HCLGEVF_RING_TX_FBD_NUM_REG, |
---|
| 79 | + HCLGEVF_RING_TX_OFFSET_REG, |
---|
| 80 | + HCLGEVF_RING_TX_EBD_NUM_REG, |
---|
| 81 | + HCLGEVF_RING_TX_EBD_OFFSET_REG, |
---|
| 82 | + HCLGEVF_RING_TX_BD_ERR_REG, |
---|
| 83 | + HCLGEVF_RING_EN_REG}; |
---|
| 84 | + |
---|
| 85 | +static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG, |
---|
| 86 | + HCLGEVF_TQP_INTR_GL0_REG, |
---|
| 87 | + HCLGEVF_TQP_INTR_GL1_REG, |
---|
| 88 | + HCLGEVF_TQP_INTR_GL2_REG, |
---|
| 89 | + HCLGEVF_TQP_INTR_RL_REG}; |
---|
| 90 | + |
---|
| 91 | +static struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle) |
---|
28 | 92 | { |
---|
29 | 93 | if (!handle->client) |
---|
30 | 94 | return container_of(handle, struct hclgevf_dev, nic); |
---|
.. | .. |
---|
36 | 100 | |
---|
37 | 101 | static int hclgevf_tqps_update_stats(struct hnae3_handle *handle) |
---|
38 | 102 | { |
---|
| 103 | + struct hnae3_knic_private_info *kinfo = &handle->kinfo; |
---|
39 | 104 | struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); |
---|
40 | | - struct hnae3_queue *queue; |
---|
41 | 105 | struct hclgevf_desc desc; |
---|
42 | 106 | struct hclgevf_tqp *tqp; |
---|
43 | 107 | int status; |
---|
44 | 108 | int i; |
---|
45 | 109 | |
---|
46 | | - for (i = 0; i < hdev->num_tqps; i++) { |
---|
47 | | - queue = handle->kinfo.tqp[i]; |
---|
48 | | - tqp = container_of(queue, struct hclgevf_tqp, q); |
---|
| 110 | + for (i = 0; i < kinfo->num_tqps; i++) { |
---|
| 111 | + tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); |
---|
49 | 112 | hclgevf_cmd_setup_basic_desc(&desc, |
---|
50 | 113 | HCLGEVF_OPC_QUERY_RX_STATUS, |
---|
51 | 114 | true); |
---|
.. | .. |
---|
82 | 145 | static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data) |
---|
83 | 146 | { |
---|
84 | 147 | struct hnae3_knic_private_info *kinfo = &handle->kinfo; |
---|
85 | | - struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); |
---|
86 | 148 | struct hclgevf_tqp *tqp; |
---|
87 | 149 | u64 *buff = data; |
---|
88 | 150 | int i; |
---|
89 | 151 | |
---|
90 | | - for (i = 0; i < hdev->num_tqps; i++) { |
---|
91 | | - tqp = container_of(handle->kinfo.tqp[i], struct hclgevf_tqp, q); |
---|
| 152 | + for (i = 0; i < kinfo->num_tqps; i++) { |
---|
| 153 | + tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); |
---|
92 | 154 | *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd; |
---|
93 | 155 | } |
---|
94 | 156 | for (i = 0; i < kinfo->num_tqps; i++) { |
---|
95 | | - tqp = container_of(handle->kinfo.tqp[i], struct hclgevf_tqp, q); |
---|
| 157 | + tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); |
---|
96 | 158 | *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd; |
---|
97 | 159 | } |
---|
98 | 160 | |
---|
.. | .. |
---|
101 | 163 | |
---|
102 | 164 | static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset) |
---|
103 | 165 | { |
---|
104 | | - struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); |
---|
| 166 | + struct hnae3_knic_private_info *kinfo = &handle->kinfo; |
---|
105 | 167 | |
---|
106 | | - return hdev->num_tqps * 2; |
---|
| 168 | + return kinfo->num_tqps * 2; |
---|
107 | 169 | } |
---|
108 | 170 | |
---|
109 | 171 | static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data) |
---|
110 | 172 | { |
---|
111 | | - struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); |
---|
| 173 | + struct hnae3_knic_private_info *kinfo = &handle->kinfo; |
---|
112 | 174 | u8 *buff = data; |
---|
113 | | - int i = 0; |
---|
| 175 | + int i; |
---|
114 | 176 | |
---|
115 | | - for (i = 0; i < hdev->num_tqps; i++) { |
---|
116 | | - struct hclgevf_tqp *tqp = container_of(handle->kinfo.tqp[i], |
---|
117 | | - struct hclgevf_tqp, q); |
---|
118 | | - snprintf(buff, ETH_GSTRING_LEN, "txq#%d_pktnum_rcd", |
---|
| 177 | + for (i = 0; i < kinfo->num_tqps; i++) { |
---|
| 178 | + struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i], |
---|
| 179 | + struct hclgevf_tqp, q); |
---|
| 180 | + snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd", |
---|
119 | 181 | tqp->index); |
---|
120 | 182 | buff += ETH_GSTRING_LEN; |
---|
121 | 183 | } |
---|
122 | 184 | |
---|
123 | | - for (i = 0; i < hdev->num_tqps; i++) { |
---|
124 | | - struct hclgevf_tqp *tqp = container_of(handle->kinfo.tqp[i], |
---|
125 | | - struct hclgevf_tqp, q); |
---|
126 | | - snprintf(buff, ETH_GSTRING_LEN, "rxq#%d_pktnum_rcd", |
---|
| 185 | + for (i = 0; i < kinfo->num_tqps; i++) { |
---|
| 186 | + struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i], |
---|
| 187 | + struct hclgevf_tqp, q); |
---|
| 188 | + snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd", |
---|
127 | 189 | tqp->index); |
---|
128 | 190 | buff += ETH_GSTRING_LEN; |
---|
129 | 191 | } |
---|
.. | .. |
---|
168 | 230 | hclgevf_tqps_get_stats(handle, data); |
---|
169 | 231 | } |
---|
170 | 232 | |
---|
| 233 | +static void hclgevf_build_send_msg(struct hclge_vf_to_pf_msg *msg, u8 code, |
---|
| 234 | + u8 subcode) |
---|
| 235 | +{ |
---|
| 236 | + if (msg) { |
---|
| 237 | + memset(msg, 0, sizeof(struct hclge_vf_to_pf_msg)); |
---|
| 238 | + msg->code = code; |
---|
| 239 | + msg->subcode = subcode; |
---|
| 240 | + } |
---|
| 241 | +} |
---|
| 242 | + |
---|
171 | 243 | static int hclgevf_get_tc_info(struct hclgevf_dev *hdev) |
---|
172 | 244 | { |
---|
| 245 | + struct hclge_vf_to_pf_msg send_msg; |
---|
173 | 246 | u8 resp_msg; |
---|
174 | 247 | int status; |
---|
175 | 248 | |
---|
176 | | - status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_TCINFO, 0, NULL, 0, |
---|
177 | | - true, &resp_msg, sizeof(u8)); |
---|
| 249 | + hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_TCINFO, 0); |
---|
| 250 | + status = hclgevf_send_mbx_msg(hdev, &send_msg, true, &resp_msg, |
---|
| 251 | + sizeof(resp_msg)); |
---|
178 | 252 | if (status) { |
---|
179 | 253 | dev_err(&hdev->pdev->dev, |
---|
180 | 254 | "VF request to get TC info from PF failed %d", |
---|
.. | .. |
---|
187 | 261 | return 0; |
---|
188 | 262 | } |
---|
189 | 263 | |
---|
190 | | -static int hclge_get_queue_info(struct hclgevf_dev *hdev) |
---|
| 264 | +static int hclgevf_get_port_base_vlan_filter_state(struct hclgevf_dev *hdev) |
---|
191 | 265 | { |
---|
192 | | -#define HCLGEVF_TQPS_RSS_INFO_LEN 8 |
---|
| 266 | + struct hnae3_handle *nic = &hdev->nic; |
---|
| 267 | + struct hclge_vf_to_pf_msg send_msg; |
---|
| 268 | + u8 resp_msg; |
---|
| 269 | + int ret; |
---|
| 270 | + |
---|
| 271 | + hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, |
---|
| 272 | + HCLGE_MBX_GET_PORT_BASE_VLAN_STATE); |
---|
| 273 | + ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &resp_msg, |
---|
| 274 | + sizeof(u8)); |
---|
| 275 | + if (ret) { |
---|
| 276 | + dev_err(&hdev->pdev->dev, |
---|
| 277 | + "VF request to get port based vlan state failed %d", |
---|
| 278 | + ret); |
---|
| 279 | + return ret; |
---|
| 280 | + } |
---|
| 281 | + |
---|
| 282 | + nic->port_base_vlan_state = resp_msg; |
---|
| 283 | + |
---|
| 284 | + return 0; |
---|
| 285 | +} |
---|
| 286 | + |
---|
| 287 | +static int hclgevf_get_queue_info(struct hclgevf_dev *hdev) |
---|
| 288 | +{ |
---|
| 289 | +#define HCLGEVF_TQPS_RSS_INFO_LEN 6 |
---|
| 290 | +#define HCLGEVF_TQPS_ALLOC_OFFSET 0 |
---|
| 291 | +#define HCLGEVF_TQPS_RSS_SIZE_OFFSET 2 |
---|
| 292 | +#define HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET 4 |
---|
| 293 | + |
---|
193 | 294 | u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN]; |
---|
| 295 | + struct hclge_vf_to_pf_msg send_msg; |
---|
194 | 296 | int status; |
---|
195 | 297 | |
---|
196 | | - status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QINFO, 0, NULL, 0, |
---|
197 | | - true, resp_msg, |
---|
| 298 | + hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QINFO, 0); |
---|
| 299 | + status = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg, |
---|
198 | 300 | HCLGEVF_TQPS_RSS_INFO_LEN); |
---|
199 | 301 | if (status) { |
---|
200 | 302 | dev_err(&hdev->pdev->dev, |
---|
.. | .. |
---|
203 | 305 | return status; |
---|
204 | 306 | } |
---|
205 | 307 | |
---|
206 | | - memcpy(&hdev->num_tqps, &resp_msg[0], sizeof(u16)); |
---|
207 | | - memcpy(&hdev->rss_size_max, &resp_msg[2], sizeof(u16)); |
---|
208 | | - memcpy(&hdev->num_desc, &resp_msg[4], sizeof(u16)); |
---|
209 | | - memcpy(&hdev->rx_buf_len, &resp_msg[6], sizeof(u16)); |
---|
| 308 | + memcpy(&hdev->num_tqps, &resp_msg[HCLGEVF_TQPS_ALLOC_OFFSET], |
---|
| 309 | + sizeof(u16)); |
---|
| 310 | + memcpy(&hdev->rss_size_max, &resp_msg[HCLGEVF_TQPS_RSS_SIZE_OFFSET], |
---|
| 311 | + sizeof(u16)); |
---|
| 312 | + memcpy(&hdev->rx_buf_len, &resp_msg[HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET], |
---|
| 313 | + sizeof(u16)); |
---|
| 314 | + |
---|
| 315 | + return 0; |
---|
| 316 | +} |
---|
| 317 | + |
---|
| 318 | +static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev) |
---|
| 319 | +{ |
---|
| 320 | +#define HCLGEVF_TQPS_DEPTH_INFO_LEN 4 |
---|
| 321 | +#define HCLGEVF_TQPS_NUM_TX_DESC_OFFSET 0 |
---|
| 322 | +#define HCLGEVF_TQPS_NUM_RX_DESC_OFFSET 2 |
---|
| 323 | + |
---|
| 324 | + u8 resp_msg[HCLGEVF_TQPS_DEPTH_INFO_LEN]; |
---|
| 325 | + struct hclge_vf_to_pf_msg send_msg; |
---|
| 326 | + int ret; |
---|
| 327 | + |
---|
| 328 | + hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QDEPTH, 0); |
---|
| 329 | + ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg, |
---|
| 330 | + HCLGEVF_TQPS_DEPTH_INFO_LEN); |
---|
| 331 | + if (ret) { |
---|
| 332 | + dev_err(&hdev->pdev->dev, |
---|
| 333 | + "VF request to get tqp depth info from PF failed %d", |
---|
| 334 | + ret); |
---|
| 335 | + return ret; |
---|
| 336 | + } |
---|
| 337 | + |
---|
| 338 | + memcpy(&hdev->num_tx_desc, &resp_msg[HCLGEVF_TQPS_NUM_TX_DESC_OFFSET], |
---|
| 339 | + sizeof(u16)); |
---|
| 340 | + memcpy(&hdev->num_rx_desc, &resp_msg[HCLGEVF_TQPS_NUM_RX_DESC_OFFSET], |
---|
| 341 | + sizeof(u16)); |
---|
| 342 | + |
---|
| 343 | + return 0; |
---|
| 344 | +} |
---|
| 345 | + |
---|
| 346 | +static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id) |
---|
| 347 | +{ |
---|
| 348 | + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); |
---|
| 349 | + struct hclge_vf_to_pf_msg send_msg; |
---|
| 350 | + u16 qid_in_pf = 0; |
---|
| 351 | + u8 resp_data[2]; |
---|
| 352 | + int ret; |
---|
| 353 | + |
---|
| 354 | + hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QID_IN_PF, 0); |
---|
| 355 | + memcpy(send_msg.data, &queue_id, sizeof(queue_id)); |
---|
| 356 | + ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_data, |
---|
| 357 | + sizeof(resp_data)); |
---|
| 358 | + if (!ret) |
---|
| 359 | + qid_in_pf = *(u16 *)resp_data; |
---|
| 360 | + |
---|
| 361 | + return qid_in_pf; |
---|
| 362 | +} |
---|
| 363 | + |
---|
| 364 | +static int hclgevf_get_pf_media_type(struct hclgevf_dev *hdev) |
---|
| 365 | +{ |
---|
| 366 | + struct hclge_vf_to_pf_msg send_msg; |
---|
| 367 | + u8 resp_msg[2]; |
---|
| 368 | + int ret; |
---|
| 369 | + |
---|
| 370 | + hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MEDIA_TYPE, 0); |
---|
| 371 | + ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg, |
---|
| 372 | + sizeof(resp_msg)); |
---|
| 373 | + if (ret) { |
---|
| 374 | + dev_err(&hdev->pdev->dev, |
---|
| 375 | + "VF request to get the pf port media type failed %d", |
---|
| 376 | + ret); |
---|
| 377 | + return ret; |
---|
| 378 | + } |
---|
| 379 | + |
---|
| 380 | + hdev->hw.mac.media_type = resp_msg[0]; |
---|
| 381 | + hdev->hw.mac.module_type = resp_msg[1]; |
---|
210 | 382 | |
---|
211 | 383 | return 0; |
---|
212 | 384 | } |
---|
.. | .. |
---|
215 | 387 | { |
---|
216 | 388 | struct hclgevf_tqp *tqp; |
---|
217 | 389 | int i; |
---|
218 | | - |
---|
219 | | - /* if this is on going reset then we need to re-allocate the TPQs |
---|
220 | | - * since we cannot assume we would get same number of TPQs back from PF |
---|
221 | | - */ |
---|
222 | | - if (hclgevf_dev_ongoing_reset(hdev)) |
---|
223 | | - devm_kfree(&hdev->pdev->dev, hdev->htqp); |
---|
224 | 390 | |
---|
225 | 391 | hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps, |
---|
226 | 392 | sizeof(struct hclgevf_tqp), GFP_KERNEL); |
---|
.. | .. |
---|
235 | 401 | |
---|
236 | 402 | tqp->q.ae_algo = &ae_algovf; |
---|
237 | 403 | tqp->q.buf_size = hdev->rx_buf_len; |
---|
238 | | - tqp->q.desc_num = hdev->num_desc; |
---|
| 404 | + tqp->q.tx_desc_num = hdev->num_tx_desc; |
---|
| 405 | + tqp->q.rx_desc_num = hdev->num_rx_desc; |
---|
239 | 406 | tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET + |
---|
240 | 407 | i * HCLGEVF_TQP_REG_SIZE; |
---|
241 | 408 | |
---|
.. | .. |
---|
250 | 417 | struct hnae3_handle *nic = &hdev->nic; |
---|
251 | 418 | struct hnae3_knic_private_info *kinfo; |
---|
252 | 419 | u16 new_tqps = hdev->num_tqps; |
---|
253 | | - int i; |
---|
| 420 | + unsigned int i; |
---|
254 | 421 | |
---|
255 | 422 | kinfo = &nic->kinfo; |
---|
256 | 423 | kinfo->num_tc = 0; |
---|
257 | | - kinfo->num_desc = hdev->num_desc; |
---|
| 424 | + kinfo->num_tx_desc = hdev->num_tx_desc; |
---|
| 425 | + kinfo->num_rx_desc = hdev->num_rx_desc; |
---|
258 | 426 | kinfo->rx_buf_len = hdev->rx_buf_len; |
---|
259 | 427 | for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) |
---|
260 | 428 | if (hdev->hw_tc_map & BIT(i)) |
---|
.. | .. |
---|
264 | 432 | = min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc); |
---|
265 | 433 | new_tqps = kinfo->rss_size * kinfo->num_tc; |
---|
266 | 434 | kinfo->num_tqps = min(new_tqps, hdev->num_tqps); |
---|
267 | | - |
---|
268 | | - /* if this is on going reset then we need to re-allocate the hnae queues |
---|
269 | | - * as well since number of TPQs from PF might have changed. |
---|
270 | | - */ |
---|
271 | | - if (hclgevf_dev_ongoing_reset(hdev)) |
---|
272 | | - devm_kfree(&hdev->pdev->dev, kinfo->tqp); |
---|
273 | 435 | |
---|
274 | 436 | kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps, |
---|
275 | 437 | sizeof(struct hnae3_queue *), GFP_KERNEL); |
---|
.. | .. |
---|
282 | 444 | kinfo->tqp[i] = &hdev->htqp[i].q; |
---|
283 | 445 | } |
---|
284 | 446 | |
---|
| 447 | + /* after init the max rss_size and tqps, adjust the default tqp numbers |
---|
| 448 | + * and rss size with the actual vector numbers |
---|
| 449 | + */ |
---|
| 450 | + kinfo->num_tqps = min_t(u16, hdev->num_nic_msix - 1, kinfo->num_tqps); |
---|
| 451 | + kinfo->rss_size = min_t(u16, kinfo->num_tqps / kinfo->num_tc, |
---|
| 452 | + kinfo->rss_size); |
---|
| 453 | + |
---|
285 | 454 | return 0; |
---|
286 | 455 | } |
---|
287 | 456 | |
---|
288 | 457 | static void hclgevf_request_link_info(struct hclgevf_dev *hdev) |
---|
289 | 458 | { |
---|
| 459 | + struct hclge_vf_to_pf_msg send_msg; |
---|
290 | 460 | int status; |
---|
291 | | - u8 resp_msg; |
---|
292 | 461 | |
---|
293 | | - status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_STATUS, 0, NULL, |
---|
294 | | - 0, false, &resp_msg, sizeof(u8)); |
---|
| 462 | + hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_STATUS, 0); |
---|
| 463 | + status = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); |
---|
295 | 464 | if (status) |
---|
296 | 465 | dev_err(&hdev->pdev->dev, |
---|
297 | 466 | "VF failed to fetch link status(%d) from PF", status); |
---|
.. | .. |
---|
299 | 468 | |
---|
300 | 469 | void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state) |
---|
301 | 470 | { |
---|
| 471 | + struct hnae3_handle *rhandle = &hdev->roce; |
---|
302 | 472 | struct hnae3_handle *handle = &hdev->nic; |
---|
| 473 | + struct hnae3_client *rclient; |
---|
303 | 474 | struct hnae3_client *client; |
---|
304 | 475 | |
---|
| 476 | + if (test_and_set_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state)) |
---|
| 477 | + return; |
---|
| 478 | + |
---|
305 | 479 | client = handle->client; |
---|
| 480 | + rclient = hdev->roce_client; |
---|
306 | 481 | |
---|
307 | 482 | link_state = |
---|
308 | 483 | test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state; |
---|
309 | 484 | |
---|
310 | 485 | if (link_state != hdev->hw.mac.link) { |
---|
311 | 486 | client->ops->link_status_change(handle, !!link_state); |
---|
| 487 | + if (rclient && rclient->ops->link_status_change) |
---|
| 488 | + rclient->ops->link_status_change(rhandle, !!link_state); |
---|
312 | 489 | hdev->hw.mac.link = link_state; |
---|
313 | 490 | } |
---|
| 491 | + |
---|
| 492 | + clear_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state); |
---|
| 493 | +} |
---|
| 494 | + |
---|
| 495 | +static void hclgevf_update_link_mode(struct hclgevf_dev *hdev) |
---|
| 496 | +{ |
---|
| 497 | +#define HCLGEVF_ADVERTISING 0 |
---|
| 498 | +#define HCLGEVF_SUPPORTED 1 |
---|
| 499 | + |
---|
| 500 | + struct hclge_vf_to_pf_msg send_msg; |
---|
| 501 | + |
---|
| 502 | + hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_MODE, 0); |
---|
| 503 | + send_msg.data[0] = HCLGEVF_ADVERTISING; |
---|
| 504 | + hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); |
---|
| 505 | + send_msg.data[0] = HCLGEVF_SUPPORTED; |
---|
| 506 | + hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); |
---|
314 | 507 | } |
---|
315 | 508 | |
---|
316 | 509 | static int hclgevf_set_handle_info(struct hclgevf_dev *hdev) |
---|
.. | .. |
---|
322 | 515 | nic->pdev = hdev->pdev; |
---|
323 | 516 | nic->numa_node_mask = hdev->numa_node_mask; |
---|
324 | 517 | nic->flags |= HNAE3_SUPPORT_VF; |
---|
325 | | - |
---|
326 | | - if (hdev->ae_dev->dev_type != HNAE3_DEV_KNIC) { |
---|
327 | | - dev_err(&hdev->pdev->dev, "unsupported device type %d\n", |
---|
328 | | - hdev->ae_dev->dev_type); |
---|
329 | | - return -EINVAL; |
---|
330 | | - } |
---|
331 | 518 | |
---|
332 | 519 | ret = hclgevf_knic_setup(hdev); |
---|
333 | 520 | if (ret) |
---|
.. | .. |
---|
357 | 544 | int alloc = 0; |
---|
358 | 545 | int i, j; |
---|
359 | 546 | |
---|
| 547 | + vector_num = min_t(u16, hdev->num_nic_msix - 1, vector_num); |
---|
360 | 548 | vector_num = min(hdev->num_msi_left, vector_num); |
---|
361 | 549 | |
---|
362 | 550 | for (j = 0; j < vector_num; j++) { |
---|
.. | .. |
---|
391 | 579 | return i; |
---|
392 | 580 | |
---|
393 | 581 | return -EINVAL; |
---|
| 582 | +} |
---|
| 583 | + |
---|
| 584 | +static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev, |
---|
| 585 | + const u8 hfunc, const u8 *key) |
---|
| 586 | +{ |
---|
| 587 | + struct hclgevf_rss_config_cmd *req; |
---|
| 588 | + unsigned int key_offset = 0; |
---|
| 589 | + struct hclgevf_desc desc; |
---|
| 590 | + int key_counts; |
---|
| 591 | + int key_size; |
---|
| 592 | + int ret; |
---|
| 593 | + |
---|
| 594 | + key_counts = HCLGEVF_RSS_KEY_SIZE; |
---|
| 595 | + req = (struct hclgevf_rss_config_cmd *)desc.data; |
---|
| 596 | + |
---|
| 597 | + while (key_counts) { |
---|
| 598 | + hclgevf_cmd_setup_basic_desc(&desc, |
---|
| 599 | + HCLGEVF_OPC_RSS_GENERIC_CONFIG, |
---|
| 600 | + false); |
---|
| 601 | + |
---|
| 602 | + req->hash_config |= (hfunc & HCLGEVF_RSS_HASH_ALGO_MASK); |
---|
| 603 | + req->hash_config |= |
---|
| 604 | + (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B); |
---|
| 605 | + |
---|
| 606 | + key_size = min(HCLGEVF_RSS_HASH_KEY_NUM, key_counts); |
---|
| 607 | + memcpy(req->hash_key, |
---|
| 608 | + key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size); |
---|
| 609 | + |
---|
| 610 | + key_counts -= key_size; |
---|
| 611 | + key_offset++; |
---|
| 612 | + ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); |
---|
| 613 | + if (ret) { |
---|
| 614 | + dev_err(&hdev->pdev->dev, |
---|
| 615 | + "Configure RSS config fail, status = %d\n", |
---|
| 616 | + ret); |
---|
| 617 | + return ret; |
---|
| 618 | + } |
---|
| 619 | + } |
---|
| 620 | + |
---|
| 621 | + return 0; |
---|
394 | 622 | } |
---|
395 | 623 | |
---|
396 | 624 | static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle) |
---|
.. | .. |
---|
442 | 670 | u16 tc_size[HCLGEVF_MAX_TC_NUM]; |
---|
443 | 671 | struct hclgevf_desc desc; |
---|
444 | 672 | u16 roundup_size; |
---|
| 673 | + unsigned int i; |
---|
445 | 674 | int status; |
---|
446 | | - int i; |
---|
447 | 675 | |
---|
448 | 676 | req = (struct hclgevf_rss_tc_mode_cmd *)desc.data; |
---|
449 | 677 | |
---|
.. | .. |
---|
473 | 701 | return status; |
---|
474 | 702 | } |
---|
475 | 703 | |
---|
476 | | -static int hclgevf_get_rss_hw_cfg(struct hnae3_handle *handle, u8 *hash, |
---|
477 | | - u8 *key) |
---|
| 704 | +/* for revision 0x20, vf shared the same rss config with pf */ |
---|
| 705 | +static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev) |
---|
478 | 706 | { |
---|
479 | | - struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); |
---|
480 | | - struct hclgevf_rss_config_cmd *req; |
---|
481 | | - int lkup_times = key ? 3 : 1; |
---|
482 | | - struct hclgevf_desc desc; |
---|
483 | | - int key_offset; |
---|
484 | | - int key_size; |
---|
485 | | - int status; |
---|
| 707 | +#define HCLGEVF_RSS_MBX_RESP_LEN 8 |
---|
| 708 | + struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; |
---|
| 709 | + u8 resp_msg[HCLGEVF_RSS_MBX_RESP_LEN]; |
---|
| 710 | + struct hclge_vf_to_pf_msg send_msg; |
---|
| 711 | + u16 msg_num, hash_key_index; |
---|
| 712 | + u8 index; |
---|
| 713 | + int ret; |
---|
486 | 714 | |
---|
487 | | - req = (struct hclgevf_rss_config_cmd *)desc.data; |
---|
488 | | - lkup_times = (lkup_times == 3) ? 3 : ((hash) ? 1 : 0); |
---|
489 | | - |
---|
490 | | - for (key_offset = 0; key_offset < lkup_times; key_offset++) { |
---|
491 | | - hclgevf_cmd_setup_basic_desc(&desc, |
---|
492 | | - HCLGEVF_OPC_RSS_GENERIC_CONFIG, |
---|
493 | | - true); |
---|
494 | | - req->hash_config |= (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET); |
---|
495 | | - |
---|
496 | | - status = hclgevf_cmd_send(&hdev->hw, &desc, 1); |
---|
497 | | - if (status) { |
---|
| 715 | + hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_RSS_KEY, 0); |
---|
| 716 | + msg_num = (HCLGEVF_RSS_KEY_SIZE + HCLGEVF_RSS_MBX_RESP_LEN - 1) / |
---|
| 717 | + HCLGEVF_RSS_MBX_RESP_LEN; |
---|
| 718 | + for (index = 0; index < msg_num; index++) { |
---|
| 719 | + send_msg.data[0] = index; |
---|
| 720 | + ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg, |
---|
| 721 | + HCLGEVF_RSS_MBX_RESP_LEN); |
---|
| 722 | + if (ret) { |
---|
498 | 723 | dev_err(&hdev->pdev->dev, |
---|
499 | | - "failed to get hardware RSS cfg, status = %d\n", |
---|
500 | | - status); |
---|
501 | | - return status; |
---|
| 724 | + "VF get rss hash key from PF failed, ret=%d", |
---|
| 725 | + ret); |
---|
| 726 | + return ret; |
---|
502 | 727 | } |
---|
503 | 728 | |
---|
504 | | - if (key_offset == 2) |
---|
505 | | - key_size = |
---|
506 | | - HCLGEVF_RSS_KEY_SIZE - HCLGEVF_RSS_HASH_KEY_NUM * 2; |
---|
| 729 | + hash_key_index = HCLGEVF_RSS_MBX_RESP_LEN * index; |
---|
| 730 | + if (index == msg_num - 1) |
---|
| 731 | + memcpy(&rss_cfg->rss_hash_key[hash_key_index], |
---|
| 732 | + &resp_msg[0], |
---|
| 733 | + HCLGEVF_RSS_KEY_SIZE - hash_key_index); |
---|
507 | 734 | else |
---|
508 | | - key_size = HCLGEVF_RSS_HASH_KEY_NUM; |
---|
509 | | - |
---|
510 | | - if (key) |
---|
511 | | - memcpy(key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, |
---|
512 | | - req->hash_key, |
---|
513 | | - key_size); |
---|
514 | | - } |
---|
515 | | - |
---|
516 | | - if (hash) { |
---|
517 | | - if ((req->hash_config & 0xf) == HCLGEVF_RSS_HASH_ALGO_TOEPLITZ) |
---|
518 | | - *hash = ETH_RSS_HASH_TOP; |
---|
519 | | - else |
---|
520 | | - *hash = ETH_RSS_HASH_UNKNOWN; |
---|
| 735 | + memcpy(&rss_cfg->rss_hash_key[hash_key_index], |
---|
| 736 | + &resp_msg[0], HCLGEVF_RSS_MBX_RESP_LEN); |
---|
521 | 737 | } |
---|
522 | 738 | |
---|
523 | 739 | return 0; |
---|
.. | .. |
---|
528 | 744 | { |
---|
529 | 745 | struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); |
---|
530 | 746 | struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; |
---|
531 | | - int i; |
---|
| 747 | + int i, ret; |
---|
| 748 | + |
---|
| 749 | + if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { |
---|
| 750 | + /* Get hash algorithm */ |
---|
| 751 | + if (hfunc) { |
---|
| 752 | + switch (rss_cfg->hash_algo) { |
---|
| 753 | + case HCLGEVF_RSS_HASH_ALGO_TOEPLITZ: |
---|
| 754 | + *hfunc = ETH_RSS_HASH_TOP; |
---|
| 755 | + break; |
---|
| 756 | + case HCLGEVF_RSS_HASH_ALGO_SIMPLE: |
---|
| 757 | + *hfunc = ETH_RSS_HASH_XOR; |
---|
| 758 | + break; |
---|
| 759 | + default: |
---|
| 760 | + *hfunc = ETH_RSS_HASH_UNKNOWN; |
---|
| 761 | + break; |
---|
| 762 | + } |
---|
| 763 | + } |
---|
| 764 | + |
---|
| 765 | + /* Get the RSS Key required by the user */ |
---|
| 766 | + if (key) |
---|
| 767 | + memcpy(key, rss_cfg->rss_hash_key, |
---|
| 768 | + HCLGEVF_RSS_KEY_SIZE); |
---|
| 769 | + } else { |
---|
| 770 | + if (hfunc) |
---|
| 771 | + *hfunc = ETH_RSS_HASH_TOP; |
---|
| 772 | + if (key) { |
---|
| 773 | + ret = hclgevf_get_rss_hash_key(hdev); |
---|
| 774 | + if (ret) |
---|
| 775 | + return ret; |
---|
| 776 | + memcpy(key, rss_cfg->rss_hash_key, |
---|
| 777 | + HCLGEVF_RSS_KEY_SIZE); |
---|
| 778 | + } |
---|
| 779 | + } |
---|
532 | 780 | |
---|
533 | 781 | if (indir) |
---|
534 | 782 | for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) |
---|
535 | 783 | indir[i] = rss_cfg->rss_indirection_tbl[i]; |
---|
536 | 784 | |
---|
537 | | - return hclgevf_get_rss_hw_cfg(handle, hfunc, key); |
---|
| 785 | + return 0; |
---|
| 786 | +} |
---|
| 787 | + |
---|
| 788 | +static int hclgevf_parse_rss_hfunc(struct hclgevf_dev *hdev, const u8 hfunc, |
---|
| 789 | + u8 *hash_algo) |
---|
| 790 | +{ |
---|
| 791 | + switch (hfunc) { |
---|
| 792 | + case ETH_RSS_HASH_TOP: |
---|
| 793 | + *hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ; |
---|
| 794 | + return 0; |
---|
| 795 | + case ETH_RSS_HASH_XOR: |
---|
| 796 | + *hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE; |
---|
| 797 | + return 0; |
---|
| 798 | + case ETH_RSS_HASH_NO_CHANGE: |
---|
| 799 | + *hash_algo = hdev->rss_cfg.hash_algo; |
---|
| 800 | + return 0; |
---|
| 801 | + default: |
---|
| 802 | + return -EINVAL; |
---|
| 803 | + } |
---|
538 | 804 | } |
---|
539 | 805 | |
---|
540 | 806 | static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir, |
---|
541 | | - const u8 *key, const u8 hfunc) |
---|
| 807 | + const u8 *key, const u8 hfunc) |
---|
542 | 808 | { |
---|
543 | 809 | struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); |
---|
544 | 810 | struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; |
---|
545 | | - int i; |
---|
| 811 | + u8 hash_algo; |
---|
| 812 | + int ret, i; |
---|
| 813 | + |
---|
| 814 | + if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { |
---|
| 815 | + ret = hclgevf_parse_rss_hfunc(hdev, hfunc, &hash_algo); |
---|
| 816 | + if (ret) |
---|
| 817 | + return ret; |
---|
| 818 | + |
---|
| 819 | + /* Set the RSS Hash Key if specififed by the user */ |
---|
| 820 | + if (key) { |
---|
| 821 | + ret = hclgevf_set_rss_algo_key(hdev, hash_algo, key); |
---|
| 822 | + if (ret) { |
---|
| 823 | + dev_err(&hdev->pdev->dev, |
---|
| 824 | + "invalid hfunc type %u\n", hfunc); |
---|
| 825 | + return ret; |
---|
| 826 | + } |
---|
| 827 | + |
---|
| 828 | + /* Update the shadow RSS key with user specified qids */ |
---|
| 829 | + memcpy(rss_cfg->rss_hash_key, key, |
---|
| 830 | + HCLGEVF_RSS_KEY_SIZE); |
---|
| 831 | + } else { |
---|
| 832 | + ret = hclgevf_set_rss_algo_key(hdev, hash_algo, |
---|
| 833 | + rss_cfg->rss_hash_key); |
---|
| 834 | + if (ret) |
---|
| 835 | + return ret; |
---|
| 836 | + } |
---|
| 837 | + rss_cfg->hash_algo = hash_algo; |
---|
| 838 | + } |
---|
546 | 839 | |
---|
547 | 840 | /* update the shadow RSS table with user specified qids */ |
---|
548 | 841 | for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) |
---|
.. | .. |
---|
550 | 843 | |
---|
551 | 844 | /* update the hardware */ |
---|
552 | 845 | return hclgevf_set_rss_indir_table(hdev); |
---|
| 846 | +} |
---|
| 847 | + |
---|
| 848 | +static u8 hclgevf_get_rss_hash_bits(struct ethtool_rxnfc *nfc) |
---|
| 849 | +{ |
---|
| 850 | + u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGEVF_S_PORT_BIT : 0; |
---|
| 851 | + |
---|
| 852 | + if (nfc->data & RXH_L4_B_2_3) |
---|
| 853 | + hash_sets |= HCLGEVF_D_PORT_BIT; |
---|
| 854 | + else |
---|
| 855 | + hash_sets &= ~HCLGEVF_D_PORT_BIT; |
---|
| 856 | + |
---|
| 857 | + if (nfc->data & RXH_IP_SRC) |
---|
| 858 | + hash_sets |= HCLGEVF_S_IP_BIT; |
---|
| 859 | + else |
---|
| 860 | + hash_sets &= ~HCLGEVF_S_IP_BIT; |
---|
| 861 | + |
---|
| 862 | + if (nfc->data & RXH_IP_DST) |
---|
| 863 | + hash_sets |= HCLGEVF_D_IP_BIT; |
---|
| 864 | + else |
---|
| 865 | + hash_sets &= ~HCLGEVF_D_IP_BIT; |
---|
| 866 | + |
---|
| 867 | + if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW) |
---|
| 868 | + hash_sets |= HCLGEVF_V_TAG_BIT; |
---|
| 869 | + |
---|
| 870 | + return hash_sets; |
---|
| 871 | +} |
---|
| 872 | + |
---|
| 873 | +static int hclgevf_set_rss_tuple(struct hnae3_handle *handle, |
---|
| 874 | + struct ethtool_rxnfc *nfc) |
---|
| 875 | +{ |
---|
| 876 | + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); |
---|
| 877 | + struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; |
---|
| 878 | + struct hclgevf_rss_input_tuple_cmd *req; |
---|
| 879 | + struct hclgevf_desc desc; |
---|
| 880 | + u8 tuple_sets; |
---|
| 881 | + int ret; |
---|
| 882 | + |
---|
| 883 | + if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) |
---|
| 884 | + return -EOPNOTSUPP; |
---|
| 885 | + |
---|
| 886 | + if (nfc->data & |
---|
| 887 | + ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)) |
---|
| 888 | + return -EINVAL; |
---|
| 889 | + |
---|
| 890 | + req = (struct hclgevf_rss_input_tuple_cmd *)desc.data; |
---|
| 891 | + hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false); |
---|
| 892 | + |
---|
| 893 | + req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en; |
---|
| 894 | + req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en; |
---|
| 895 | + req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en; |
---|
| 896 | + req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en; |
---|
| 897 | + req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en; |
---|
| 898 | + req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en; |
---|
| 899 | + req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en; |
---|
| 900 | + req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en; |
---|
| 901 | + |
---|
| 902 | + tuple_sets = hclgevf_get_rss_hash_bits(nfc); |
---|
| 903 | + switch (nfc->flow_type) { |
---|
| 904 | + case TCP_V4_FLOW: |
---|
| 905 | + req->ipv4_tcp_en = tuple_sets; |
---|
| 906 | + break; |
---|
| 907 | + case TCP_V6_FLOW: |
---|
| 908 | + req->ipv6_tcp_en = tuple_sets; |
---|
| 909 | + break; |
---|
| 910 | + case UDP_V4_FLOW: |
---|
| 911 | + req->ipv4_udp_en = tuple_sets; |
---|
| 912 | + break; |
---|
| 913 | + case UDP_V6_FLOW: |
---|
| 914 | + req->ipv6_udp_en = tuple_sets; |
---|
| 915 | + break; |
---|
| 916 | + case SCTP_V4_FLOW: |
---|
| 917 | + req->ipv4_sctp_en = tuple_sets; |
---|
| 918 | + break; |
---|
| 919 | + case SCTP_V6_FLOW: |
---|
| 920 | + if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 && |
---|
| 921 | + (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3))) |
---|
| 922 | + return -EINVAL; |
---|
| 923 | + |
---|
| 924 | + req->ipv6_sctp_en = tuple_sets; |
---|
| 925 | + break; |
---|
| 926 | + case IPV4_FLOW: |
---|
| 927 | + req->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; |
---|
| 928 | + break; |
---|
| 929 | + case IPV6_FLOW: |
---|
| 930 | + req->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; |
---|
| 931 | + break; |
---|
| 932 | + default: |
---|
| 933 | + return -EINVAL; |
---|
| 934 | + } |
---|
| 935 | + |
---|
| 936 | + ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); |
---|
| 937 | + if (ret) { |
---|
| 938 | + dev_err(&hdev->pdev->dev, |
---|
| 939 | + "Set rss tuple fail, status = %d\n", ret); |
---|
| 940 | + return ret; |
---|
| 941 | + } |
---|
| 942 | + |
---|
| 943 | + rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en; |
---|
| 944 | + rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en; |
---|
| 945 | + rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en; |
---|
| 946 | + rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en; |
---|
| 947 | + rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en; |
---|
| 948 | + rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en; |
---|
| 949 | + rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en; |
---|
| 950 | + rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en; |
---|
| 951 | + return 0; |
---|
| 952 | +} |
---|
| 953 | + |
---|
| 954 | +static int hclgevf_get_rss_tuple(struct hnae3_handle *handle, |
---|
| 955 | + struct ethtool_rxnfc *nfc) |
---|
| 956 | +{ |
---|
| 957 | + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); |
---|
| 958 | + struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; |
---|
| 959 | + u8 tuple_sets; |
---|
| 960 | + |
---|
| 961 | + if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) |
---|
| 962 | + return -EOPNOTSUPP; |
---|
| 963 | + |
---|
| 964 | + nfc->data = 0; |
---|
| 965 | + |
---|
| 966 | + switch (nfc->flow_type) { |
---|
| 967 | + case TCP_V4_FLOW: |
---|
| 968 | + tuple_sets = rss_cfg->rss_tuple_sets.ipv4_tcp_en; |
---|
| 969 | + break; |
---|
| 970 | + case UDP_V4_FLOW: |
---|
| 971 | + tuple_sets = rss_cfg->rss_tuple_sets.ipv4_udp_en; |
---|
| 972 | + break; |
---|
| 973 | + case TCP_V6_FLOW: |
---|
| 974 | + tuple_sets = rss_cfg->rss_tuple_sets.ipv6_tcp_en; |
---|
| 975 | + break; |
---|
| 976 | + case UDP_V6_FLOW: |
---|
| 977 | + tuple_sets = rss_cfg->rss_tuple_sets.ipv6_udp_en; |
---|
| 978 | + break; |
---|
| 979 | + case SCTP_V4_FLOW: |
---|
| 980 | + tuple_sets = rss_cfg->rss_tuple_sets.ipv4_sctp_en; |
---|
| 981 | + break; |
---|
| 982 | + case SCTP_V6_FLOW: |
---|
| 983 | + tuple_sets = rss_cfg->rss_tuple_sets.ipv6_sctp_en; |
---|
| 984 | + break; |
---|
| 985 | + case IPV4_FLOW: |
---|
| 986 | + case IPV6_FLOW: |
---|
| 987 | + tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT; |
---|
| 988 | + break; |
---|
| 989 | + default: |
---|
| 990 | + return -EINVAL; |
---|
| 991 | + } |
---|
| 992 | + |
---|
| 993 | + if (!tuple_sets) |
---|
| 994 | + return 0; |
---|
| 995 | + |
---|
| 996 | + if (tuple_sets & HCLGEVF_D_PORT_BIT) |
---|
| 997 | + nfc->data |= RXH_L4_B_2_3; |
---|
| 998 | + if (tuple_sets & HCLGEVF_S_PORT_BIT) |
---|
| 999 | + nfc->data |= RXH_L4_B_0_1; |
---|
| 1000 | + if (tuple_sets & HCLGEVF_D_IP_BIT) |
---|
| 1001 | + nfc->data |= RXH_IP_DST; |
---|
| 1002 | + if (tuple_sets & HCLGEVF_S_IP_BIT) |
---|
| 1003 | + nfc->data |= RXH_IP_SRC; |
---|
| 1004 | + |
---|
| 1005 | + return 0; |
---|
| 1006 | +} |
---|
| 1007 | + |
---|
| 1008 | +static int hclgevf_set_rss_input_tuple(struct hclgevf_dev *hdev, |
---|
| 1009 | + struct hclgevf_rss_cfg *rss_cfg) |
---|
| 1010 | +{ |
---|
| 1011 | + struct hclgevf_rss_input_tuple_cmd *req; |
---|
| 1012 | + struct hclgevf_desc desc; |
---|
| 1013 | + int ret; |
---|
| 1014 | + |
---|
| 1015 | + hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false); |
---|
| 1016 | + |
---|
| 1017 | + req = (struct hclgevf_rss_input_tuple_cmd *)desc.data; |
---|
| 1018 | + |
---|
| 1019 | + req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en; |
---|
| 1020 | + req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en; |
---|
| 1021 | + req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en; |
---|
| 1022 | + req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en; |
---|
| 1023 | + req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en; |
---|
| 1024 | + req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en; |
---|
| 1025 | + req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en; |
---|
| 1026 | + req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en; |
---|
| 1027 | + |
---|
| 1028 | + ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); |
---|
| 1029 | + if (ret) |
---|
| 1030 | + dev_err(&hdev->pdev->dev, |
---|
| 1031 | + "Configure rss input fail, status = %d\n", ret); |
---|
| 1032 | + return ret; |
---|
553 | 1033 | } |
---|
554 | 1034 | |
---|
555 | 1035 | static int hclgevf_get_tc_size(struct hnae3_handle *handle) |
---|
.. | .. |
---|
565 | 1045 | struct hnae3_ring_chain_node *ring_chain) |
---|
566 | 1046 | { |
---|
567 | 1047 | struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); |
---|
| 1048 | + struct hclge_vf_to_pf_msg send_msg; |
---|
568 | 1049 | struct hnae3_ring_chain_node *node; |
---|
569 | | - struct hclge_mbx_vf_to_pf_cmd *req; |
---|
570 | | - struct hclgevf_desc desc; |
---|
571 | | - int i = 0; |
---|
572 | 1050 | int status; |
---|
573 | | - u8 type; |
---|
| 1051 | + int i = 0; |
---|
574 | 1052 | |
---|
575 | | - req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data; |
---|
| 1053 | + memset(&send_msg, 0, sizeof(send_msg)); |
---|
| 1054 | + send_msg.code = en ? HCLGE_MBX_MAP_RING_TO_VECTOR : |
---|
| 1055 | + HCLGE_MBX_UNMAP_RING_TO_VECTOR; |
---|
| 1056 | + send_msg.vector_id = vector_id; |
---|
576 | 1057 | |
---|
577 | 1058 | for (node = ring_chain; node; node = node->next) { |
---|
578 | | - int idx_offset = HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + |
---|
579 | | - HCLGE_MBX_RING_NODE_VARIABLE_NUM * i; |
---|
580 | | - |
---|
581 | | - if (i == 0) { |
---|
582 | | - hclgevf_cmd_setup_basic_desc(&desc, |
---|
583 | | - HCLGEVF_OPC_MBX_VF_TO_PF, |
---|
584 | | - false); |
---|
585 | | - type = en ? |
---|
586 | | - HCLGE_MBX_MAP_RING_TO_VECTOR : |
---|
587 | | - HCLGE_MBX_UNMAP_RING_TO_VECTOR; |
---|
588 | | - req->msg[0] = type; |
---|
589 | | - req->msg[1] = vector_id; |
---|
590 | | - } |
---|
591 | | - |
---|
592 | | - req->msg[idx_offset] = |
---|
| 1059 | + send_msg.param[i].ring_type = |
---|
593 | 1060 | hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B); |
---|
594 | | - req->msg[idx_offset + 1] = node->tqp_index; |
---|
595 | | - req->msg[idx_offset + 2] = hnae3_get_field(node->int_gl_idx, |
---|
596 | | - HNAE3_RING_GL_IDX_M, |
---|
597 | | - HNAE3_RING_GL_IDX_S); |
---|
| 1061 | + |
---|
| 1062 | + send_msg.param[i].tqp_index = node->tqp_index; |
---|
| 1063 | + send_msg.param[i].int_gl_index = |
---|
| 1064 | + hnae3_get_field(node->int_gl_idx, |
---|
| 1065 | + HNAE3_RING_GL_IDX_M, |
---|
| 1066 | + HNAE3_RING_GL_IDX_S); |
---|
598 | 1067 | |
---|
599 | 1068 | i++; |
---|
600 | | - if ((i == (HCLGE_MBX_VF_MSG_DATA_NUM - |
---|
601 | | - HCLGE_MBX_RING_MAP_BASIC_MSG_NUM) / |
---|
602 | | - HCLGE_MBX_RING_NODE_VARIABLE_NUM) || |
---|
603 | | - !node->next) { |
---|
604 | | - req->msg[2] = i; |
---|
| 1069 | + if (i == HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM || !node->next) { |
---|
| 1070 | + send_msg.ring_num = i; |
---|
605 | 1071 | |
---|
606 | | - status = hclgevf_cmd_send(&hdev->hw, &desc, 1); |
---|
| 1072 | + status = hclgevf_send_mbx_msg(hdev, &send_msg, false, |
---|
| 1073 | + NULL, 0); |
---|
607 | 1074 | if (status) { |
---|
608 | 1075 | dev_err(&hdev->pdev->dev, |
---|
609 | 1076 | "Map TQP fail, status is %d.\n", |
---|
.. | .. |
---|
611 | 1078 | return status; |
---|
612 | 1079 | } |
---|
613 | 1080 | i = 0; |
---|
614 | | - hclgevf_cmd_setup_basic_desc(&desc, |
---|
615 | | - HCLGEVF_OPC_MBX_VF_TO_PF, |
---|
616 | | - false); |
---|
617 | | - req->msg[0] = type; |
---|
618 | | - req->msg[1] = vector_id; |
---|
619 | 1081 | } |
---|
620 | 1082 | } |
---|
621 | 1083 | |
---|
.. | .. |
---|
645 | 1107 | { |
---|
646 | 1108 | struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); |
---|
647 | 1109 | int ret, vector_id; |
---|
| 1110 | + |
---|
| 1111 | + if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) |
---|
| 1112 | + return 0; |
---|
648 | 1113 | |
---|
649 | 1114 | vector_id = hclgevf_get_vector_index(hdev, vector); |
---|
650 | 1115 | if (vector_id < 0) { |
---|
.. | .. |
---|
682 | 1147 | } |
---|
683 | 1148 | |
---|
684 | 1149 | static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev, |
---|
685 | | - bool en_uc_pmc, bool en_mc_pmc) |
---|
| 1150 | + bool en_uc_pmc, bool en_mc_pmc, |
---|
| 1151 | + bool en_bc_pmc) |
---|
686 | 1152 | { |
---|
687 | | - struct hclge_mbx_vf_to_pf_cmd *req; |
---|
688 | | - struct hclgevf_desc desc; |
---|
689 | | - int status; |
---|
| 1153 | + struct hclge_vf_to_pf_msg send_msg; |
---|
| 1154 | + int ret; |
---|
690 | 1155 | |
---|
691 | | - req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data; |
---|
| 1156 | + memset(&send_msg, 0, sizeof(send_msg)); |
---|
| 1157 | + send_msg.code = HCLGE_MBX_SET_PROMISC_MODE; |
---|
| 1158 | + send_msg.en_bc = en_bc_pmc ? 1 : 0; |
---|
| 1159 | + send_msg.en_uc = en_uc_pmc ? 1 : 0; |
---|
| 1160 | + send_msg.en_mc = en_mc_pmc ? 1 : 0; |
---|
692 | 1161 | |
---|
693 | | - hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false); |
---|
694 | | - req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE; |
---|
695 | | - req->msg[1] = en_uc_pmc ? 1 : 0; |
---|
696 | | - req->msg[2] = en_mc_pmc ? 1 : 0; |
---|
697 | | - |
---|
698 | | - status = hclgevf_cmd_send(&hdev->hw, &desc, 1); |
---|
699 | | - if (status) |
---|
| 1162 | + ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); |
---|
| 1163 | + if (ret) |
---|
700 | 1164 | dev_err(&hdev->pdev->dev, |
---|
701 | | - "Set promisc mode fail, status is %d.\n", status); |
---|
| 1165 | + "Set promisc mode fail, status is %d.\n", ret); |
---|
702 | 1166 | |
---|
703 | | - return status; |
---|
| 1167 | + return ret; |
---|
704 | 1168 | } |
---|
705 | 1169 | |
---|
706 | | -static void hclgevf_set_promisc_mode(struct hnae3_handle *handle, |
---|
707 | | - bool en_uc_pmc, bool en_mc_pmc) |
---|
| 1170 | +static int hclgevf_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc, |
---|
| 1171 | + bool en_mc_pmc) |
---|
| 1172 | +{ |
---|
| 1173 | + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); |
---|
| 1174 | + bool en_bc_pmc; |
---|
| 1175 | + |
---|
| 1176 | + en_bc_pmc = hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2; |
---|
| 1177 | + |
---|
| 1178 | + return hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc, |
---|
| 1179 | + en_bc_pmc); |
---|
| 1180 | +} |
---|
| 1181 | + |
---|
| 1182 | +static void hclgevf_request_update_promisc_mode(struct hnae3_handle *handle) |
---|
708 | 1183 | { |
---|
709 | 1184 | struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); |
---|
710 | 1185 | |
---|
711 | | - hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc); |
---|
| 1186 | + set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state); |
---|
712 | 1187 | } |
---|
713 | 1188 | |
---|
714 | | -static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id, |
---|
| 1189 | +static void hclgevf_sync_promisc_mode(struct hclgevf_dev *hdev) |
---|
| 1190 | +{ |
---|
| 1191 | + struct hnae3_handle *handle = &hdev->nic; |
---|
| 1192 | + bool en_uc_pmc = handle->netdev_flags & HNAE3_UPE; |
---|
| 1193 | + bool en_mc_pmc = handle->netdev_flags & HNAE3_MPE; |
---|
| 1194 | + int ret; |
---|
| 1195 | + |
---|
| 1196 | + if (test_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state)) { |
---|
| 1197 | + ret = hclgevf_set_promisc_mode(handle, en_uc_pmc, en_mc_pmc); |
---|
| 1198 | + if (!ret) |
---|
| 1199 | + clear_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state); |
---|
| 1200 | + } |
---|
| 1201 | +} |
---|
| 1202 | + |
---|
| 1203 | +static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, unsigned int tqp_id, |
---|
715 | 1204 | int stream_id, bool enable) |
---|
716 | 1205 | { |
---|
717 | 1206 | struct hclgevf_cfg_com_tqp_queue_cmd *req; |
---|
.. | .. |
---|
724 | 1213 | false); |
---|
725 | 1214 | req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK); |
---|
726 | 1215 | req->stream_id = cpu_to_le16(stream_id); |
---|
727 | | - req->enable |= enable << HCLGEVF_TQP_ENABLE_B; |
---|
| 1216 | + if (enable) |
---|
| 1217 | + req->enable |= 1U << HCLGEVF_TQP_ENABLE_B; |
---|
728 | 1218 | |
---|
729 | 1219 | status = hclgevf_cmd_send(&hdev->hw, &desc, 1); |
---|
730 | 1220 | if (status) |
---|
.. | .. |
---|
734 | 1224 | return status; |
---|
735 | 1225 | } |
---|
736 | 1226 | |
---|
737 | | -static int hclgevf_get_queue_id(struct hnae3_queue *queue) |
---|
738 | | -{ |
---|
739 | | - struct hclgevf_tqp *tqp = container_of(queue, struct hclgevf_tqp, q); |
---|
740 | | - |
---|
741 | | - return tqp->index; |
---|
742 | | -} |
---|
743 | | - |
---|
744 | 1227 | static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle) |
---|
745 | 1228 | { |
---|
746 | | - struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); |
---|
747 | | - struct hnae3_queue *queue; |
---|
| 1229 | + struct hnae3_knic_private_info *kinfo = &handle->kinfo; |
---|
748 | 1230 | struct hclgevf_tqp *tqp; |
---|
749 | 1231 | int i; |
---|
750 | 1232 | |
---|
751 | | - for (i = 0; i < hdev->num_tqps; i++) { |
---|
752 | | - queue = handle->kinfo.tqp[i]; |
---|
753 | | - tqp = container_of(queue, struct hclgevf_tqp, q); |
---|
| 1233 | + for (i = 0; i < kinfo->num_tqps; i++) { |
---|
| 1234 | + tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); |
---|
754 | 1235 | memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats)); |
---|
755 | 1236 | } |
---|
756 | 1237 | } |
---|
757 | 1238 | |
---|
758 | | -static int hclgevf_cfg_func_mta_type(struct hclgevf_dev *hdev) |
---|
| 1239 | +static int hclgevf_get_host_mac_addr(struct hclgevf_dev *hdev, u8 *p) |
---|
759 | 1240 | { |
---|
760 | | - u8 resp_msg = HCLGEVF_MTA_TYPE_SEL_MAX; |
---|
761 | | - int ret; |
---|
| 1241 | + struct hclge_vf_to_pf_msg send_msg; |
---|
| 1242 | + u8 host_mac[ETH_ALEN]; |
---|
| 1243 | + int status; |
---|
762 | 1244 | |
---|
763 | | - ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST, |
---|
764 | | - HCLGE_MBX_MAC_VLAN_MTA_TYPE_READ, |
---|
765 | | - NULL, 0, true, &resp_msg, sizeof(u8)); |
---|
766 | | - |
---|
767 | | - if (ret) { |
---|
| 1245 | + hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MAC_ADDR, 0); |
---|
| 1246 | + status = hclgevf_send_mbx_msg(hdev, &send_msg, true, host_mac, |
---|
| 1247 | + ETH_ALEN); |
---|
| 1248 | + if (status) { |
---|
768 | 1249 | dev_err(&hdev->pdev->dev, |
---|
769 | | - "Read mta type fail, ret=%d.\n", ret); |
---|
770 | | - return ret; |
---|
| 1250 | + "fail to get VF MAC from host %d", status); |
---|
| 1251 | + return status; |
---|
771 | 1252 | } |
---|
772 | 1253 | |
---|
773 | | - if (resp_msg > HCLGEVF_MTA_TYPE_SEL_MAX) { |
---|
774 | | - dev_err(&hdev->pdev->dev, |
---|
775 | | - "Read mta type invalid, resp=%d.\n", resp_msg); |
---|
776 | | - return -EINVAL; |
---|
777 | | - } |
---|
778 | | - |
---|
779 | | - hdev->mta_mac_sel_type = resp_msg; |
---|
| 1254 | + ether_addr_copy(p, host_mac); |
---|
780 | 1255 | |
---|
781 | 1256 | return 0; |
---|
782 | | -} |
---|
783 | | - |
---|
784 | | -static u16 hclgevf_get_mac_addr_to_mta_index(struct hclgevf_dev *hdev, |
---|
785 | | - const u8 *addr) |
---|
786 | | -{ |
---|
787 | | - u32 rsh = HCLGEVF_MTA_TYPE_SEL_MAX - hdev->mta_mac_sel_type; |
---|
788 | | - u16 high_val = addr[1] | (addr[0] << 8); |
---|
789 | | - |
---|
790 | | - return (high_val >> rsh) & 0xfff; |
---|
791 | | -} |
---|
792 | | - |
---|
793 | | -static int hclgevf_do_update_mta_status(struct hclgevf_dev *hdev, |
---|
794 | | - unsigned long *status) |
---|
795 | | -{ |
---|
796 | | -#define HCLGEVF_MTA_STATUS_MSG_SIZE 13 |
---|
797 | | -#define HCLGEVF_MTA_STATUS_MSG_BITS \ |
---|
798 | | - (HCLGEVF_MTA_STATUS_MSG_SIZE * BITS_PER_BYTE) |
---|
799 | | -#define HCLGEVF_MTA_STATUS_MSG_END_BITS \ |
---|
800 | | - (HCLGEVF_MTA_TBL_SIZE % HCLGEVF_MTA_STATUS_MSG_BITS) |
---|
801 | | - u16 tbl_cnt; |
---|
802 | | - u16 tbl_idx; |
---|
803 | | - u8 msg_cnt; |
---|
804 | | - u8 msg_idx; |
---|
805 | | - int ret; |
---|
806 | | - |
---|
807 | | - msg_cnt = DIV_ROUND_UP(HCLGEVF_MTA_TBL_SIZE, |
---|
808 | | - HCLGEVF_MTA_STATUS_MSG_BITS); |
---|
809 | | - tbl_idx = 0; |
---|
810 | | - msg_idx = 0; |
---|
811 | | - while (msg_cnt--) { |
---|
812 | | - u8 msg[HCLGEVF_MTA_STATUS_MSG_SIZE + 1]; |
---|
813 | | - u8 *p = &msg[1]; |
---|
814 | | - u8 msg_ofs; |
---|
815 | | - u8 msg_bit; |
---|
816 | | - |
---|
817 | | - memset(msg, 0, sizeof(msg)); |
---|
818 | | - |
---|
819 | | - /* set index field */ |
---|
820 | | - msg[0] = 0x7F & msg_idx; |
---|
821 | | - |
---|
822 | | - /* set end flag field */ |
---|
823 | | - if (msg_cnt == 0) { |
---|
824 | | - msg[0] |= 0x80; |
---|
825 | | - tbl_cnt = HCLGEVF_MTA_STATUS_MSG_END_BITS; |
---|
826 | | - } else { |
---|
827 | | - tbl_cnt = HCLGEVF_MTA_STATUS_MSG_BITS; |
---|
828 | | - } |
---|
829 | | - |
---|
830 | | - /* set status field */ |
---|
831 | | - msg_ofs = 0; |
---|
832 | | - msg_bit = 0; |
---|
833 | | - while (tbl_cnt--) { |
---|
834 | | - if (test_bit(tbl_idx, status)) |
---|
835 | | - p[msg_ofs] |= BIT(msg_bit); |
---|
836 | | - |
---|
837 | | - tbl_idx++; |
---|
838 | | - |
---|
839 | | - msg_bit++; |
---|
840 | | - if (msg_bit == BITS_PER_BYTE) { |
---|
841 | | - msg_bit = 0; |
---|
842 | | - msg_ofs++; |
---|
843 | | - } |
---|
844 | | - } |
---|
845 | | - |
---|
846 | | - ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST, |
---|
847 | | - HCLGE_MBX_MAC_VLAN_MTA_STATUS_UPDATE, |
---|
848 | | - msg, sizeof(msg), false, NULL, 0); |
---|
849 | | - if (ret) |
---|
850 | | - break; |
---|
851 | | - |
---|
852 | | - msg_idx++; |
---|
853 | | - } |
---|
854 | | - |
---|
855 | | - return ret; |
---|
856 | | -} |
---|
857 | | - |
---|
858 | | -static int hclgevf_update_mta_status(struct hnae3_handle *handle) |
---|
859 | | -{ |
---|
860 | | - unsigned long mta_status[BITS_TO_LONGS(HCLGEVF_MTA_TBL_SIZE)]; |
---|
861 | | - struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); |
---|
862 | | - struct net_device *netdev = hdev->nic.kinfo.netdev; |
---|
863 | | - struct netdev_hw_addr *ha; |
---|
864 | | - u16 tbl_idx; |
---|
865 | | - |
---|
866 | | - /* clear status */ |
---|
867 | | - memset(mta_status, 0, sizeof(mta_status)); |
---|
868 | | - |
---|
869 | | - /* update status from mc addr list */ |
---|
870 | | - netdev_for_each_mc_addr(ha, netdev) { |
---|
871 | | - tbl_idx = hclgevf_get_mac_addr_to_mta_index(hdev, ha->addr); |
---|
872 | | - set_bit(tbl_idx, mta_status); |
---|
873 | | - } |
---|
874 | | - |
---|
875 | | - return hclgevf_do_update_mta_status(hdev, mta_status); |
---|
876 | 1257 | } |
---|
877 | 1258 | |
---|
878 | 1259 | static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p) |
---|
879 | 1260 | { |
---|
880 | 1261 | struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); |
---|
| 1262 | + u8 host_mac_addr[ETH_ALEN]; |
---|
881 | 1263 | |
---|
882 | | - ether_addr_copy(p, hdev->hw.mac.mac_addr); |
---|
| 1264 | + if (hclgevf_get_host_mac_addr(hdev, host_mac_addr)) |
---|
| 1265 | + return; |
---|
| 1266 | + |
---|
| 1267 | + hdev->has_pf_mac = !is_zero_ether_addr(host_mac_addr); |
---|
| 1268 | + if (hdev->has_pf_mac) |
---|
| 1269 | + ether_addr_copy(p, host_mac_addr); |
---|
| 1270 | + else |
---|
| 1271 | + ether_addr_copy(p, hdev->hw.mac.mac_addr); |
---|
883 | 1272 | } |
---|
884 | 1273 | |
---|
885 | 1274 | static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p, |
---|
.. | .. |
---|
887 | 1276 | { |
---|
888 | 1277 | struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); |
---|
889 | 1278 | u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr; |
---|
| 1279 | + struct hclge_vf_to_pf_msg send_msg; |
---|
890 | 1280 | u8 *new_mac_addr = (u8 *)p; |
---|
891 | | - u8 msg_data[ETH_ALEN * 2]; |
---|
892 | | - u16 subcode; |
---|
893 | 1281 | int status; |
---|
894 | 1282 | |
---|
895 | | - ether_addr_copy(msg_data, new_mac_addr); |
---|
896 | | - ether_addr_copy(&msg_data[ETH_ALEN], old_mac_addr); |
---|
897 | | - |
---|
898 | | - subcode = is_first ? HCLGE_MBX_MAC_VLAN_UC_ADD : |
---|
899 | | - HCLGE_MBX_MAC_VLAN_UC_MODIFY; |
---|
900 | | - |
---|
901 | | - status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST, |
---|
902 | | - subcode, msg_data, ETH_ALEN * 2, |
---|
903 | | - true, NULL, 0); |
---|
| 1283 | + hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_UNICAST, 0); |
---|
| 1284 | + send_msg.subcode = HCLGE_MBX_MAC_VLAN_UC_MODIFY; |
---|
| 1285 | + ether_addr_copy(send_msg.data, new_mac_addr); |
---|
| 1286 | + if (is_first && !hdev->has_pf_mac) |
---|
| 1287 | + eth_zero_addr(&send_msg.data[ETH_ALEN]); |
---|
| 1288 | + else |
---|
| 1289 | + ether_addr_copy(&send_msg.data[ETH_ALEN], old_mac_addr); |
---|
| 1290 | + status = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); |
---|
904 | 1291 | if (!status) |
---|
905 | 1292 | ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr); |
---|
906 | 1293 | |
---|
907 | 1294 | return status; |
---|
908 | 1295 | } |
---|
909 | 1296 | |
---|
| 1297 | +static struct hclgevf_mac_addr_node * |
---|
| 1298 | +hclgevf_find_mac_node(struct list_head *list, const u8 *mac_addr) |
---|
| 1299 | +{ |
---|
| 1300 | + struct hclgevf_mac_addr_node *mac_node, *tmp; |
---|
| 1301 | + |
---|
| 1302 | + list_for_each_entry_safe(mac_node, tmp, list, node) |
---|
| 1303 | + if (ether_addr_equal(mac_addr, mac_node->mac_addr)) |
---|
| 1304 | + return mac_node; |
---|
| 1305 | + |
---|
| 1306 | + return NULL; |
---|
| 1307 | +} |
---|
| 1308 | + |
---|
| 1309 | +static void hclgevf_update_mac_node(struct hclgevf_mac_addr_node *mac_node, |
---|
| 1310 | + enum HCLGEVF_MAC_NODE_STATE state) |
---|
| 1311 | +{ |
---|
| 1312 | + switch (state) { |
---|
| 1313 | + /* from set_rx_mode or tmp_add_list */ |
---|
| 1314 | + case HCLGEVF_MAC_TO_ADD: |
---|
| 1315 | + if (mac_node->state == HCLGEVF_MAC_TO_DEL) |
---|
| 1316 | + mac_node->state = HCLGEVF_MAC_ACTIVE; |
---|
| 1317 | + break; |
---|
| 1318 | + /* only from set_rx_mode */ |
---|
| 1319 | + case HCLGEVF_MAC_TO_DEL: |
---|
| 1320 | + if (mac_node->state == HCLGEVF_MAC_TO_ADD) { |
---|
| 1321 | + list_del(&mac_node->node); |
---|
| 1322 | + kfree(mac_node); |
---|
| 1323 | + } else { |
---|
| 1324 | + mac_node->state = HCLGEVF_MAC_TO_DEL; |
---|
| 1325 | + } |
---|
| 1326 | + break; |
---|
| 1327 | + /* only from tmp_add_list, the mac_node->state won't be |
---|
| 1328 | + * HCLGEVF_MAC_ACTIVE |
---|
| 1329 | + */ |
---|
| 1330 | + case HCLGEVF_MAC_ACTIVE: |
---|
| 1331 | + if (mac_node->state == HCLGEVF_MAC_TO_ADD) |
---|
| 1332 | + mac_node->state = HCLGEVF_MAC_ACTIVE; |
---|
| 1333 | + break; |
---|
| 1334 | + } |
---|
| 1335 | +} |
---|
| 1336 | + |
---|
| 1337 | +static int hclgevf_update_mac_list(struct hnae3_handle *handle, |
---|
| 1338 | + enum HCLGEVF_MAC_NODE_STATE state, |
---|
| 1339 | + enum HCLGEVF_MAC_ADDR_TYPE mac_type, |
---|
| 1340 | + const unsigned char *addr) |
---|
| 1341 | +{ |
---|
| 1342 | + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); |
---|
| 1343 | + struct hclgevf_mac_addr_node *mac_node; |
---|
| 1344 | + struct list_head *list; |
---|
| 1345 | + |
---|
| 1346 | + list = (mac_type == HCLGEVF_MAC_ADDR_UC) ? |
---|
| 1347 | + &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list; |
---|
| 1348 | + |
---|
| 1349 | + spin_lock_bh(&hdev->mac_table.mac_list_lock); |
---|
| 1350 | + |
---|
| 1351 | + /* if the mac addr is already in the mac list, no need to add a new |
---|
| 1352 | + * one into it, just check the mac addr state, convert it to a new |
---|
| 1353 | + * new state, or just remove it, or do nothing. |
---|
| 1354 | + */ |
---|
| 1355 | + mac_node = hclgevf_find_mac_node(list, addr); |
---|
| 1356 | + if (mac_node) { |
---|
| 1357 | + hclgevf_update_mac_node(mac_node, state); |
---|
| 1358 | + spin_unlock_bh(&hdev->mac_table.mac_list_lock); |
---|
| 1359 | + return 0; |
---|
| 1360 | + } |
---|
| 1361 | + /* if this address is never added, unnecessary to delete */ |
---|
| 1362 | + if (state == HCLGEVF_MAC_TO_DEL) { |
---|
| 1363 | + spin_unlock_bh(&hdev->mac_table.mac_list_lock); |
---|
| 1364 | + return -ENOENT; |
---|
| 1365 | + } |
---|
| 1366 | + |
---|
| 1367 | + mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC); |
---|
| 1368 | + if (!mac_node) { |
---|
| 1369 | + spin_unlock_bh(&hdev->mac_table.mac_list_lock); |
---|
| 1370 | + return -ENOMEM; |
---|
| 1371 | + } |
---|
| 1372 | + |
---|
| 1373 | + mac_node->state = state; |
---|
| 1374 | + ether_addr_copy(mac_node->mac_addr, addr); |
---|
| 1375 | + list_add_tail(&mac_node->node, list); |
---|
| 1376 | + |
---|
| 1377 | + spin_unlock_bh(&hdev->mac_table.mac_list_lock); |
---|
| 1378 | + return 0; |
---|
| 1379 | +} |
---|
| 1380 | + |
---|
910 | 1381 | static int hclgevf_add_uc_addr(struct hnae3_handle *handle, |
---|
911 | 1382 | const unsigned char *addr) |
---|
912 | 1383 | { |
---|
913 | | - struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); |
---|
914 | | - |
---|
915 | | - return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST, |
---|
916 | | - HCLGE_MBX_MAC_VLAN_UC_ADD, |
---|
917 | | - addr, ETH_ALEN, false, NULL, 0); |
---|
| 1384 | + return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD, |
---|
| 1385 | + HCLGEVF_MAC_ADDR_UC, addr); |
---|
918 | 1386 | } |
---|
919 | 1387 | |
---|
920 | 1388 | static int hclgevf_rm_uc_addr(struct hnae3_handle *handle, |
---|
921 | 1389 | const unsigned char *addr) |
---|
922 | 1390 | { |
---|
923 | | - struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); |
---|
924 | | - |
---|
925 | | - return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST, |
---|
926 | | - HCLGE_MBX_MAC_VLAN_UC_REMOVE, |
---|
927 | | - addr, ETH_ALEN, false, NULL, 0); |
---|
| 1391 | + return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL, |
---|
| 1392 | + HCLGEVF_MAC_ADDR_UC, addr); |
---|
928 | 1393 | } |
---|
929 | 1394 | |
---|
930 | 1395 | static int hclgevf_add_mc_addr(struct hnae3_handle *handle, |
---|
931 | 1396 | const unsigned char *addr) |
---|
932 | 1397 | { |
---|
933 | | - struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); |
---|
934 | | - |
---|
935 | | - return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST, |
---|
936 | | - HCLGE_MBX_MAC_VLAN_MC_ADD, |
---|
937 | | - addr, ETH_ALEN, false, NULL, 0); |
---|
| 1398 | + return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD, |
---|
| 1399 | + HCLGEVF_MAC_ADDR_MC, addr); |
---|
938 | 1400 | } |
---|
939 | 1401 | |
---|
940 | 1402 | static int hclgevf_rm_mc_addr(struct hnae3_handle *handle, |
---|
941 | 1403 | const unsigned char *addr) |
---|
942 | 1404 | { |
---|
943 | | - struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); |
---|
| 1405 | + return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL, |
---|
| 1406 | + HCLGEVF_MAC_ADDR_MC, addr); |
---|
| 1407 | +} |
---|
944 | 1408 | |
---|
945 | | - return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST, |
---|
946 | | - HCLGE_MBX_MAC_VLAN_MC_REMOVE, |
---|
947 | | - addr, ETH_ALEN, false, NULL, 0); |
---|
| 1409 | +static int hclgevf_add_del_mac_addr(struct hclgevf_dev *hdev, |
---|
| 1410 | + struct hclgevf_mac_addr_node *mac_node, |
---|
| 1411 | + enum HCLGEVF_MAC_ADDR_TYPE mac_type) |
---|
| 1412 | +{ |
---|
| 1413 | + struct hclge_vf_to_pf_msg send_msg; |
---|
| 1414 | + u8 code, subcode; |
---|
| 1415 | + |
---|
| 1416 | + if (mac_type == HCLGEVF_MAC_ADDR_UC) { |
---|
| 1417 | + code = HCLGE_MBX_SET_UNICAST; |
---|
| 1418 | + if (mac_node->state == HCLGEVF_MAC_TO_ADD) |
---|
| 1419 | + subcode = HCLGE_MBX_MAC_VLAN_UC_ADD; |
---|
| 1420 | + else |
---|
| 1421 | + subcode = HCLGE_MBX_MAC_VLAN_UC_REMOVE; |
---|
| 1422 | + } else { |
---|
| 1423 | + code = HCLGE_MBX_SET_MULTICAST; |
---|
| 1424 | + if (mac_node->state == HCLGEVF_MAC_TO_ADD) |
---|
| 1425 | + subcode = HCLGE_MBX_MAC_VLAN_MC_ADD; |
---|
| 1426 | + else |
---|
| 1427 | + subcode = HCLGE_MBX_MAC_VLAN_MC_REMOVE; |
---|
| 1428 | + } |
---|
| 1429 | + |
---|
| 1430 | + hclgevf_build_send_msg(&send_msg, code, subcode); |
---|
| 1431 | + ether_addr_copy(send_msg.data, mac_node->mac_addr); |
---|
| 1432 | + return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); |
---|
| 1433 | +} |
---|
| 1434 | + |
---|
| 1435 | +static void hclgevf_config_mac_list(struct hclgevf_dev *hdev, |
---|
| 1436 | + struct list_head *list, |
---|
| 1437 | + enum HCLGEVF_MAC_ADDR_TYPE mac_type) |
---|
| 1438 | +{ |
---|
| 1439 | + struct hclgevf_mac_addr_node *mac_node, *tmp; |
---|
| 1440 | + int ret; |
---|
| 1441 | + |
---|
| 1442 | + list_for_each_entry_safe(mac_node, tmp, list, node) { |
---|
| 1443 | + ret = hclgevf_add_del_mac_addr(hdev, mac_node, mac_type); |
---|
| 1444 | + if (ret) { |
---|
| 1445 | + dev_err(&hdev->pdev->dev, |
---|
| 1446 | + "failed to configure mac %pM, state = %d, ret = %d\n", |
---|
| 1447 | + mac_node->mac_addr, mac_node->state, ret); |
---|
| 1448 | + return; |
---|
| 1449 | + } |
---|
| 1450 | + if (mac_node->state == HCLGEVF_MAC_TO_ADD) { |
---|
| 1451 | + mac_node->state = HCLGEVF_MAC_ACTIVE; |
---|
| 1452 | + } else { |
---|
| 1453 | + list_del(&mac_node->node); |
---|
| 1454 | + kfree(mac_node); |
---|
| 1455 | + } |
---|
| 1456 | + } |
---|
| 1457 | +} |
---|
| 1458 | + |
---|
| 1459 | +static void hclgevf_sync_from_add_list(struct list_head *add_list, |
---|
| 1460 | + struct list_head *mac_list) |
---|
| 1461 | +{ |
---|
| 1462 | + struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node; |
---|
| 1463 | + |
---|
| 1464 | + list_for_each_entry_safe(mac_node, tmp, add_list, node) { |
---|
| 1465 | + /* if the mac address from tmp_add_list is not in the |
---|
| 1466 | + * uc/mc_mac_list, it means we have received a TO_DEL request |
---|
| 1467 | + * during the time window of sending mac config request to PF |
---|
| 1468 | + * If mac_node state is ACTIVE, then change its state to TO_DEL, |
---|
| 1469 | + * then it will be removed at next time. If is TO_ADD, it means |
---|
| 1470 | + * send TO_ADD request failed, so just remove the mac node. |
---|
| 1471 | + */ |
---|
| 1472 | + new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr); |
---|
| 1473 | + if (new_node) { |
---|
| 1474 | + hclgevf_update_mac_node(new_node, mac_node->state); |
---|
| 1475 | + list_del(&mac_node->node); |
---|
| 1476 | + kfree(mac_node); |
---|
| 1477 | + } else if (mac_node->state == HCLGEVF_MAC_ACTIVE) { |
---|
| 1478 | + mac_node->state = HCLGEVF_MAC_TO_DEL; |
---|
| 1479 | + list_del(&mac_node->node); |
---|
| 1480 | + list_add_tail(&mac_node->node, mac_list); |
---|
| 1481 | + } else { |
---|
| 1482 | + list_del(&mac_node->node); |
---|
| 1483 | + kfree(mac_node); |
---|
| 1484 | + } |
---|
| 1485 | + } |
---|
| 1486 | +} |
---|
| 1487 | + |
---|
| 1488 | +static void hclgevf_sync_from_del_list(struct list_head *del_list, |
---|
| 1489 | + struct list_head *mac_list) |
---|
| 1490 | +{ |
---|
| 1491 | + struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node; |
---|
| 1492 | + |
---|
| 1493 | + list_for_each_entry_safe(mac_node, tmp, del_list, node) { |
---|
| 1494 | + new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr); |
---|
| 1495 | + if (new_node) { |
---|
| 1496 | + /* If the mac addr is exist in the mac list, it means |
---|
| 1497 | + * received a new request TO_ADD during the time window |
---|
| 1498 | + * of sending mac addr configuration request to PF, so just |
---|
| 1499 | + * change the mac state to ACTIVE. |
---|
| 1500 | + */ |
---|
| 1501 | + new_node->state = HCLGEVF_MAC_ACTIVE; |
---|
| 1502 | + list_del(&mac_node->node); |
---|
| 1503 | + kfree(mac_node); |
---|
| 1504 | + } else { |
---|
| 1505 | + list_del(&mac_node->node); |
---|
| 1506 | + list_add_tail(&mac_node->node, mac_list); |
---|
| 1507 | + } |
---|
| 1508 | + } |
---|
| 1509 | +} |
---|
| 1510 | + |
---|
| 1511 | +static void hclgevf_clear_list(struct list_head *list) |
---|
| 1512 | +{ |
---|
| 1513 | + struct hclgevf_mac_addr_node *mac_node, *tmp; |
---|
| 1514 | + |
---|
| 1515 | + list_for_each_entry_safe(mac_node, tmp, list, node) { |
---|
| 1516 | + list_del(&mac_node->node); |
---|
| 1517 | + kfree(mac_node); |
---|
| 1518 | + } |
---|
| 1519 | +} |
---|
| 1520 | + |
---|
| 1521 | +static void hclgevf_sync_mac_list(struct hclgevf_dev *hdev, |
---|
| 1522 | + enum HCLGEVF_MAC_ADDR_TYPE mac_type) |
---|
| 1523 | +{ |
---|
| 1524 | + struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node; |
---|
| 1525 | + struct list_head tmp_add_list, tmp_del_list; |
---|
| 1526 | + struct list_head *list; |
---|
| 1527 | + |
---|
| 1528 | + INIT_LIST_HEAD(&tmp_add_list); |
---|
| 1529 | + INIT_LIST_HEAD(&tmp_del_list); |
---|
| 1530 | + |
---|
| 1531 | + /* move the mac addr to the tmp_add_list and tmp_del_list, then |
---|
| 1532 | + * we can add/delete these mac addr outside the spin lock |
---|
| 1533 | + */ |
---|
| 1534 | + list = (mac_type == HCLGEVF_MAC_ADDR_UC) ? |
---|
| 1535 | + &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list; |
---|
| 1536 | + |
---|
| 1537 | + spin_lock_bh(&hdev->mac_table.mac_list_lock); |
---|
| 1538 | + |
---|
| 1539 | + list_for_each_entry_safe(mac_node, tmp, list, node) { |
---|
| 1540 | + switch (mac_node->state) { |
---|
| 1541 | + case HCLGEVF_MAC_TO_DEL: |
---|
| 1542 | + list_del(&mac_node->node); |
---|
| 1543 | + list_add_tail(&mac_node->node, &tmp_del_list); |
---|
| 1544 | + break; |
---|
| 1545 | + case HCLGEVF_MAC_TO_ADD: |
---|
| 1546 | + new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC); |
---|
| 1547 | + if (!new_node) |
---|
| 1548 | + goto stop_traverse; |
---|
| 1549 | + |
---|
| 1550 | + ether_addr_copy(new_node->mac_addr, mac_node->mac_addr); |
---|
| 1551 | + new_node->state = mac_node->state; |
---|
| 1552 | + list_add_tail(&new_node->node, &tmp_add_list); |
---|
| 1553 | + break; |
---|
| 1554 | + default: |
---|
| 1555 | + break; |
---|
| 1556 | + } |
---|
| 1557 | + } |
---|
| 1558 | + |
---|
| 1559 | +stop_traverse: |
---|
| 1560 | + spin_unlock_bh(&hdev->mac_table.mac_list_lock); |
---|
| 1561 | + |
---|
| 1562 | + /* delete first, in order to get max mac table space for adding */ |
---|
| 1563 | + hclgevf_config_mac_list(hdev, &tmp_del_list, mac_type); |
---|
| 1564 | + hclgevf_config_mac_list(hdev, &tmp_add_list, mac_type); |
---|
| 1565 | + |
---|
| 1566 | + /* if some mac addresses were added/deleted fail, move back to the |
---|
| 1567 | + * mac_list, and retry at next time. |
---|
| 1568 | + */ |
---|
| 1569 | + spin_lock_bh(&hdev->mac_table.mac_list_lock); |
---|
| 1570 | + |
---|
| 1571 | + hclgevf_sync_from_del_list(&tmp_del_list, list); |
---|
| 1572 | + hclgevf_sync_from_add_list(&tmp_add_list, list); |
---|
| 1573 | + |
---|
| 1574 | + spin_unlock_bh(&hdev->mac_table.mac_list_lock); |
---|
| 1575 | +} |
---|
| 1576 | + |
---|
| 1577 | +static void hclgevf_sync_mac_table(struct hclgevf_dev *hdev) |
---|
| 1578 | +{ |
---|
| 1579 | + hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_UC); |
---|
| 1580 | + hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_MC); |
---|
| 1581 | +} |
---|
| 1582 | + |
---|
| 1583 | +static void hclgevf_uninit_mac_list(struct hclgevf_dev *hdev) |
---|
| 1584 | +{ |
---|
| 1585 | + spin_lock_bh(&hdev->mac_table.mac_list_lock); |
---|
| 1586 | + |
---|
| 1587 | + hclgevf_clear_list(&hdev->mac_table.uc_mac_list); |
---|
| 1588 | + hclgevf_clear_list(&hdev->mac_table.mc_mac_list); |
---|
| 1589 | + |
---|
| 1590 | + spin_unlock_bh(&hdev->mac_table.mac_list_lock); |
---|
948 | 1591 | } |
---|
949 | 1592 | |
---|
950 | 1593 | static int hclgevf_set_vlan_filter(struct hnae3_handle *handle, |
---|
951 | 1594 | __be16 proto, u16 vlan_id, |
---|
952 | 1595 | bool is_kill) |
---|
953 | 1596 | { |
---|
954 | | -#define HCLGEVF_VLAN_MBX_MSG_LEN 5 |
---|
955 | | - struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); |
---|
956 | | - u8 msg_data[HCLGEVF_VLAN_MBX_MSG_LEN]; |
---|
| 1597 | +#define HCLGEVF_VLAN_MBX_IS_KILL_OFFSET 0 |
---|
| 1598 | +#define HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET 1 |
---|
| 1599 | +#define HCLGEVF_VLAN_MBX_PROTO_OFFSET 3 |
---|
957 | 1600 | |
---|
958 | | - if (vlan_id > 4095) |
---|
| 1601 | + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); |
---|
| 1602 | + struct hclge_vf_to_pf_msg send_msg; |
---|
| 1603 | + int ret; |
---|
| 1604 | + |
---|
| 1605 | + if (vlan_id > HCLGEVF_MAX_VLAN_ID) |
---|
959 | 1606 | return -EINVAL; |
---|
960 | 1607 | |
---|
961 | 1608 | if (proto != htons(ETH_P_8021Q)) |
---|
962 | 1609 | return -EPROTONOSUPPORT; |
---|
963 | 1610 | |
---|
964 | | - msg_data[0] = is_kill; |
---|
965 | | - memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id)); |
---|
966 | | - memcpy(&msg_data[3], &proto, sizeof(proto)); |
---|
967 | | - return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN, |
---|
968 | | - HCLGE_MBX_VLAN_FILTER, msg_data, |
---|
969 | | - HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0); |
---|
| 1611 | + /* When device is resetting or reset failed, firmware is unable to |
---|
| 1612 | + * handle mailbox. Just record the vlan id, and remove it after |
---|
| 1613 | + * reset finished. |
---|
| 1614 | + */ |
---|
| 1615 | + if ((test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) || |
---|
| 1616 | + test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) && is_kill) { |
---|
| 1617 | + set_bit(vlan_id, hdev->vlan_del_fail_bmap); |
---|
| 1618 | + return -EBUSY; |
---|
| 1619 | + } |
---|
| 1620 | + |
---|
| 1621 | + hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, |
---|
| 1622 | + HCLGE_MBX_VLAN_FILTER); |
---|
| 1623 | + send_msg.data[HCLGEVF_VLAN_MBX_IS_KILL_OFFSET] = is_kill; |
---|
| 1624 | + memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET], &vlan_id, |
---|
| 1625 | + sizeof(vlan_id)); |
---|
| 1626 | + memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_PROTO_OFFSET], &proto, |
---|
| 1627 | + sizeof(proto)); |
---|
| 1628 | + /* when remove hw vlan filter failed, record the vlan id, |
---|
| 1629 | + * and try to remove it from hw later, to be consistence |
---|
| 1630 | + * with stack. |
---|
| 1631 | + */ |
---|
| 1632 | + ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); |
---|
| 1633 | + if (is_kill && ret) |
---|
| 1634 | + set_bit(vlan_id, hdev->vlan_del_fail_bmap); |
---|
| 1635 | + |
---|
| 1636 | + return ret; |
---|
| 1637 | +} |
---|
| 1638 | + |
---|
| 1639 | +static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev) |
---|
| 1640 | +{ |
---|
| 1641 | +#define HCLGEVF_MAX_SYNC_COUNT 60 |
---|
| 1642 | + struct hnae3_handle *handle = &hdev->nic; |
---|
| 1643 | + int ret, sync_cnt = 0; |
---|
| 1644 | + u16 vlan_id; |
---|
| 1645 | + |
---|
| 1646 | + vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID); |
---|
| 1647 | + while (vlan_id != VLAN_N_VID) { |
---|
| 1648 | + ret = hclgevf_set_vlan_filter(handle, htons(ETH_P_8021Q), |
---|
| 1649 | + vlan_id, true); |
---|
| 1650 | + if (ret) |
---|
| 1651 | + return; |
---|
| 1652 | + |
---|
| 1653 | + clear_bit(vlan_id, hdev->vlan_del_fail_bmap); |
---|
| 1654 | + sync_cnt++; |
---|
| 1655 | + if (sync_cnt >= HCLGEVF_MAX_SYNC_COUNT) |
---|
| 1656 | + return; |
---|
| 1657 | + |
---|
| 1658 | + vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID); |
---|
| 1659 | + } |
---|
970 | 1660 | } |
---|
971 | 1661 | |
---|
972 | 1662 | static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) |
---|
973 | 1663 | { |
---|
974 | 1664 | struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); |
---|
975 | | - u8 msg_data; |
---|
| 1665 | + struct hclge_vf_to_pf_msg send_msg; |
---|
976 | 1666 | |
---|
977 | | - msg_data = enable ? 1 : 0; |
---|
978 | | - return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN, |
---|
979 | | - HCLGE_MBX_VLAN_RX_OFF_CFG, &msg_data, |
---|
980 | | - 1, false, NULL, 0); |
---|
| 1667 | + hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, |
---|
| 1668 | + HCLGE_MBX_VLAN_RX_OFF_CFG); |
---|
| 1669 | + send_msg.data[0] = enable ? 1 : 0; |
---|
| 1670 | + return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); |
---|
981 | 1671 | } |
---|
982 | 1672 | |
---|
983 | | -static void hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id) |
---|
| 1673 | +static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id) |
---|
984 | 1674 | { |
---|
985 | 1675 | struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); |
---|
986 | | - u8 msg_data[2]; |
---|
| 1676 | + struct hclge_vf_to_pf_msg send_msg; |
---|
987 | 1677 | int ret; |
---|
988 | | - |
---|
989 | | - memcpy(&msg_data[0], &queue_id, sizeof(queue_id)); |
---|
990 | 1678 | |
---|
991 | 1679 | /* disable vf queue before send queue reset msg to PF */ |
---|
992 | 1680 | ret = hclgevf_tqp_enable(hdev, queue_id, 0, false); |
---|
993 | 1681 | if (ret) |
---|
994 | | - return; |
---|
| 1682 | + return ret; |
---|
995 | 1683 | |
---|
996 | | - hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data, |
---|
997 | | - 2, true, NULL, 0); |
---|
| 1684 | + hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0); |
---|
| 1685 | + memcpy(send_msg.data, &queue_id, sizeof(queue_id)); |
---|
| 1686 | + return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); |
---|
| 1687 | +} |
---|
| 1688 | + |
---|
| 1689 | +static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu) |
---|
| 1690 | +{ |
---|
| 1691 | + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); |
---|
| 1692 | + struct hclge_vf_to_pf_msg send_msg; |
---|
| 1693 | + |
---|
| 1694 | + hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_MTU, 0); |
---|
| 1695 | + memcpy(send_msg.data, &new_mtu, sizeof(new_mtu)); |
---|
| 1696 | + return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); |
---|
998 | 1697 | } |
---|
999 | 1698 | |
---|
1000 | 1699 | static int hclgevf_notify_client(struct hclgevf_dev *hdev, |
---|
.. | .. |
---|
1002 | 1701 | { |
---|
1003 | 1702 | struct hnae3_client *client = hdev->nic_client; |
---|
1004 | 1703 | struct hnae3_handle *handle = &hdev->nic; |
---|
| 1704 | + int ret; |
---|
| 1705 | + |
---|
| 1706 | + if (!test_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state) || |
---|
| 1707 | + !client) |
---|
| 1708 | + return 0; |
---|
1005 | 1709 | |
---|
1006 | 1710 | if (!client->ops->reset_notify) |
---|
1007 | 1711 | return -EOPNOTSUPP; |
---|
1008 | 1712 | |
---|
1009 | | - return client->ops->reset_notify(handle, type); |
---|
| 1713 | + ret = client->ops->reset_notify(handle, type); |
---|
| 1714 | + if (ret) |
---|
| 1715 | + dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n", |
---|
| 1716 | + type, ret); |
---|
| 1717 | + |
---|
| 1718 | + return ret; |
---|
| 1719 | +} |
---|
| 1720 | + |
---|
| 1721 | +static int hclgevf_notify_roce_client(struct hclgevf_dev *hdev, |
---|
| 1722 | + enum hnae3_reset_notify_type type) |
---|
| 1723 | +{ |
---|
| 1724 | + struct hnae3_client *client = hdev->roce_client; |
---|
| 1725 | + struct hnae3_handle *handle = &hdev->roce; |
---|
| 1726 | + int ret; |
---|
| 1727 | + |
---|
| 1728 | + if (!test_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state) || !client) |
---|
| 1729 | + return 0; |
---|
| 1730 | + |
---|
| 1731 | + if (!client->ops->reset_notify) |
---|
| 1732 | + return -EOPNOTSUPP; |
---|
| 1733 | + |
---|
| 1734 | + ret = client->ops->reset_notify(handle, type); |
---|
| 1735 | + if (ret) |
---|
| 1736 | + dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)", |
---|
| 1737 | + type, ret); |
---|
| 1738 | + return ret; |
---|
1010 | 1739 | } |
---|
1011 | 1740 | |
---|
1012 | 1741 | static int hclgevf_reset_wait(struct hclgevf_dev *hdev) |
---|
1013 | 1742 | { |
---|
1014 | | -#define HCLGEVF_RESET_WAIT_MS 500 |
---|
1015 | | -#define HCLGEVF_RESET_WAIT_CNT 20 |
---|
1016 | | - u32 val, cnt = 0; |
---|
| 1743 | +#define HCLGEVF_RESET_WAIT_US 20000 |
---|
| 1744 | +#define HCLGEVF_RESET_WAIT_CNT 2000 |
---|
| 1745 | +#define HCLGEVF_RESET_WAIT_TIMEOUT_US \ |
---|
| 1746 | + (HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT) |
---|
1017 | 1747 | |
---|
1018 | | - /* wait to check the hardware reset completion status */ |
---|
1019 | | - val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING); |
---|
1020 | | - while (hnae3_get_bit(val, HCLGEVF_FUN_RST_ING_B) && |
---|
1021 | | - (cnt < HCLGEVF_RESET_WAIT_CNT)) { |
---|
1022 | | - msleep(HCLGEVF_RESET_WAIT_MS); |
---|
1023 | | - val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING); |
---|
1024 | | - cnt++; |
---|
1025 | | - } |
---|
| 1748 | + u32 val; |
---|
| 1749 | + int ret; |
---|
| 1750 | + |
---|
| 1751 | + if (hdev->reset_type == HNAE3_VF_RESET) |
---|
| 1752 | + ret = readl_poll_timeout(hdev->hw.io_base + |
---|
| 1753 | + HCLGEVF_VF_RST_ING, val, |
---|
| 1754 | + !(val & HCLGEVF_VF_RST_ING_BIT), |
---|
| 1755 | + HCLGEVF_RESET_WAIT_US, |
---|
| 1756 | + HCLGEVF_RESET_WAIT_TIMEOUT_US); |
---|
| 1757 | + else |
---|
| 1758 | + ret = readl_poll_timeout(hdev->hw.io_base + |
---|
| 1759 | + HCLGEVF_RST_ING, val, |
---|
| 1760 | + !(val & HCLGEVF_RST_ING_BITS), |
---|
| 1761 | + HCLGEVF_RESET_WAIT_US, |
---|
| 1762 | + HCLGEVF_RESET_WAIT_TIMEOUT_US); |
---|
1026 | 1763 | |
---|
1027 | 1764 | /* hardware completion status should be available by this time */ |
---|
1028 | | - if (cnt >= HCLGEVF_RESET_WAIT_CNT) { |
---|
1029 | | - dev_warn(&hdev->pdev->dev, |
---|
1030 | | - "could'nt get reset done status from h/w, timeout!\n"); |
---|
1031 | | - return -EBUSY; |
---|
| 1765 | + if (ret) { |
---|
| 1766 | + dev_err(&hdev->pdev->dev, |
---|
| 1767 | + "couldn't get reset done status from h/w, timeout!\n"); |
---|
| 1768 | + return ret; |
---|
1032 | 1769 | } |
---|
1033 | 1770 | |
---|
1034 | 1771 | /* we will wait a bit more to let reset of the stack to complete. This |
---|
1035 | 1772 | * might happen in case reset assertion was made by PF. Yes, this also |
---|
1036 | 1773 | * means we might end up waiting bit more even for VF reset. |
---|
1037 | 1774 | */ |
---|
1038 | | - msleep(5000); |
---|
| 1775 | + if (hdev->reset_type == HNAE3_VF_FULL_RESET) |
---|
| 1776 | + msleep(5000); |
---|
| 1777 | + else |
---|
| 1778 | + msleep(500); |
---|
1039 | 1779 | |
---|
1040 | 1780 | return 0; |
---|
| 1781 | +} |
---|
| 1782 | + |
---|
| 1783 | +static void hclgevf_reset_handshake(struct hclgevf_dev *hdev, bool enable) |
---|
| 1784 | +{ |
---|
| 1785 | + u32 reg_val; |
---|
| 1786 | + |
---|
| 1787 | + reg_val = hclgevf_read_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG); |
---|
| 1788 | + if (enable) |
---|
| 1789 | + reg_val |= HCLGEVF_NIC_SW_RST_RDY; |
---|
| 1790 | + else |
---|
| 1791 | + reg_val &= ~HCLGEVF_NIC_SW_RST_RDY; |
---|
| 1792 | + |
---|
| 1793 | + hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG, |
---|
| 1794 | + reg_val); |
---|
1041 | 1795 | } |
---|
1042 | 1796 | |
---|
1043 | 1797 | static int hclgevf_reset_stack(struct hclgevf_dev *hdev) |
---|
.. | .. |
---|
1045 | 1799 | int ret; |
---|
1046 | 1800 | |
---|
1047 | 1801 | /* uninitialize the nic client */ |
---|
1048 | | - hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT); |
---|
| 1802 | + ret = hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT); |
---|
| 1803 | + if (ret) |
---|
| 1804 | + return ret; |
---|
1049 | 1805 | |
---|
1050 | 1806 | /* re-initialize the hclge device */ |
---|
1051 | | - ret = hclgevf_init_hdev(hdev); |
---|
| 1807 | + ret = hclgevf_reset_hdev(hdev); |
---|
1052 | 1808 | if (ret) { |
---|
1053 | 1809 | dev_err(&hdev->pdev->dev, |
---|
1054 | 1810 | "hclge device re-init failed, VF is disabled!\n"); |
---|
.. | .. |
---|
1056 | 1812 | } |
---|
1057 | 1813 | |
---|
1058 | 1814 | /* bring up the nic client again */ |
---|
1059 | | - hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT); |
---|
| 1815 | + ret = hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT); |
---|
| 1816 | + if (ret) |
---|
| 1817 | + return ret; |
---|
| 1818 | + |
---|
| 1819 | + /* clear handshake status with IMP */ |
---|
| 1820 | + hclgevf_reset_handshake(hdev, false); |
---|
| 1821 | + |
---|
| 1822 | + /* bring up the nic to enable TX/RX again */ |
---|
| 1823 | + return hclgevf_notify_client(hdev, HNAE3_UP_CLIENT); |
---|
| 1824 | +} |
---|
| 1825 | + |
---|
| 1826 | +static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev) |
---|
| 1827 | +{ |
---|
| 1828 | +#define HCLGEVF_RESET_SYNC_TIME 100 |
---|
| 1829 | + |
---|
| 1830 | + if (hdev->reset_type == HNAE3_VF_FUNC_RESET) { |
---|
| 1831 | + struct hclge_vf_to_pf_msg send_msg; |
---|
| 1832 | + int ret; |
---|
| 1833 | + |
---|
| 1834 | + hclgevf_build_send_msg(&send_msg, HCLGE_MBX_RESET, 0); |
---|
| 1835 | + ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); |
---|
| 1836 | + if (ret) { |
---|
| 1837 | + dev_err(&hdev->pdev->dev, |
---|
| 1838 | + "failed to assert VF reset, ret = %d\n", ret); |
---|
| 1839 | + return ret; |
---|
| 1840 | + } |
---|
| 1841 | + hdev->rst_stats.vf_func_rst_cnt++; |
---|
| 1842 | + } |
---|
| 1843 | + |
---|
| 1844 | + set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); |
---|
| 1845 | + /* inform hardware that preparatory work is done */ |
---|
| 1846 | + msleep(HCLGEVF_RESET_SYNC_TIME); |
---|
| 1847 | + hclgevf_reset_handshake(hdev, true); |
---|
| 1848 | + dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done\n", |
---|
| 1849 | + hdev->reset_type); |
---|
1060 | 1850 | |
---|
1061 | 1851 | return 0; |
---|
1062 | 1852 | } |
---|
1063 | 1853 | |
---|
1064 | | -static int hclgevf_reset(struct hclgevf_dev *hdev) |
---|
| 1854 | +static void hclgevf_dump_rst_info(struct hclgevf_dev *hdev) |
---|
| 1855 | +{ |
---|
| 1856 | + dev_info(&hdev->pdev->dev, "VF function reset count: %u\n", |
---|
| 1857 | + hdev->rst_stats.vf_func_rst_cnt); |
---|
| 1858 | + dev_info(&hdev->pdev->dev, "FLR reset count: %u\n", |
---|
| 1859 | + hdev->rst_stats.flr_rst_cnt); |
---|
| 1860 | + dev_info(&hdev->pdev->dev, "VF reset count: %u\n", |
---|
| 1861 | + hdev->rst_stats.vf_rst_cnt); |
---|
| 1862 | + dev_info(&hdev->pdev->dev, "reset done count: %u\n", |
---|
| 1863 | + hdev->rst_stats.rst_done_cnt); |
---|
| 1864 | + dev_info(&hdev->pdev->dev, "HW reset done count: %u\n", |
---|
| 1865 | + hdev->rst_stats.hw_rst_done_cnt); |
---|
| 1866 | + dev_info(&hdev->pdev->dev, "reset count: %u\n", |
---|
| 1867 | + hdev->rst_stats.rst_cnt); |
---|
| 1868 | + dev_info(&hdev->pdev->dev, "reset fail count: %u\n", |
---|
| 1869 | + hdev->rst_stats.rst_fail_cnt); |
---|
| 1870 | + dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n", |
---|
| 1871 | + hclgevf_read_dev(&hdev->hw, HCLGEVF_MISC_VECTOR_REG_BASE)); |
---|
| 1872 | + dev_info(&hdev->pdev->dev, "vector0 interrupt status: 0x%x\n", |
---|
| 1873 | + hclgevf_read_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_STATE_REG)); |
---|
| 1874 | + dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n", |
---|
| 1875 | + hclgevf_read_dev(&hdev->hw, HCLGEVF_CMDQ_TX_DEPTH_REG)); |
---|
| 1876 | + dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n", |
---|
| 1877 | + hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING)); |
---|
| 1878 | + dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state); |
---|
| 1879 | +} |
---|
| 1880 | + |
---|
| 1881 | +static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev) |
---|
| 1882 | +{ |
---|
| 1883 | + /* recover handshake status with IMP when reset fail */ |
---|
| 1884 | + hclgevf_reset_handshake(hdev, true); |
---|
| 1885 | + hdev->rst_stats.rst_fail_cnt++; |
---|
| 1886 | + dev_err(&hdev->pdev->dev, "failed to reset VF(%u)\n", |
---|
| 1887 | + hdev->rst_stats.rst_fail_cnt); |
---|
| 1888 | + |
---|
| 1889 | + if (hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT) |
---|
| 1890 | + set_bit(hdev->reset_type, &hdev->reset_pending); |
---|
| 1891 | + |
---|
| 1892 | + if (hclgevf_is_reset_pending(hdev)) { |
---|
| 1893 | + set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); |
---|
| 1894 | + hclgevf_reset_task_schedule(hdev); |
---|
| 1895 | + } else { |
---|
| 1896 | + set_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state); |
---|
| 1897 | + hclgevf_dump_rst_info(hdev); |
---|
| 1898 | + } |
---|
| 1899 | +} |
---|
| 1900 | + |
---|
| 1901 | +static int hclgevf_reset_prepare(struct hclgevf_dev *hdev) |
---|
1065 | 1902 | { |
---|
1066 | 1903 | int ret; |
---|
1067 | 1904 | |
---|
| 1905 | + hdev->rst_stats.rst_cnt++; |
---|
| 1906 | + |
---|
| 1907 | + /* perform reset of the stack & ae device for a client */ |
---|
| 1908 | + ret = hclgevf_notify_roce_client(hdev, HNAE3_DOWN_CLIENT); |
---|
| 1909 | + if (ret) |
---|
| 1910 | + return ret; |
---|
| 1911 | + |
---|
1068 | 1912 | rtnl_lock(); |
---|
1069 | | - |
---|
1070 | 1913 | /* bring down the nic to stop any ongoing TX/RX */ |
---|
1071 | | - hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT); |
---|
1072 | | - |
---|
| 1914 | + ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT); |
---|
1073 | 1915 | rtnl_unlock(); |
---|
| 1916 | + if (ret) |
---|
| 1917 | + return ret; |
---|
| 1918 | + |
---|
| 1919 | + return hclgevf_reset_prepare_wait(hdev); |
---|
| 1920 | +} |
---|
| 1921 | + |
---|
| 1922 | +static int hclgevf_reset_rebuild(struct hclgevf_dev *hdev) |
---|
| 1923 | +{ |
---|
| 1924 | + int ret; |
---|
| 1925 | + |
---|
| 1926 | + hdev->rst_stats.hw_rst_done_cnt++; |
---|
| 1927 | + ret = hclgevf_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT); |
---|
| 1928 | + if (ret) |
---|
| 1929 | + return ret; |
---|
| 1930 | + |
---|
| 1931 | + rtnl_lock(); |
---|
| 1932 | + /* now, re-initialize the nic client and ae device */ |
---|
| 1933 | + ret = hclgevf_reset_stack(hdev); |
---|
| 1934 | + rtnl_unlock(); |
---|
| 1935 | + if (ret) { |
---|
| 1936 | + dev_err(&hdev->pdev->dev, "failed to reset VF stack\n"); |
---|
| 1937 | + return ret; |
---|
| 1938 | + } |
---|
| 1939 | + |
---|
| 1940 | + ret = hclgevf_notify_roce_client(hdev, HNAE3_INIT_CLIENT); |
---|
| 1941 | + /* ignore RoCE notify error if it fails HCLGEVF_RESET_MAX_FAIL_CNT - 1 |
---|
| 1942 | + * times |
---|
| 1943 | + */ |
---|
| 1944 | + if (ret && |
---|
| 1945 | + hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT - 1) |
---|
| 1946 | + return ret; |
---|
| 1947 | + |
---|
| 1948 | + ret = hclgevf_notify_roce_client(hdev, HNAE3_UP_CLIENT); |
---|
| 1949 | + if (ret) |
---|
| 1950 | + return ret; |
---|
| 1951 | + |
---|
| 1952 | + hdev->last_reset_time = jiffies; |
---|
| 1953 | + hdev->rst_stats.rst_done_cnt++; |
---|
| 1954 | + hdev->rst_stats.rst_fail_cnt = 0; |
---|
| 1955 | + clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state); |
---|
| 1956 | + |
---|
| 1957 | + return 0; |
---|
| 1958 | +} |
---|
| 1959 | + |
---|
| 1960 | +static void hclgevf_reset(struct hclgevf_dev *hdev) |
---|
| 1961 | +{ |
---|
| 1962 | + if (hclgevf_reset_prepare(hdev)) |
---|
| 1963 | + goto err_reset; |
---|
1074 | 1964 | |
---|
1075 | 1965 | /* check if VF could successfully fetch the hardware reset completion |
---|
1076 | 1966 | * status from the hardware |
---|
1077 | 1967 | */ |
---|
1078 | | - ret = hclgevf_reset_wait(hdev); |
---|
1079 | | - if (ret) { |
---|
| 1968 | + if (hclgevf_reset_wait(hdev)) { |
---|
1080 | 1969 | /* can't do much in this situation, will disable VF */ |
---|
1081 | 1970 | dev_err(&hdev->pdev->dev, |
---|
1082 | | - "VF failed(=%d) to fetch H/W reset completion status\n", |
---|
1083 | | - ret); |
---|
1084 | | - |
---|
1085 | | - dev_warn(&hdev->pdev->dev, "VF reset failed, disabling VF!\n"); |
---|
1086 | | - rtnl_lock(); |
---|
1087 | | - hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT); |
---|
1088 | | - |
---|
1089 | | - rtnl_unlock(); |
---|
1090 | | - return ret; |
---|
| 1971 | + "failed to fetch H/W reset completion status\n"); |
---|
| 1972 | + goto err_reset; |
---|
1091 | 1973 | } |
---|
1092 | 1974 | |
---|
1093 | | - rtnl_lock(); |
---|
| 1975 | + if (hclgevf_reset_rebuild(hdev)) |
---|
| 1976 | + goto err_reset; |
---|
1094 | 1977 | |
---|
1095 | | - /* now, re-initialize the nic client and ae device*/ |
---|
1096 | | - ret = hclgevf_reset_stack(hdev); |
---|
1097 | | - if (ret) |
---|
1098 | | - dev_err(&hdev->pdev->dev, "failed to reset VF stack\n"); |
---|
| 1978 | + return; |
---|
1099 | 1979 | |
---|
1100 | | - /* bring up the nic to enable TX/RX again */ |
---|
1101 | | - hclgevf_notify_client(hdev, HNAE3_UP_CLIENT); |
---|
1102 | | - |
---|
1103 | | - rtnl_unlock(); |
---|
1104 | | - |
---|
1105 | | - return ret; |
---|
| 1980 | +err_reset: |
---|
| 1981 | + hclgevf_reset_err_handle(hdev); |
---|
1106 | 1982 | } |
---|
1107 | 1983 | |
---|
1108 | | -static int hclgevf_do_reset(struct hclgevf_dev *hdev) |
---|
| 1984 | +static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev, |
---|
| 1985 | + unsigned long *addr) |
---|
1109 | 1986 | { |
---|
1110 | | - int status; |
---|
1111 | | - u8 respmsg; |
---|
| 1987 | + enum hnae3_reset_type rst_level = HNAE3_NONE_RESET; |
---|
1112 | 1988 | |
---|
1113 | | - status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL, |
---|
1114 | | - 0, false, &respmsg, sizeof(u8)); |
---|
1115 | | - if (status) |
---|
1116 | | - dev_err(&hdev->pdev->dev, |
---|
1117 | | - "VF reset request to PF failed(=%d)\n", status); |
---|
| 1989 | + /* return the highest priority reset level amongst all */ |
---|
| 1990 | + if (test_bit(HNAE3_VF_RESET, addr)) { |
---|
| 1991 | + rst_level = HNAE3_VF_RESET; |
---|
| 1992 | + clear_bit(HNAE3_VF_RESET, addr); |
---|
| 1993 | + clear_bit(HNAE3_VF_PF_FUNC_RESET, addr); |
---|
| 1994 | + clear_bit(HNAE3_VF_FUNC_RESET, addr); |
---|
| 1995 | + } else if (test_bit(HNAE3_VF_FULL_RESET, addr)) { |
---|
| 1996 | + rst_level = HNAE3_VF_FULL_RESET; |
---|
| 1997 | + clear_bit(HNAE3_VF_FULL_RESET, addr); |
---|
| 1998 | + clear_bit(HNAE3_VF_FUNC_RESET, addr); |
---|
| 1999 | + } else if (test_bit(HNAE3_VF_PF_FUNC_RESET, addr)) { |
---|
| 2000 | + rst_level = HNAE3_VF_PF_FUNC_RESET; |
---|
| 2001 | + clear_bit(HNAE3_VF_PF_FUNC_RESET, addr); |
---|
| 2002 | + clear_bit(HNAE3_VF_FUNC_RESET, addr); |
---|
| 2003 | + } else if (test_bit(HNAE3_VF_FUNC_RESET, addr)) { |
---|
| 2004 | + rst_level = HNAE3_VF_FUNC_RESET; |
---|
| 2005 | + clear_bit(HNAE3_VF_FUNC_RESET, addr); |
---|
| 2006 | + } else if (test_bit(HNAE3_FLR_RESET, addr)) { |
---|
| 2007 | + rst_level = HNAE3_FLR_RESET; |
---|
| 2008 | + clear_bit(HNAE3_FLR_RESET, addr); |
---|
| 2009 | + } |
---|
1118 | 2010 | |
---|
1119 | | - return status; |
---|
| 2011 | + return rst_level; |
---|
1120 | 2012 | } |
---|
1121 | 2013 | |
---|
1122 | | -static void hclgevf_reset_event(struct hnae3_handle *handle) |
---|
| 2014 | +static void hclgevf_reset_event(struct pci_dev *pdev, |
---|
| 2015 | + struct hnae3_handle *handle) |
---|
1123 | 2016 | { |
---|
1124 | | - struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); |
---|
| 2017 | + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); |
---|
| 2018 | + struct hclgevf_dev *hdev = ae_dev->priv; |
---|
1125 | 2019 | |
---|
1126 | 2020 | dev_info(&hdev->pdev->dev, "received reset request from VF enet\n"); |
---|
1127 | 2021 | |
---|
1128 | | - handle->reset_level = HNAE3_VF_RESET; |
---|
| 2022 | + if (hdev->default_reset_request) |
---|
| 2023 | + hdev->reset_level = |
---|
| 2024 | + hclgevf_get_reset_level(hdev, |
---|
| 2025 | + &hdev->default_reset_request); |
---|
| 2026 | + else |
---|
| 2027 | + hdev->reset_level = HNAE3_VF_FUNC_RESET; |
---|
1129 | 2028 | |
---|
1130 | 2029 | /* reset of this VF requested */ |
---|
1131 | 2030 | set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state); |
---|
1132 | 2031 | hclgevf_reset_task_schedule(hdev); |
---|
1133 | 2032 | |
---|
1134 | | - handle->last_reset_time = jiffies; |
---|
| 2033 | + hdev->last_reset_time = jiffies; |
---|
| 2034 | +} |
---|
| 2035 | + |
---|
| 2036 | +static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev, |
---|
| 2037 | + enum hnae3_reset_type rst_type) |
---|
| 2038 | +{ |
---|
| 2039 | + struct hclgevf_dev *hdev = ae_dev->priv; |
---|
| 2040 | + |
---|
| 2041 | + set_bit(rst_type, &hdev->default_reset_request); |
---|
| 2042 | +} |
---|
| 2043 | + |
---|
| 2044 | +static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en) |
---|
| 2045 | +{ |
---|
| 2046 | + writel(en ? 1 : 0, vector->addr); |
---|
| 2047 | +} |
---|
| 2048 | + |
---|
| 2049 | +static void hclgevf_flr_prepare(struct hnae3_ae_dev *ae_dev) |
---|
| 2050 | +{ |
---|
| 2051 | +#define HCLGEVF_FLR_RETRY_WAIT_MS 500 |
---|
| 2052 | +#define HCLGEVF_FLR_RETRY_CNT 5 |
---|
| 2053 | + |
---|
| 2054 | + struct hclgevf_dev *hdev = ae_dev->priv; |
---|
| 2055 | + int retry_cnt = 0; |
---|
| 2056 | + int ret; |
---|
| 2057 | + |
---|
| 2058 | +retry: |
---|
| 2059 | + down(&hdev->reset_sem); |
---|
| 2060 | + set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); |
---|
| 2061 | + hdev->reset_type = HNAE3_FLR_RESET; |
---|
| 2062 | + ret = hclgevf_reset_prepare(hdev); |
---|
| 2063 | + if (ret) { |
---|
| 2064 | + dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n", |
---|
| 2065 | + ret); |
---|
| 2066 | + if (hdev->reset_pending || |
---|
| 2067 | + retry_cnt++ < HCLGEVF_FLR_RETRY_CNT) { |
---|
| 2068 | + dev_err(&hdev->pdev->dev, |
---|
| 2069 | + "reset_pending:0x%lx, retry_cnt:%d\n", |
---|
| 2070 | + hdev->reset_pending, retry_cnt); |
---|
| 2071 | + clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); |
---|
| 2072 | + up(&hdev->reset_sem); |
---|
| 2073 | + msleep(HCLGEVF_FLR_RETRY_WAIT_MS); |
---|
| 2074 | + goto retry; |
---|
| 2075 | + } |
---|
| 2076 | + } |
---|
| 2077 | + |
---|
| 2078 | + /* disable misc vector before FLR done */ |
---|
| 2079 | + hclgevf_enable_vector(&hdev->misc_vector, false); |
---|
| 2080 | + hdev->rst_stats.flr_rst_cnt++; |
---|
| 2081 | +} |
---|
| 2082 | + |
---|
| 2083 | +static void hclgevf_flr_done(struct hnae3_ae_dev *ae_dev) |
---|
| 2084 | +{ |
---|
| 2085 | + struct hclgevf_dev *hdev = ae_dev->priv; |
---|
| 2086 | + int ret; |
---|
| 2087 | + |
---|
| 2088 | + hclgevf_enable_vector(&hdev->misc_vector, true); |
---|
| 2089 | + |
---|
| 2090 | + ret = hclgevf_reset_rebuild(hdev); |
---|
| 2091 | + if (ret) |
---|
| 2092 | + dev_warn(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", |
---|
| 2093 | + ret); |
---|
| 2094 | + |
---|
| 2095 | + hdev->reset_type = HNAE3_NONE_RESET; |
---|
| 2096 | + clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); |
---|
| 2097 | + up(&hdev->reset_sem); |
---|
1135 | 2098 | } |
---|
1136 | 2099 | |
---|
1137 | 2100 | static u32 hclgevf_get_fw_version(struct hnae3_handle *handle) |
---|
.. | .. |
---|
1158 | 2121 | |
---|
1159 | 2122 | void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev) |
---|
1160 | 2123 | { |
---|
1161 | | - if (!test_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state) && |
---|
1162 | | - !test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) { |
---|
1163 | | - set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state); |
---|
1164 | | - schedule_work(&hdev->rst_service_task); |
---|
1165 | | - } |
---|
| 2124 | + if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) && |
---|
| 2125 | + !test_and_set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, |
---|
| 2126 | + &hdev->state)) |
---|
| 2127 | + mod_delayed_work(hclgevf_wq, &hdev->service_task, 0); |
---|
1166 | 2128 | } |
---|
1167 | 2129 | |
---|
1168 | 2130 | void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev) |
---|
1169 | 2131 | { |
---|
1170 | | - if (!test_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state) && |
---|
1171 | | - !test_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) { |
---|
1172 | | - set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state); |
---|
1173 | | - schedule_work(&hdev->mbx_service_task); |
---|
1174 | | - } |
---|
| 2132 | + if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) && |
---|
| 2133 | + !test_and_set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, |
---|
| 2134 | + &hdev->state)) |
---|
| 2135 | + mod_delayed_work(hclgevf_wq, &hdev->service_task, 0); |
---|
1175 | 2136 | } |
---|
1176 | 2137 | |
---|
1177 | | -static void hclgevf_task_schedule(struct hclgevf_dev *hdev) |
---|
| 2138 | +static void hclgevf_task_schedule(struct hclgevf_dev *hdev, |
---|
| 2139 | + unsigned long delay) |
---|
1178 | 2140 | { |
---|
1179 | | - if (!test_bit(HCLGEVF_STATE_DOWN, &hdev->state) && |
---|
1180 | | - !test_and_set_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state)) |
---|
1181 | | - schedule_work(&hdev->service_task); |
---|
| 2141 | + if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) && |
---|
| 2142 | + !test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) |
---|
| 2143 | + mod_delayed_work(hclgevf_wq, &hdev->service_task, delay); |
---|
1182 | 2144 | } |
---|
1183 | 2145 | |
---|
1184 | | -static void hclgevf_deferred_task_schedule(struct hclgevf_dev *hdev) |
---|
| 2146 | +static void hclgevf_reset_service_task(struct hclgevf_dev *hdev) |
---|
1185 | 2147 | { |
---|
1186 | | - /* if we have any pending mailbox event then schedule the mbx task */ |
---|
1187 | | - if (hdev->mbx_event_pending) |
---|
1188 | | - hclgevf_mbx_task_schedule(hdev); |
---|
| 2148 | +#define HCLGEVF_MAX_RESET_ATTEMPTS_CNT 3 |
---|
1189 | 2149 | |
---|
1190 | | - if (test_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state)) |
---|
1191 | | - hclgevf_reset_task_schedule(hdev); |
---|
1192 | | -} |
---|
1193 | | - |
---|
1194 | | -static void hclgevf_service_timer(struct timer_list *t) |
---|
1195 | | -{ |
---|
1196 | | - struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer); |
---|
1197 | | - |
---|
1198 | | - mod_timer(&hdev->service_timer, jiffies + 5 * HZ); |
---|
1199 | | - |
---|
1200 | | - hclgevf_task_schedule(hdev); |
---|
1201 | | -} |
---|
1202 | | - |
---|
1203 | | -static void hclgevf_reset_service_task(struct work_struct *work) |
---|
1204 | | -{ |
---|
1205 | | - struct hclgevf_dev *hdev = |
---|
1206 | | - container_of(work, struct hclgevf_dev, rst_service_task); |
---|
1207 | | - int ret; |
---|
1208 | | - |
---|
1209 | | - if (test_and_set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) |
---|
| 2150 | + if (!test_and_clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state)) |
---|
1210 | 2151 | return; |
---|
1211 | 2152 | |
---|
1212 | | - clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state); |
---|
| 2153 | + down(&hdev->reset_sem); |
---|
| 2154 | + set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); |
---|
1213 | 2155 | |
---|
1214 | 2156 | if (test_and_clear_bit(HCLGEVF_RESET_PENDING, |
---|
1215 | 2157 | &hdev->reset_state)) { |
---|
1216 | 2158 | /* PF has intimated that it is about to reset the hardware. |
---|
1217 | | - * We now have to poll & check if harware has actually completed |
---|
1218 | | - * the reset sequence. On hardware reset completion, VF needs to |
---|
1219 | | - * reset the client and ae device. |
---|
| 2159 | + * We now have to poll & check if hardware has actually |
---|
| 2160 | + * completed the reset sequence. On hardware reset completion, |
---|
| 2161 | + * VF needs to reset the client and ae device. |
---|
1220 | 2162 | */ |
---|
1221 | 2163 | hdev->reset_attempts = 0; |
---|
1222 | 2164 | |
---|
1223 | | - ret = hclgevf_reset(hdev); |
---|
1224 | | - if (ret) |
---|
1225 | | - dev_err(&hdev->pdev->dev, "VF stack reset failed.\n"); |
---|
| 2165 | + hdev->last_reset_time = jiffies; |
---|
| 2166 | + hdev->reset_type = |
---|
| 2167 | + hclgevf_get_reset_level(hdev, &hdev->reset_pending); |
---|
| 2168 | + if (hdev->reset_type != HNAE3_NONE_RESET) |
---|
| 2169 | + hclgevf_reset(hdev); |
---|
1226 | 2170 | } else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED, |
---|
1227 | 2171 | &hdev->reset_state)) { |
---|
1228 | 2172 | /* we could be here when either of below happens: |
---|
1229 | | - * 1. reset was initiated due to watchdog timeout due to |
---|
| 2173 | + * 1. reset was initiated due to watchdog timeout caused by |
---|
1230 | 2174 | * a. IMP was earlier reset and our TX got choked down and |
---|
1231 | 2175 | * which resulted in watchdog reacting and inducing VF |
---|
1232 | 2176 | * reset. This also means our cmdq would be unreliable. |
---|
.. | .. |
---|
1240 | 2184 | * 1b and 2. cases but we will not get any intimation about 1a |
---|
1241 | 2185 | * from PF as cmdq would be in unreliable state i.e. mailbox |
---|
1242 | 2186 | * communication between PF and VF would be broken. |
---|
1243 | | - */ |
---|
1244 | | - |
---|
1245 | | - /* if we are never geting into pending state it means either: |
---|
| 2187 | + * |
---|
| 2188 | + * if we are never getting into pending state it means either: |
---|
1246 | 2189 | * 1. PF is not receiving our request which could be due to IMP |
---|
1247 | 2190 | * reset |
---|
1248 | 2191 | * 2. PF is screwed |
---|
1249 | 2192 | * We cannot do much for 2. but to check first we can try reset |
---|
1250 | 2193 | * our PCIe + stack and see if it alleviates the problem. |
---|
1251 | 2194 | */ |
---|
1252 | | - if (hdev->reset_attempts > 3) { |
---|
| 2195 | + if (hdev->reset_attempts > HCLGEVF_MAX_RESET_ATTEMPTS_CNT) { |
---|
1253 | 2196 | /* prepare for full reset of stack + pcie interface */ |
---|
1254 | | - hdev->nic.reset_level = HNAE3_VF_FULL_RESET; |
---|
| 2197 | + set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending); |
---|
1255 | 2198 | |
---|
1256 | 2199 | /* "defer" schedule the reset task again */ |
---|
1257 | 2200 | set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); |
---|
1258 | 2201 | } else { |
---|
1259 | 2202 | hdev->reset_attempts++; |
---|
1260 | 2203 | |
---|
1261 | | - /* request PF for resetting this VF via mailbox */ |
---|
1262 | | - ret = hclgevf_do_reset(hdev); |
---|
1263 | | - if (ret) |
---|
1264 | | - dev_warn(&hdev->pdev->dev, |
---|
1265 | | - "VF rst fail, stack will call\n"); |
---|
| 2204 | + set_bit(hdev->reset_level, &hdev->reset_pending); |
---|
| 2205 | + set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); |
---|
1266 | 2206 | } |
---|
| 2207 | + hclgevf_reset_task_schedule(hdev); |
---|
1267 | 2208 | } |
---|
1268 | 2209 | |
---|
| 2210 | + hdev->reset_type = HNAE3_NONE_RESET; |
---|
1269 | 2211 | clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); |
---|
| 2212 | + up(&hdev->reset_sem); |
---|
1270 | 2213 | } |
---|
1271 | 2214 | |
---|
1272 | | -static void hclgevf_mailbox_service_task(struct work_struct *work) |
---|
| 2215 | +static void hclgevf_mailbox_service_task(struct hclgevf_dev *hdev) |
---|
1273 | 2216 | { |
---|
1274 | | - struct hclgevf_dev *hdev; |
---|
1275 | | - |
---|
1276 | | - hdev = container_of(work, struct hclgevf_dev, mbx_service_task); |
---|
| 2217 | + if (!test_and_clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state)) |
---|
| 2218 | + return; |
---|
1277 | 2219 | |
---|
1278 | 2220 | if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) |
---|
1279 | 2221 | return; |
---|
1280 | | - |
---|
1281 | | - clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state); |
---|
1282 | 2222 | |
---|
1283 | 2223 | hclgevf_mbx_async_handler(hdev); |
---|
1284 | 2224 | |
---|
1285 | 2225 | clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state); |
---|
1286 | 2226 | } |
---|
1287 | 2227 | |
---|
1288 | | -static void hclgevf_service_task(struct work_struct *work) |
---|
| 2228 | +static void hclgevf_keep_alive(struct hclgevf_dev *hdev) |
---|
1289 | 2229 | { |
---|
1290 | | - struct hclgevf_dev *hdev; |
---|
| 2230 | + struct hclge_vf_to_pf_msg send_msg; |
---|
| 2231 | + int ret; |
---|
1291 | 2232 | |
---|
1292 | | - hdev = container_of(work, struct hclgevf_dev, service_task); |
---|
| 2233 | + if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) |
---|
| 2234 | + return; |
---|
| 2235 | + |
---|
| 2236 | + hclgevf_build_send_msg(&send_msg, HCLGE_MBX_KEEP_ALIVE, 0); |
---|
| 2237 | + ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); |
---|
| 2238 | + if (ret) |
---|
| 2239 | + dev_err(&hdev->pdev->dev, |
---|
| 2240 | + "VF sends keep alive cmd failed(=%d)\n", ret); |
---|
| 2241 | +} |
---|
| 2242 | + |
---|
| 2243 | +static void hclgevf_periodic_service_task(struct hclgevf_dev *hdev) |
---|
| 2244 | +{ |
---|
| 2245 | + unsigned long delta = round_jiffies_relative(HZ); |
---|
| 2246 | + struct hnae3_handle *handle = &hdev->nic; |
---|
| 2247 | + |
---|
| 2248 | + if (test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) |
---|
| 2249 | + return; |
---|
| 2250 | + |
---|
| 2251 | + if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) { |
---|
| 2252 | + delta = jiffies - hdev->last_serv_processed; |
---|
| 2253 | + |
---|
| 2254 | + if (delta < round_jiffies_relative(HZ)) { |
---|
| 2255 | + delta = round_jiffies_relative(HZ) - delta; |
---|
| 2256 | + goto out; |
---|
| 2257 | + } |
---|
| 2258 | + } |
---|
| 2259 | + |
---|
| 2260 | + hdev->serv_processed_cnt++; |
---|
| 2261 | + if (!(hdev->serv_processed_cnt % HCLGEVF_KEEP_ALIVE_TASK_INTERVAL)) |
---|
| 2262 | + hclgevf_keep_alive(hdev); |
---|
| 2263 | + |
---|
| 2264 | + if (test_bit(HCLGEVF_STATE_DOWN, &hdev->state)) { |
---|
| 2265 | + hdev->last_serv_processed = jiffies; |
---|
| 2266 | + goto out; |
---|
| 2267 | + } |
---|
| 2268 | + |
---|
| 2269 | + if (!(hdev->serv_processed_cnt % HCLGEVF_STATS_TIMER_INTERVAL)) |
---|
| 2270 | + hclgevf_tqps_update_stats(handle); |
---|
1293 | 2271 | |
---|
1294 | 2272 | /* request the link status from the PF. PF would be able to tell VF |
---|
1295 | 2273 | * about such updates in future so we might remove this later |
---|
1296 | 2274 | */ |
---|
1297 | 2275 | hclgevf_request_link_info(hdev); |
---|
1298 | 2276 | |
---|
1299 | | - hclgevf_deferred_task_schedule(hdev); |
---|
| 2277 | + hclgevf_update_link_mode(hdev); |
---|
1300 | 2278 | |
---|
1301 | | - clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state); |
---|
| 2279 | + hclgevf_sync_vlan_filter(hdev); |
---|
| 2280 | + |
---|
| 2281 | + hclgevf_sync_mac_table(hdev); |
---|
| 2282 | + |
---|
| 2283 | + hclgevf_sync_promisc_mode(hdev); |
---|
| 2284 | + |
---|
| 2285 | + hdev->last_serv_processed = jiffies; |
---|
| 2286 | + |
---|
| 2287 | +out: |
---|
| 2288 | + hclgevf_task_schedule(hdev, delta); |
---|
| 2289 | +} |
---|
| 2290 | + |
---|
| 2291 | +static void hclgevf_service_task(struct work_struct *work) |
---|
| 2292 | +{ |
---|
| 2293 | + struct hclgevf_dev *hdev = container_of(work, struct hclgevf_dev, |
---|
| 2294 | + service_task.work); |
---|
| 2295 | + |
---|
| 2296 | + hclgevf_reset_service_task(hdev); |
---|
| 2297 | + hclgevf_mailbox_service_task(hdev); |
---|
| 2298 | + hclgevf_periodic_service_task(hdev); |
---|
| 2299 | + |
---|
| 2300 | + /* Handle reset and mbx again in case periodical task delays the |
---|
| 2301 | + * handling by calling hclgevf_task_schedule() in |
---|
| 2302 | + * hclgevf_periodic_service_task() |
---|
| 2303 | + */ |
---|
| 2304 | + hclgevf_reset_service_task(hdev); |
---|
| 2305 | + hclgevf_mailbox_service_task(hdev); |
---|
1302 | 2306 | } |
---|
1303 | 2307 | |
---|
1304 | 2308 | static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr) |
---|
.. | .. |
---|
1306 | 2310 | hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr); |
---|
1307 | 2311 | } |
---|
1308 | 2312 | |
---|
1309 | | -static bool hclgevf_check_event_cause(struct hclgevf_dev *hdev, u32 *clearval) |
---|
| 2313 | +static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev, |
---|
| 2314 | + u32 *clearval) |
---|
1310 | 2315 | { |
---|
1311 | | - u32 cmdq_src_reg; |
---|
| 2316 | + u32 val, cmdq_stat_reg, rst_ing_reg; |
---|
1312 | 2317 | |
---|
1313 | 2318 | /* fetch the events from their corresponding regs */ |
---|
1314 | | - cmdq_src_reg = hclgevf_read_dev(&hdev->hw, |
---|
1315 | | - HCLGEVF_VECTOR0_CMDQ_SRC_REG); |
---|
| 2319 | + cmdq_stat_reg = hclgevf_read_dev(&hdev->hw, |
---|
| 2320 | + HCLGEVF_VECTOR0_CMDQ_STATE_REG); |
---|
1316 | 2321 | |
---|
1317 | | - /* check for vector0 mailbox(=CMDQ RX) event source */ |
---|
1318 | | - if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) { |
---|
1319 | | - cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B); |
---|
1320 | | - *clearval = cmdq_src_reg; |
---|
1321 | | - return true; |
---|
| 2322 | + if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_stat_reg) { |
---|
| 2323 | + rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING); |
---|
| 2324 | + dev_info(&hdev->pdev->dev, |
---|
| 2325 | + "receive reset interrupt 0x%x!\n", rst_ing_reg); |
---|
| 2326 | + set_bit(HNAE3_VF_RESET, &hdev->reset_pending); |
---|
| 2327 | + set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); |
---|
| 2328 | + set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); |
---|
| 2329 | + *clearval = ~(1U << HCLGEVF_VECTOR0_RST_INT_B); |
---|
| 2330 | + hdev->rst_stats.vf_rst_cnt++; |
---|
| 2331 | + /* set up VF hardware reset status, its PF will clear |
---|
| 2332 | + * this status when PF has initialized done. |
---|
| 2333 | + */ |
---|
| 2334 | + val = hclgevf_read_dev(&hdev->hw, HCLGEVF_VF_RST_ING); |
---|
| 2335 | + hclgevf_write_dev(&hdev->hw, HCLGEVF_VF_RST_ING, |
---|
| 2336 | + val | HCLGEVF_VF_RST_ING_BIT); |
---|
| 2337 | + return HCLGEVF_VECTOR0_EVENT_RST; |
---|
1322 | 2338 | } |
---|
1323 | 2339 | |
---|
1324 | | - dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n"); |
---|
| 2340 | + /* check for vector0 mailbox(=CMDQ RX) event source */ |
---|
| 2341 | + if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_stat_reg) { |
---|
| 2342 | + /* for revision 0x21, clearing interrupt is writing bit 0 |
---|
| 2343 | + * to the clear register, writing bit 1 means to keep the |
---|
| 2344 | + * old value. |
---|
| 2345 | + * for revision 0x20, the clear register is a read & write |
---|
| 2346 | + * register, so we should just write 0 to the bit we are |
---|
| 2347 | + * handling, and keep other bits as cmdq_stat_reg. |
---|
| 2348 | + */ |
---|
| 2349 | + if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) |
---|
| 2350 | + *clearval = ~(1U << HCLGEVF_VECTOR0_RX_CMDQ_INT_B); |
---|
| 2351 | + else |
---|
| 2352 | + *clearval = cmdq_stat_reg & |
---|
| 2353 | + ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B); |
---|
1325 | 2354 | |
---|
1326 | | - return false; |
---|
1327 | | -} |
---|
| 2355 | + return HCLGEVF_VECTOR0_EVENT_MBX; |
---|
| 2356 | + } |
---|
1328 | 2357 | |
---|
1329 | | -static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en) |
---|
1330 | | -{ |
---|
1331 | | - writel(en ? 1 : 0, vector->addr); |
---|
| 2358 | + /* print other vector0 event source */ |
---|
| 2359 | + dev_info(&hdev->pdev->dev, |
---|
| 2360 | + "vector 0 interrupt from unknown source, cmdq_src = %#x\n", |
---|
| 2361 | + cmdq_stat_reg); |
---|
| 2362 | + |
---|
| 2363 | + return HCLGEVF_VECTOR0_EVENT_OTHER; |
---|
1332 | 2364 | } |
---|
1333 | 2365 | |
---|
1334 | 2366 | static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data) |
---|
1335 | 2367 | { |
---|
| 2368 | + enum hclgevf_evt_cause event_cause; |
---|
1336 | 2369 | struct hclgevf_dev *hdev = data; |
---|
1337 | 2370 | u32 clearval; |
---|
1338 | 2371 | |
---|
1339 | 2372 | hclgevf_enable_vector(&hdev->misc_vector, false); |
---|
1340 | | - if (!hclgevf_check_event_cause(hdev, &clearval)) |
---|
1341 | | - goto skip_sched; |
---|
| 2373 | + event_cause = hclgevf_check_evt_cause(hdev, &clearval); |
---|
| 2374 | + if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) |
---|
| 2375 | + hclgevf_clear_event_cause(hdev, clearval); |
---|
1342 | 2376 | |
---|
1343 | | - hclgevf_mbx_handler(hdev); |
---|
| 2377 | + switch (event_cause) { |
---|
| 2378 | + case HCLGEVF_VECTOR0_EVENT_RST: |
---|
| 2379 | + hclgevf_reset_task_schedule(hdev); |
---|
| 2380 | + break; |
---|
| 2381 | + case HCLGEVF_VECTOR0_EVENT_MBX: |
---|
| 2382 | + hclgevf_mbx_handler(hdev); |
---|
| 2383 | + break; |
---|
| 2384 | + default: |
---|
| 2385 | + break; |
---|
| 2386 | + } |
---|
1344 | 2387 | |
---|
1345 | | - hclgevf_clear_event_cause(hdev, clearval); |
---|
1346 | | - |
---|
1347 | | -skip_sched: |
---|
1348 | 2388 | hclgevf_enable_vector(&hdev->misc_vector, true); |
---|
1349 | 2389 | |
---|
1350 | 2390 | return IRQ_HANDLED; |
---|
.. | .. |
---|
1354 | 2394 | { |
---|
1355 | 2395 | int ret; |
---|
1356 | 2396 | |
---|
1357 | | - /* get queue configuration from PF */ |
---|
1358 | | - ret = hclge_get_queue_info(hdev); |
---|
| 2397 | + /* get current port based vlan state from PF */ |
---|
| 2398 | + ret = hclgevf_get_port_base_vlan_filter_state(hdev); |
---|
1359 | 2399 | if (ret) |
---|
1360 | 2400 | return ret; |
---|
| 2401 | + |
---|
| 2402 | + /* get queue configuration from PF */ |
---|
| 2403 | + ret = hclgevf_get_queue_info(hdev); |
---|
| 2404 | + if (ret) |
---|
| 2405 | + return ret; |
---|
| 2406 | + |
---|
| 2407 | + /* get queue depth info from PF */ |
---|
| 2408 | + ret = hclgevf_get_queue_depth(hdev); |
---|
| 2409 | + if (ret) |
---|
| 2410 | + return ret; |
---|
| 2411 | + |
---|
| 2412 | + ret = hclgevf_get_pf_media_type(hdev); |
---|
| 2413 | + if (ret) |
---|
| 2414 | + return ret; |
---|
| 2415 | + |
---|
1361 | 2416 | /* get tc configuration from PF */ |
---|
1362 | 2417 | return hclgevf_get_tc_info(hdev); |
---|
1363 | 2418 | } |
---|
.. | .. |
---|
1365 | 2420 | static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev) |
---|
1366 | 2421 | { |
---|
1367 | 2422 | struct pci_dev *pdev = ae_dev->pdev; |
---|
1368 | | - struct hclgevf_dev *hdev = ae_dev->priv; |
---|
| 2423 | + struct hclgevf_dev *hdev; |
---|
1369 | 2424 | |
---|
1370 | 2425 | hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL); |
---|
1371 | 2426 | if (!hdev) |
---|
.. | .. |
---|
1401 | 2456 | return 0; |
---|
1402 | 2457 | } |
---|
1403 | 2458 | |
---|
| 2459 | +static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en) |
---|
| 2460 | +{ |
---|
| 2461 | + struct hclgevf_cfg_gro_status_cmd *req; |
---|
| 2462 | + struct hclgevf_desc desc; |
---|
| 2463 | + int ret; |
---|
| 2464 | + |
---|
| 2465 | + if (!hnae3_dev_gro_supported(hdev)) |
---|
| 2466 | + return 0; |
---|
| 2467 | + |
---|
| 2468 | + hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_GRO_GENERIC_CONFIG, |
---|
| 2469 | + false); |
---|
| 2470 | + req = (struct hclgevf_cfg_gro_status_cmd *)desc.data; |
---|
| 2471 | + |
---|
| 2472 | + req->gro_en = en ? 1 : 0; |
---|
| 2473 | + |
---|
| 2474 | + ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); |
---|
| 2475 | + if (ret) |
---|
| 2476 | + dev_err(&hdev->pdev->dev, |
---|
| 2477 | + "VF GRO hardware config cmd failed, ret = %d.\n", ret); |
---|
| 2478 | + |
---|
| 2479 | + return ret; |
---|
| 2480 | +} |
---|
| 2481 | + |
---|
| 2482 | +static void hclgevf_rss_init_cfg(struct hclgevf_dev *hdev) |
---|
| 2483 | +{ |
---|
| 2484 | + struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; |
---|
| 2485 | + struct hclgevf_rss_tuple_cfg *tuple_sets; |
---|
| 2486 | + u32 i; |
---|
| 2487 | + |
---|
| 2488 | + rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ; |
---|
| 2489 | + rss_cfg->rss_size = hdev->nic.kinfo.rss_size; |
---|
| 2490 | + tuple_sets = &rss_cfg->rss_tuple_sets; |
---|
| 2491 | + if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { |
---|
| 2492 | + rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE; |
---|
| 2493 | + memcpy(rss_cfg->rss_hash_key, hclgevf_hash_key, |
---|
| 2494 | + HCLGEVF_RSS_KEY_SIZE); |
---|
| 2495 | + |
---|
| 2496 | + tuple_sets->ipv4_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; |
---|
| 2497 | + tuple_sets->ipv4_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; |
---|
| 2498 | + tuple_sets->ipv4_sctp_en = HCLGEVF_RSS_INPUT_TUPLE_SCTP; |
---|
| 2499 | + tuple_sets->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; |
---|
| 2500 | + tuple_sets->ipv6_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; |
---|
| 2501 | + tuple_sets->ipv6_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; |
---|
| 2502 | + tuple_sets->ipv6_sctp_en = |
---|
| 2503 | + hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ? |
---|
| 2504 | + HCLGEVF_RSS_INPUT_TUPLE_SCTP_NO_PORT : |
---|
| 2505 | + HCLGEVF_RSS_INPUT_TUPLE_SCTP; |
---|
| 2506 | + tuple_sets->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; |
---|
| 2507 | + } |
---|
| 2508 | + |
---|
| 2509 | + /* Initialize RSS indirect table */ |
---|
| 2510 | + for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) |
---|
| 2511 | + rss_cfg->rss_indirection_tbl[i] = i % rss_cfg->rss_size; |
---|
| 2512 | +} |
---|
| 2513 | + |
---|
1404 | 2514 | static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev) |
---|
1405 | 2515 | { |
---|
1406 | 2516 | struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; |
---|
1407 | | - int i, ret; |
---|
| 2517 | + int ret; |
---|
1408 | 2518 | |
---|
1409 | | - rss_cfg->rss_size = hdev->rss_size_max; |
---|
| 2519 | + if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { |
---|
| 2520 | + ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo, |
---|
| 2521 | + rss_cfg->rss_hash_key); |
---|
| 2522 | + if (ret) |
---|
| 2523 | + return ret; |
---|
1410 | 2524 | |
---|
1411 | | - /* Initialize RSS indirect table for each vport */ |
---|
1412 | | - for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) |
---|
1413 | | - rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max; |
---|
| 2525 | + ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg); |
---|
| 2526 | + if (ret) |
---|
| 2527 | + return ret; |
---|
| 2528 | + } |
---|
1414 | 2529 | |
---|
1415 | 2530 | ret = hclgevf_set_rss_indir_table(hdev); |
---|
1416 | 2531 | if (ret) |
---|
1417 | 2532 | return ret; |
---|
1418 | 2533 | |
---|
1419 | | - return hclgevf_set_rss_tc_mode(hdev, hdev->rss_size_max); |
---|
| 2534 | + return hclgevf_set_rss_tc_mode(hdev, rss_cfg->rss_size); |
---|
1420 | 2535 | } |
---|
1421 | 2536 | |
---|
1422 | 2537 | static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev) |
---|
1423 | 2538 | { |
---|
1424 | | - /* other vlan config(like, VLAN TX/RX offload) would also be added |
---|
1425 | | - * here later |
---|
1426 | | - */ |
---|
| 2539 | + struct hnae3_handle *nic = &hdev->nic; |
---|
| 2540 | + int ret; |
---|
| 2541 | + |
---|
| 2542 | + ret = hclgevf_en_hw_strip_rxvtag(nic, true); |
---|
| 2543 | + if (ret) { |
---|
| 2544 | + dev_err(&hdev->pdev->dev, |
---|
| 2545 | + "failed to enable rx vlan offload, ret = %d\n", ret); |
---|
| 2546 | + return ret; |
---|
| 2547 | + } |
---|
| 2548 | + |
---|
1427 | 2549 | return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0, |
---|
1428 | 2550 | false); |
---|
| 2551 | +} |
---|
| 2552 | + |
---|
| 2553 | +static void hclgevf_flush_link_update(struct hclgevf_dev *hdev) |
---|
| 2554 | +{ |
---|
| 2555 | +#define HCLGEVF_FLUSH_LINK_TIMEOUT 100000 |
---|
| 2556 | + |
---|
| 2557 | + unsigned long last = hdev->serv_processed_cnt; |
---|
| 2558 | + int i = 0; |
---|
| 2559 | + |
---|
| 2560 | + while (test_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state) && |
---|
| 2561 | + i++ < HCLGEVF_FLUSH_LINK_TIMEOUT && |
---|
| 2562 | + last == hdev->serv_processed_cnt) |
---|
| 2563 | + usleep_range(1, 1); |
---|
| 2564 | +} |
---|
| 2565 | + |
---|
| 2566 | +static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable) |
---|
| 2567 | +{ |
---|
| 2568 | + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); |
---|
| 2569 | + |
---|
| 2570 | + if (enable) { |
---|
| 2571 | + hclgevf_task_schedule(hdev, 0); |
---|
| 2572 | + } else { |
---|
| 2573 | + set_bit(HCLGEVF_STATE_DOWN, &hdev->state); |
---|
| 2574 | + |
---|
| 2575 | + /* flush memory to make sure DOWN is seen by service task */ |
---|
| 2576 | + smp_mb__before_atomic(); |
---|
| 2577 | + hclgevf_flush_link_update(hdev); |
---|
| 2578 | + } |
---|
1429 | 2579 | } |
---|
1430 | 2580 | |
---|
1431 | 2581 | static int hclgevf_ae_start(struct hnae3_handle *handle) |
---|
1432 | 2582 | { |
---|
1433 | 2583 | struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); |
---|
1434 | | - int i, queue_id; |
---|
1435 | 2584 | |
---|
1436 | | - for (i = 0; i < handle->kinfo.num_tqps; i++) { |
---|
1437 | | - /* ring enable */ |
---|
1438 | | - queue_id = hclgevf_get_queue_id(handle->kinfo.tqp[i]); |
---|
1439 | | - if (queue_id < 0) { |
---|
1440 | | - dev_warn(&hdev->pdev->dev, |
---|
1441 | | - "Get invalid queue id, ignore it\n"); |
---|
1442 | | - continue; |
---|
1443 | | - } |
---|
| 2585 | + clear_bit(HCLGEVF_STATE_DOWN, &hdev->state); |
---|
1444 | 2586 | |
---|
1445 | | - hclgevf_tqp_enable(hdev, queue_id, 0, true); |
---|
1446 | | - } |
---|
1447 | | - |
---|
1448 | | - /* reset tqp stats */ |
---|
1449 | 2587 | hclgevf_reset_tqp_stats(handle); |
---|
1450 | 2588 | |
---|
1451 | 2589 | hclgevf_request_link_info(hdev); |
---|
1452 | 2590 | |
---|
1453 | | - clear_bit(HCLGEVF_STATE_DOWN, &hdev->state); |
---|
1454 | | - mod_timer(&hdev->service_timer, jiffies + HZ); |
---|
| 2591 | + hclgevf_update_link_mode(hdev); |
---|
1455 | 2592 | |
---|
1456 | 2593 | return 0; |
---|
1457 | 2594 | } |
---|
.. | .. |
---|
1459 | 2596 | static void hclgevf_ae_stop(struct hnae3_handle *handle) |
---|
1460 | 2597 | { |
---|
1461 | 2598 | struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); |
---|
1462 | | - int i, queue_id; |
---|
| 2599 | + int i; |
---|
1463 | 2600 | |
---|
1464 | 2601 | set_bit(HCLGEVF_STATE_DOWN, &hdev->state); |
---|
1465 | 2602 | |
---|
1466 | | - for (i = 0; i < hdev->num_tqps; i++) { |
---|
1467 | | - /* Ring disable */ |
---|
1468 | | - queue_id = hclgevf_get_queue_id(handle->kinfo.tqp[i]); |
---|
1469 | | - if (queue_id < 0) { |
---|
1470 | | - dev_warn(&hdev->pdev->dev, |
---|
1471 | | - "Get invalid queue id, ignore it\n"); |
---|
1472 | | - continue; |
---|
1473 | | - } |
---|
| 2603 | + if (hdev->reset_type != HNAE3_VF_RESET) |
---|
| 2604 | + for (i = 0; i < handle->kinfo.num_tqps; i++) |
---|
| 2605 | + if (hclgevf_reset_tqp(handle, i)) |
---|
| 2606 | + break; |
---|
1474 | 2607 | |
---|
1475 | | - hclgevf_tqp_enable(hdev, queue_id, 0, false); |
---|
1476 | | - } |
---|
1477 | | - |
---|
1478 | | - /* reset tqp stats */ |
---|
1479 | 2608 | hclgevf_reset_tqp_stats(handle); |
---|
1480 | | - del_timer_sync(&hdev->service_timer); |
---|
1481 | | - cancel_work_sync(&hdev->service_task); |
---|
1482 | | - clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state); |
---|
1483 | 2609 | hclgevf_update_link_status(hdev, 0); |
---|
| 2610 | +} |
---|
| 2611 | + |
---|
| 2612 | +static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive) |
---|
| 2613 | +{ |
---|
| 2614 | +#define HCLGEVF_STATE_ALIVE 1 |
---|
| 2615 | +#define HCLGEVF_STATE_NOT_ALIVE 0 |
---|
| 2616 | + |
---|
| 2617 | + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); |
---|
| 2618 | + struct hclge_vf_to_pf_msg send_msg; |
---|
| 2619 | + |
---|
| 2620 | + hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_ALIVE, 0); |
---|
| 2621 | + send_msg.data[0] = alive ? HCLGEVF_STATE_ALIVE : |
---|
| 2622 | + HCLGEVF_STATE_NOT_ALIVE; |
---|
| 2623 | + return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); |
---|
| 2624 | +} |
---|
| 2625 | + |
---|
| 2626 | +static int hclgevf_client_start(struct hnae3_handle *handle) |
---|
| 2627 | +{ |
---|
| 2628 | + return hclgevf_set_alive(handle, true); |
---|
| 2629 | +} |
---|
| 2630 | + |
---|
| 2631 | +static void hclgevf_client_stop(struct hnae3_handle *handle) |
---|
| 2632 | +{ |
---|
| 2633 | + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); |
---|
| 2634 | + int ret; |
---|
| 2635 | + |
---|
| 2636 | + ret = hclgevf_set_alive(handle, false); |
---|
| 2637 | + if (ret) |
---|
| 2638 | + dev_warn(&hdev->pdev->dev, |
---|
| 2639 | + "%s failed %d\n", __func__, ret); |
---|
1484 | 2640 | } |
---|
1485 | 2641 | |
---|
1486 | 2642 | static void hclgevf_state_init(struct hclgevf_dev *hdev) |
---|
1487 | 2643 | { |
---|
1488 | | - /* if this is on going reset then skip this initialization */ |
---|
1489 | | - if (hclgevf_dev_ongoing_reset(hdev)) |
---|
1490 | | - return; |
---|
1491 | | - |
---|
1492 | | - /* setup tasks for the MBX */ |
---|
1493 | | - INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task); |
---|
1494 | 2644 | clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state); |
---|
1495 | 2645 | clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state); |
---|
| 2646 | + clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state); |
---|
1496 | 2647 | |
---|
1497 | | - /* setup tasks for service timer */ |
---|
1498 | | - timer_setup(&hdev->service_timer, hclgevf_service_timer, 0); |
---|
1499 | | - |
---|
1500 | | - INIT_WORK(&hdev->service_task, hclgevf_service_task); |
---|
1501 | | - clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state); |
---|
1502 | | - |
---|
1503 | | - INIT_WORK(&hdev->rst_service_task, hclgevf_reset_service_task); |
---|
| 2648 | + INIT_DELAYED_WORK(&hdev->service_task, hclgevf_service_task); |
---|
1504 | 2649 | |
---|
1505 | 2650 | mutex_init(&hdev->mbx_resp.mbx_mutex); |
---|
| 2651 | + sema_init(&hdev->reset_sem, 1); |
---|
| 2652 | + |
---|
| 2653 | + spin_lock_init(&hdev->mac_table.mac_list_lock); |
---|
| 2654 | + INIT_LIST_HEAD(&hdev->mac_table.uc_mac_list); |
---|
| 2655 | + INIT_LIST_HEAD(&hdev->mac_table.mc_mac_list); |
---|
1506 | 2656 | |
---|
1507 | 2657 | /* bring the device down */ |
---|
1508 | 2658 | set_bit(HCLGEVF_STATE_DOWN, &hdev->state); |
---|
.. | .. |
---|
1511 | 2661 | static void hclgevf_state_uninit(struct hclgevf_dev *hdev) |
---|
1512 | 2662 | { |
---|
1513 | 2663 | set_bit(HCLGEVF_STATE_DOWN, &hdev->state); |
---|
| 2664 | + set_bit(HCLGEVF_STATE_REMOVING, &hdev->state); |
---|
1514 | 2665 | |
---|
1515 | | - if (hdev->service_timer.function) |
---|
1516 | | - del_timer_sync(&hdev->service_timer); |
---|
1517 | | - if (hdev->service_task.func) |
---|
1518 | | - cancel_work_sync(&hdev->service_task); |
---|
1519 | | - if (hdev->mbx_service_task.func) |
---|
1520 | | - cancel_work_sync(&hdev->mbx_service_task); |
---|
1521 | | - if (hdev->rst_service_task.func) |
---|
1522 | | - cancel_work_sync(&hdev->rst_service_task); |
---|
| 2666 | + if (hdev->service_task.work.func) |
---|
| 2667 | + cancel_delayed_work_sync(&hdev->service_task); |
---|
1523 | 2668 | |
---|
1524 | 2669 | mutex_destroy(&hdev->mbx_resp.mbx_mutex); |
---|
1525 | 2670 | } |
---|
.. | .. |
---|
1530 | 2675 | int vectors; |
---|
1531 | 2676 | int i; |
---|
1532 | 2677 | |
---|
1533 | | - /* if this is on going reset then skip this initialization */ |
---|
1534 | | - if (hclgevf_dev_ongoing_reset(hdev)) |
---|
1535 | | - return 0; |
---|
1536 | | - |
---|
1537 | | - if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)) |
---|
| 2678 | + if (hnae3_dev_roce_supported(hdev)) |
---|
1538 | 2679 | vectors = pci_alloc_irq_vectors(pdev, |
---|
1539 | 2680 | hdev->roce_base_msix_offset + 1, |
---|
1540 | 2681 | hdev->num_msi, |
---|
1541 | 2682 | PCI_IRQ_MSIX); |
---|
1542 | 2683 | else |
---|
1543 | | - vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi, |
---|
| 2684 | + vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM, |
---|
| 2685 | + hdev->num_msi, |
---|
1544 | 2686 | PCI_IRQ_MSI | PCI_IRQ_MSIX); |
---|
1545 | 2687 | |
---|
1546 | 2688 | if (vectors < 0) { |
---|
.. | .. |
---|
1551 | 2693 | } |
---|
1552 | 2694 | if (vectors < hdev->num_msi) |
---|
1553 | 2695 | dev_warn(&hdev->pdev->dev, |
---|
1554 | | - "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n", |
---|
| 2696 | + "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n", |
---|
1555 | 2697 | hdev->num_msi, vectors); |
---|
1556 | 2698 | |
---|
1557 | 2699 | hdev->num_msi = vectors; |
---|
1558 | 2700 | hdev->num_msi_left = vectors; |
---|
| 2701 | + |
---|
1559 | 2702 | hdev->base_msi_vector = pdev->irq; |
---|
1560 | 2703 | hdev->roce_base_vector = pdev->irq + hdev->roce_base_msix_offset; |
---|
1561 | 2704 | |
---|
.. | .. |
---|
1572 | 2715 | hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi, |
---|
1573 | 2716 | sizeof(int), GFP_KERNEL); |
---|
1574 | 2717 | if (!hdev->vector_irq) { |
---|
| 2718 | + devm_kfree(&pdev->dev, hdev->vector_status); |
---|
1575 | 2719 | pci_free_irq_vectors(pdev); |
---|
1576 | 2720 | return -ENOMEM; |
---|
1577 | 2721 | } |
---|
.. | .. |
---|
1583 | 2727 | { |
---|
1584 | 2728 | struct pci_dev *pdev = hdev->pdev; |
---|
1585 | 2729 | |
---|
| 2730 | + devm_kfree(&pdev->dev, hdev->vector_status); |
---|
| 2731 | + devm_kfree(&pdev->dev, hdev->vector_irq); |
---|
1586 | 2732 | pci_free_irq_vectors(pdev); |
---|
1587 | 2733 | } |
---|
1588 | 2734 | |
---|
1589 | 2735 | static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev) |
---|
1590 | 2736 | { |
---|
1591 | | - int ret = 0; |
---|
1592 | | - |
---|
1593 | | - /* if this is on going reset then skip this initialization */ |
---|
1594 | | - if (hclgevf_dev_ongoing_reset(hdev)) |
---|
1595 | | - return 0; |
---|
| 2737 | + int ret; |
---|
1596 | 2738 | |
---|
1597 | 2739 | hclgevf_get_misc_vector(hdev); |
---|
1598 | 2740 | |
---|
| 2741 | + snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s", |
---|
| 2742 | + HCLGEVF_NAME, pci_name(hdev->pdev)); |
---|
1599 | 2743 | ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle, |
---|
1600 | | - 0, "hclgevf_cmd", hdev); |
---|
| 2744 | + 0, hdev->misc_vector.name, hdev); |
---|
1601 | 2745 | if (ret) { |
---|
1602 | 2746 | dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n", |
---|
1603 | 2747 | hdev->misc_vector.vector_irq); |
---|
.. | .. |
---|
1621 | 2765 | hclgevf_free_vector(hdev, 0); |
---|
1622 | 2766 | } |
---|
1623 | 2767 | |
---|
| 2768 | +static void hclgevf_info_show(struct hclgevf_dev *hdev) |
---|
| 2769 | +{ |
---|
| 2770 | + struct device *dev = &hdev->pdev->dev; |
---|
| 2771 | + |
---|
| 2772 | + dev_info(dev, "VF info begin:\n"); |
---|
| 2773 | + |
---|
| 2774 | + dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps); |
---|
| 2775 | + dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc); |
---|
| 2776 | + dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc); |
---|
| 2777 | + dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport); |
---|
| 2778 | + dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map); |
---|
| 2779 | + dev_info(dev, "PF media type of this VF: %u\n", |
---|
| 2780 | + hdev->hw.mac.media_type); |
---|
| 2781 | + |
---|
| 2782 | + dev_info(dev, "VF info end.\n"); |
---|
| 2783 | +} |
---|
| 2784 | + |
---|
| 2785 | +static int hclgevf_init_nic_client_instance(struct hnae3_ae_dev *ae_dev, |
---|
| 2786 | + struct hnae3_client *client) |
---|
| 2787 | +{ |
---|
| 2788 | + struct hclgevf_dev *hdev = ae_dev->priv; |
---|
| 2789 | + int rst_cnt = hdev->rst_stats.rst_cnt; |
---|
| 2790 | + int ret; |
---|
| 2791 | + |
---|
| 2792 | + ret = client->ops->init_instance(&hdev->nic); |
---|
| 2793 | + if (ret) |
---|
| 2794 | + return ret; |
---|
| 2795 | + |
---|
| 2796 | + set_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state); |
---|
| 2797 | + if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) || |
---|
| 2798 | + rst_cnt != hdev->rst_stats.rst_cnt) { |
---|
| 2799 | + clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state); |
---|
| 2800 | + |
---|
| 2801 | + client->ops->uninit_instance(&hdev->nic, 0); |
---|
| 2802 | + return -EBUSY; |
---|
| 2803 | + } |
---|
| 2804 | + |
---|
| 2805 | + hnae3_set_client_init_flag(client, ae_dev, 1); |
---|
| 2806 | + |
---|
| 2807 | + if (netif_msg_drv(&hdev->nic)) |
---|
| 2808 | + hclgevf_info_show(hdev); |
---|
| 2809 | + |
---|
| 2810 | + return 0; |
---|
| 2811 | +} |
---|
| 2812 | + |
---|
| 2813 | +static int hclgevf_init_roce_client_instance(struct hnae3_ae_dev *ae_dev, |
---|
| 2814 | + struct hnae3_client *client) |
---|
| 2815 | +{ |
---|
| 2816 | + struct hclgevf_dev *hdev = ae_dev->priv; |
---|
| 2817 | + int ret; |
---|
| 2818 | + |
---|
| 2819 | + if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client || |
---|
| 2820 | + !hdev->nic_client) |
---|
| 2821 | + return 0; |
---|
| 2822 | + |
---|
| 2823 | + ret = hclgevf_init_roce_base_info(hdev); |
---|
| 2824 | + if (ret) |
---|
| 2825 | + return ret; |
---|
| 2826 | + |
---|
| 2827 | + ret = client->ops->init_instance(&hdev->roce); |
---|
| 2828 | + if (ret) |
---|
| 2829 | + return ret; |
---|
| 2830 | + |
---|
| 2831 | + set_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state); |
---|
| 2832 | + hnae3_set_client_init_flag(client, ae_dev, 1); |
---|
| 2833 | + |
---|
| 2834 | + return 0; |
---|
| 2835 | +} |
---|
| 2836 | + |
---|
1624 | 2837 | static int hclgevf_init_client_instance(struct hnae3_client *client, |
---|
1625 | 2838 | struct hnae3_ae_dev *ae_dev) |
---|
1626 | 2839 | { |
---|
.. | .. |
---|
1632 | 2845 | hdev->nic_client = client; |
---|
1633 | 2846 | hdev->nic.client = client; |
---|
1634 | 2847 | |
---|
1635 | | - ret = client->ops->init_instance(&hdev->nic); |
---|
| 2848 | + ret = hclgevf_init_nic_client_instance(ae_dev, client); |
---|
1636 | 2849 | if (ret) |
---|
1637 | 2850 | goto clear_nic; |
---|
1638 | 2851 | |
---|
1639 | | - hnae3_set_client_init_flag(client, ae_dev, 1); |
---|
1640 | | - |
---|
1641 | | - if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) { |
---|
1642 | | - struct hnae3_client *rc = hdev->roce_client; |
---|
1643 | | - |
---|
1644 | | - ret = hclgevf_init_roce_base_info(hdev); |
---|
1645 | | - if (ret) |
---|
1646 | | - goto clear_roce; |
---|
1647 | | - ret = rc->ops->init_instance(&hdev->roce); |
---|
1648 | | - if (ret) |
---|
1649 | | - goto clear_roce; |
---|
1650 | | - |
---|
1651 | | - hnae3_set_client_init_flag(hdev->roce_client, ae_dev, |
---|
1652 | | - 1); |
---|
1653 | | - } |
---|
1654 | | - break; |
---|
1655 | | - case HNAE3_CLIENT_UNIC: |
---|
1656 | | - hdev->nic_client = client; |
---|
1657 | | - hdev->nic.client = client; |
---|
1658 | | - |
---|
1659 | | - ret = client->ops->init_instance(&hdev->nic); |
---|
| 2852 | + ret = hclgevf_init_roce_client_instance(ae_dev, |
---|
| 2853 | + hdev->roce_client); |
---|
1660 | 2854 | if (ret) |
---|
1661 | | - goto clear_nic; |
---|
| 2855 | + goto clear_roce; |
---|
1662 | 2856 | |
---|
1663 | | - hnae3_set_client_init_flag(client, ae_dev, 1); |
---|
1664 | 2857 | break; |
---|
1665 | 2858 | case HNAE3_CLIENT_ROCE: |
---|
1666 | 2859 | if (hnae3_dev_roce_supported(hdev)) { |
---|
.. | .. |
---|
1668 | 2861 | hdev->roce.client = client; |
---|
1669 | 2862 | } |
---|
1670 | 2863 | |
---|
1671 | | - if (hdev->roce_client && hdev->nic_client) { |
---|
1672 | | - ret = hclgevf_init_roce_base_info(hdev); |
---|
1673 | | - if (ret) |
---|
1674 | | - goto clear_roce; |
---|
| 2864 | + ret = hclgevf_init_roce_client_instance(ae_dev, client); |
---|
| 2865 | + if (ret) |
---|
| 2866 | + goto clear_roce; |
---|
1675 | 2867 | |
---|
1676 | | - ret = client->ops->init_instance(&hdev->roce); |
---|
1677 | | - if (ret) |
---|
1678 | | - goto clear_roce; |
---|
1679 | | - } |
---|
1680 | | - |
---|
1681 | | - hnae3_set_client_init_flag(client, ae_dev, 1); |
---|
| 2868 | + break; |
---|
| 2869 | + default: |
---|
| 2870 | + return -EINVAL; |
---|
1682 | 2871 | } |
---|
1683 | 2872 | |
---|
1684 | 2873 | return 0; |
---|
.. | .. |
---|
1700 | 2889 | |
---|
1701 | 2890 | /* un-init roce, if it exists */ |
---|
1702 | 2891 | if (hdev->roce_client) { |
---|
| 2892 | + while (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) |
---|
| 2893 | + msleep(HCLGEVF_WAIT_RESET_DONE); |
---|
| 2894 | + clear_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state); |
---|
| 2895 | + |
---|
1703 | 2896 | hdev->roce_client->ops->uninit_instance(&hdev->roce, 0); |
---|
1704 | 2897 | hdev->roce_client = NULL; |
---|
1705 | 2898 | hdev->roce.client = NULL; |
---|
.. | .. |
---|
1708 | 2901 | /* un-init nic/unic, if this was not called by roce client */ |
---|
1709 | 2902 | if (client->ops->uninit_instance && hdev->nic_client && |
---|
1710 | 2903 | client->type != HNAE3_CLIENT_ROCE) { |
---|
| 2904 | + while (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) |
---|
| 2905 | + msleep(HCLGEVF_WAIT_RESET_DONE); |
---|
| 2906 | + clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state); |
---|
| 2907 | + |
---|
1711 | 2908 | client->ops->uninit_instance(&hdev->nic, 0); |
---|
1712 | 2909 | hdev->nic_client = NULL; |
---|
1713 | 2910 | hdev->nic.client = NULL; |
---|
.. | .. |
---|
1719 | 2916 | struct pci_dev *pdev = hdev->pdev; |
---|
1720 | 2917 | struct hclgevf_hw *hw; |
---|
1721 | 2918 | int ret; |
---|
1722 | | - |
---|
1723 | | - /* check if we need to skip initialization of pci. This will happen if |
---|
1724 | | - * device is undergoing VF reset. Otherwise, we would need to |
---|
1725 | | - * re-initialize pci interface again i.e. when device is not going |
---|
1726 | | - * through *any* reset or actually undergoing full reset. |
---|
1727 | | - */ |
---|
1728 | | - if (hclgevf_dev_ongoing_reset(hdev)) |
---|
1729 | | - return 0; |
---|
1730 | 2919 | |
---|
1731 | 2920 | ret = pci_enable_device(pdev); |
---|
1732 | 2921 | if (ret) { |
---|
.. | .. |
---|
1793 | 2982 | |
---|
1794 | 2983 | req = (struct hclgevf_query_res_cmd *)desc.data; |
---|
1795 | 2984 | |
---|
1796 | | - if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)) { |
---|
| 2985 | + if (hnae3_dev_roce_supported(hdev)) { |
---|
1797 | 2986 | hdev->roce_base_msix_offset = |
---|
1798 | | - hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee), |
---|
| 2987 | + hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee), |
---|
1799 | 2988 | HCLGEVF_MSIX_OFT_ROCEE_M, |
---|
1800 | 2989 | HCLGEVF_MSIX_OFT_ROCEE_S); |
---|
1801 | 2990 | hdev->num_roce_msix = |
---|
1802 | | - hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number), |
---|
| 2991 | + hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number), |
---|
1803 | 2992 | HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S); |
---|
| 2993 | + |
---|
| 2994 | + /* nic's msix numbers is always equals to the roce's. */ |
---|
| 2995 | + hdev->num_nic_msix = hdev->num_roce_msix; |
---|
1804 | 2996 | |
---|
1805 | 2997 | /* VF should have NIC vectors and Roce vectors, NIC vectors |
---|
1806 | 2998 | * are queued before Roce vectors. The offset is fixed to 64. |
---|
.. | .. |
---|
1809 | 3001 | hdev->roce_base_msix_offset; |
---|
1810 | 3002 | } else { |
---|
1811 | 3003 | hdev->num_msi = |
---|
1812 | | - hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number), |
---|
| 3004 | + hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number), |
---|
1813 | 3005 | HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S); |
---|
| 3006 | + |
---|
| 3007 | + hdev->num_nic_msix = hdev->num_msi; |
---|
1814 | 3008 | } |
---|
| 3009 | + |
---|
| 3010 | + if (hdev->num_nic_msix < HNAE3_MIN_VECTOR_NUM) { |
---|
| 3011 | + dev_err(&hdev->pdev->dev, |
---|
| 3012 | + "Just %u msi resources, not enough for vf(min:2).\n", |
---|
| 3013 | + hdev->num_nic_msix); |
---|
| 3014 | + return -EINVAL; |
---|
| 3015 | + } |
---|
| 3016 | + |
---|
| 3017 | + return 0; |
---|
| 3018 | +} |
---|
| 3019 | + |
---|
| 3020 | +static void hclgevf_set_default_dev_specs(struct hclgevf_dev *hdev) |
---|
| 3021 | +{ |
---|
| 3022 | +#define HCLGEVF_MAX_NON_TSO_BD_NUM 8U |
---|
| 3023 | + |
---|
| 3024 | + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); |
---|
| 3025 | + |
---|
| 3026 | + ae_dev->dev_specs.max_non_tso_bd_num = |
---|
| 3027 | + HCLGEVF_MAX_NON_TSO_BD_NUM; |
---|
| 3028 | + ae_dev->dev_specs.rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE; |
---|
| 3029 | + ae_dev->dev_specs.rss_key_size = HCLGEVF_RSS_KEY_SIZE; |
---|
| 3030 | +} |
---|
| 3031 | + |
---|
| 3032 | +static void hclgevf_parse_dev_specs(struct hclgevf_dev *hdev, |
---|
| 3033 | + struct hclgevf_desc *desc) |
---|
| 3034 | +{ |
---|
| 3035 | + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); |
---|
| 3036 | + struct hclgevf_dev_specs_0_cmd *req0; |
---|
| 3037 | + |
---|
| 3038 | + req0 = (struct hclgevf_dev_specs_0_cmd *)desc[0].data; |
---|
| 3039 | + |
---|
| 3040 | + ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num; |
---|
| 3041 | + ae_dev->dev_specs.rss_ind_tbl_size = |
---|
| 3042 | + le16_to_cpu(req0->rss_ind_tbl_size); |
---|
| 3043 | + ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size); |
---|
| 3044 | +} |
---|
| 3045 | + |
---|
| 3046 | +static void hclgevf_check_dev_specs(struct hclgevf_dev *hdev) |
---|
| 3047 | +{ |
---|
| 3048 | + struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs; |
---|
| 3049 | + |
---|
| 3050 | + if (!dev_specs->max_non_tso_bd_num) |
---|
| 3051 | + dev_specs->max_non_tso_bd_num = HCLGEVF_MAX_NON_TSO_BD_NUM; |
---|
| 3052 | + if (!dev_specs->rss_ind_tbl_size) |
---|
| 3053 | + dev_specs->rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE; |
---|
| 3054 | + if (!dev_specs->rss_key_size) |
---|
| 3055 | + dev_specs->rss_key_size = HCLGEVF_RSS_KEY_SIZE; |
---|
| 3056 | +} |
---|
| 3057 | + |
---|
| 3058 | +static int hclgevf_query_dev_specs(struct hclgevf_dev *hdev) |
---|
| 3059 | +{ |
---|
| 3060 | + struct hclgevf_desc desc[HCLGEVF_QUERY_DEV_SPECS_BD_NUM]; |
---|
| 3061 | + int ret; |
---|
| 3062 | + int i; |
---|
| 3063 | + |
---|
| 3064 | + /* set default specifications as devices lower than version V3 do not |
---|
| 3065 | + * support querying specifications from firmware. |
---|
| 3066 | + */ |
---|
| 3067 | + if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) { |
---|
| 3068 | + hclgevf_set_default_dev_specs(hdev); |
---|
| 3069 | + return 0; |
---|
| 3070 | + } |
---|
| 3071 | + |
---|
| 3072 | + for (i = 0; i < HCLGEVF_QUERY_DEV_SPECS_BD_NUM - 1; i++) { |
---|
| 3073 | + hclgevf_cmd_setup_basic_desc(&desc[i], |
---|
| 3074 | + HCLGEVF_OPC_QUERY_DEV_SPECS, true); |
---|
| 3075 | + desc[i].flag |= cpu_to_le16(HCLGEVF_CMD_FLAG_NEXT); |
---|
| 3076 | + } |
---|
| 3077 | + hclgevf_cmd_setup_basic_desc(&desc[i], HCLGEVF_OPC_QUERY_DEV_SPECS, |
---|
| 3078 | + true); |
---|
| 3079 | + |
---|
| 3080 | + ret = hclgevf_cmd_send(&hdev->hw, desc, HCLGEVF_QUERY_DEV_SPECS_BD_NUM); |
---|
| 3081 | + if (ret) |
---|
| 3082 | + return ret; |
---|
| 3083 | + |
---|
| 3084 | + hclgevf_parse_dev_specs(hdev, desc); |
---|
| 3085 | + hclgevf_check_dev_specs(hdev); |
---|
| 3086 | + |
---|
| 3087 | + return 0; |
---|
| 3088 | +} |
---|
| 3089 | + |
---|
| 3090 | +static int hclgevf_pci_reset(struct hclgevf_dev *hdev) |
---|
| 3091 | +{ |
---|
| 3092 | + struct pci_dev *pdev = hdev->pdev; |
---|
| 3093 | + int ret = 0; |
---|
| 3094 | + |
---|
| 3095 | + if ((hdev->reset_type == HNAE3_VF_FULL_RESET || |
---|
| 3096 | + hdev->reset_type == HNAE3_FLR_RESET) && |
---|
| 3097 | + test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { |
---|
| 3098 | + hclgevf_misc_irq_uninit(hdev); |
---|
| 3099 | + hclgevf_uninit_msi(hdev); |
---|
| 3100 | + clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); |
---|
| 3101 | + } |
---|
| 3102 | + |
---|
| 3103 | + if (!test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { |
---|
| 3104 | + pci_set_master(pdev); |
---|
| 3105 | + ret = hclgevf_init_msi(hdev); |
---|
| 3106 | + if (ret) { |
---|
| 3107 | + dev_err(&pdev->dev, |
---|
| 3108 | + "failed(%d) to init MSI/MSI-X\n", ret); |
---|
| 3109 | + return ret; |
---|
| 3110 | + } |
---|
| 3111 | + |
---|
| 3112 | + ret = hclgevf_misc_irq_init(hdev); |
---|
| 3113 | + if (ret) { |
---|
| 3114 | + hclgevf_uninit_msi(hdev); |
---|
| 3115 | + dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n", |
---|
| 3116 | + ret); |
---|
| 3117 | + return ret; |
---|
| 3118 | + } |
---|
| 3119 | + |
---|
| 3120 | + set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); |
---|
| 3121 | + } |
---|
| 3122 | + |
---|
| 3123 | + return ret; |
---|
| 3124 | +} |
---|
| 3125 | + |
---|
| 3126 | +static int hclgevf_clear_vport_list(struct hclgevf_dev *hdev) |
---|
| 3127 | +{ |
---|
| 3128 | + struct hclge_vf_to_pf_msg send_msg; |
---|
| 3129 | + |
---|
| 3130 | + hclgevf_build_send_msg(&send_msg, HCLGE_MBX_HANDLE_VF_TBL, |
---|
| 3131 | + HCLGE_MBX_VPORT_LIST_CLEAR); |
---|
| 3132 | + return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); |
---|
| 3133 | +} |
---|
| 3134 | + |
---|
| 3135 | +static int hclgevf_reset_hdev(struct hclgevf_dev *hdev) |
---|
| 3136 | +{ |
---|
| 3137 | + struct pci_dev *pdev = hdev->pdev; |
---|
| 3138 | + int ret; |
---|
| 3139 | + |
---|
| 3140 | + ret = hclgevf_pci_reset(hdev); |
---|
| 3141 | + if (ret) { |
---|
| 3142 | + dev_err(&pdev->dev, "pci reset failed %d\n", ret); |
---|
| 3143 | + return ret; |
---|
| 3144 | + } |
---|
| 3145 | + |
---|
| 3146 | + ret = hclgevf_cmd_init(hdev); |
---|
| 3147 | + if (ret) { |
---|
| 3148 | + dev_err(&pdev->dev, "cmd failed %d\n", ret); |
---|
| 3149 | + return ret; |
---|
| 3150 | + } |
---|
| 3151 | + |
---|
| 3152 | + ret = hclgevf_rss_init_hw(hdev); |
---|
| 3153 | + if (ret) { |
---|
| 3154 | + dev_err(&hdev->pdev->dev, |
---|
| 3155 | + "failed(%d) to initialize RSS\n", ret); |
---|
| 3156 | + return ret; |
---|
| 3157 | + } |
---|
| 3158 | + |
---|
| 3159 | + ret = hclgevf_config_gro(hdev, true); |
---|
| 3160 | + if (ret) |
---|
| 3161 | + return ret; |
---|
| 3162 | + |
---|
| 3163 | + ret = hclgevf_init_vlan_config(hdev); |
---|
| 3164 | + if (ret) { |
---|
| 3165 | + dev_err(&hdev->pdev->dev, |
---|
| 3166 | + "failed(%d) to initialize VLAN config\n", ret); |
---|
| 3167 | + return ret; |
---|
| 3168 | + } |
---|
| 3169 | + |
---|
| 3170 | + set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state); |
---|
| 3171 | + |
---|
| 3172 | + dev_info(&hdev->pdev->dev, "Reset done\n"); |
---|
1815 | 3173 | |
---|
1816 | 3174 | return 0; |
---|
1817 | 3175 | } |
---|
.. | .. |
---|
1821 | 3179 | struct pci_dev *pdev = hdev->pdev; |
---|
1822 | 3180 | int ret; |
---|
1823 | 3181 | |
---|
1824 | | - /* check if device is on-going full reset(i.e. pcie as well) */ |
---|
1825 | | - if (hclgevf_dev_ongoing_full_reset(hdev)) { |
---|
1826 | | - dev_warn(&pdev->dev, "device is going full reset\n"); |
---|
1827 | | - hclgevf_uninit_hdev(hdev); |
---|
1828 | | - } |
---|
1829 | | - |
---|
1830 | 3182 | ret = hclgevf_pci_init(hdev); |
---|
1831 | | - if (ret) { |
---|
1832 | | - dev_err(&pdev->dev, "PCI initialization failed\n"); |
---|
| 3183 | + if (ret) |
---|
1833 | 3184 | return ret; |
---|
1834 | | - } |
---|
| 3185 | + |
---|
| 3186 | + ret = hclgevf_cmd_queue_init(hdev); |
---|
| 3187 | + if (ret) |
---|
| 3188 | + goto err_cmd_queue_init; |
---|
1835 | 3189 | |
---|
1836 | 3190 | ret = hclgevf_cmd_init(hdev); |
---|
1837 | 3191 | if (ret) |
---|
.. | .. |
---|
1839 | 3193 | |
---|
1840 | 3194 | /* Get vf resource */ |
---|
1841 | 3195 | ret = hclgevf_query_vf_resource(hdev); |
---|
| 3196 | + if (ret) |
---|
| 3197 | + goto err_cmd_init; |
---|
| 3198 | + |
---|
| 3199 | + ret = hclgevf_query_dev_specs(hdev); |
---|
1842 | 3200 | if (ret) { |
---|
1843 | | - dev_err(&hdev->pdev->dev, |
---|
1844 | | - "Query vf status error, ret = %d.\n", ret); |
---|
1845 | | - goto err_query_vf; |
---|
| 3201 | + dev_err(&pdev->dev, |
---|
| 3202 | + "failed to query dev specifications, ret = %d\n", ret); |
---|
| 3203 | + goto err_cmd_init; |
---|
1846 | 3204 | } |
---|
1847 | 3205 | |
---|
1848 | 3206 | ret = hclgevf_init_msi(hdev); |
---|
1849 | 3207 | if (ret) { |
---|
1850 | 3208 | dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret); |
---|
1851 | | - goto err_query_vf; |
---|
| 3209 | + goto err_cmd_init; |
---|
1852 | 3210 | } |
---|
1853 | 3211 | |
---|
1854 | 3212 | hclgevf_state_init(hdev); |
---|
| 3213 | + hdev->reset_level = HNAE3_VF_FUNC_RESET; |
---|
| 3214 | + hdev->reset_type = HNAE3_NONE_RESET; |
---|
1855 | 3215 | |
---|
1856 | 3216 | ret = hclgevf_misc_irq_init(hdev); |
---|
1857 | | - if (ret) { |
---|
1858 | | - dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n", |
---|
1859 | | - ret); |
---|
| 3217 | + if (ret) |
---|
1860 | 3218 | goto err_misc_irq_init; |
---|
1861 | | - } |
---|
| 3219 | + |
---|
| 3220 | + set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); |
---|
1862 | 3221 | |
---|
1863 | 3222 | ret = hclgevf_configure(hdev); |
---|
1864 | 3223 | if (ret) { |
---|
.. | .. |
---|
1873 | 3232 | } |
---|
1874 | 3233 | |
---|
1875 | 3234 | ret = hclgevf_set_handle_info(hdev); |
---|
1876 | | - if (ret) { |
---|
1877 | | - dev_err(&pdev->dev, "failed(%d) to set handle info\n", ret); |
---|
| 3235 | + if (ret) |
---|
1878 | 3236 | goto err_config; |
---|
1879 | | - } |
---|
1880 | 3237 | |
---|
1881 | | - /* Initialize mta type for this VF */ |
---|
1882 | | - ret = hclgevf_cfg_func_mta_type(hdev); |
---|
1883 | | - if (ret) { |
---|
1884 | | - dev_err(&hdev->pdev->dev, |
---|
1885 | | - "failed(%d) to initialize MTA type\n", ret); |
---|
| 3238 | + ret = hclgevf_config_gro(hdev, true); |
---|
| 3239 | + if (ret) |
---|
1886 | 3240 | goto err_config; |
---|
1887 | | - } |
---|
1888 | 3241 | |
---|
1889 | 3242 | /* Initialize RSS for this VF */ |
---|
| 3243 | + hclgevf_rss_init_cfg(hdev); |
---|
1890 | 3244 | ret = hclgevf_rss_init_hw(hdev); |
---|
1891 | 3245 | if (ret) { |
---|
1892 | 3246 | dev_err(&hdev->pdev->dev, |
---|
1893 | 3247 | "failed(%d) to initialize RSS\n", ret); |
---|
| 3248 | + goto err_config; |
---|
| 3249 | + } |
---|
| 3250 | + |
---|
| 3251 | + /* ensure vf tbl list as empty before init*/ |
---|
| 3252 | + ret = hclgevf_clear_vport_list(hdev); |
---|
| 3253 | + if (ret) { |
---|
| 3254 | + dev_err(&pdev->dev, |
---|
| 3255 | + "failed to clear tbl list configuration, ret = %d.\n", |
---|
| 3256 | + ret); |
---|
1894 | 3257 | goto err_config; |
---|
1895 | 3258 | } |
---|
1896 | 3259 | |
---|
.. | .. |
---|
1901 | 3264 | goto err_config; |
---|
1902 | 3265 | } |
---|
1903 | 3266 | |
---|
1904 | | - pr_info("finished initializing %s driver\n", HCLGEVF_DRIVER_NAME); |
---|
| 3267 | + hdev->last_reset_time = jiffies; |
---|
| 3268 | + dev_info(&hdev->pdev->dev, "finished initializing %s driver\n", |
---|
| 3269 | + HCLGEVF_DRIVER_NAME); |
---|
| 3270 | + |
---|
| 3271 | + hclgevf_task_schedule(hdev, round_jiffies_relative(HZ)); |
---|
1905 | 3272 | |
---|
1906 | 3273 | return 0; |
---|
1907 | 3274 | |
---|
.. | .. |
---|
1910 | 3277 | err_misc_irq_init: |
---|
1911 | 3278 | hclgevf_state_uninit(hdev); |
---|
1912 | 3279 | hclgevf_uninit_msi(hdev); |
---|
1913 | | -err_query_vf: |
---|
1914 | | - hclgevf_cmd_uninit(hdev); |
---|
1915 | 3280 | err_cmd_init: |
---|
| 3281 | + hclgevf_cmd_uninit(hdev); |
---|
| 3282 | +err_cmd_queue_init: |
---|
1916 | 3283 | hclgevf_pci_uninit(hdev); |
---|
| 3284 | + clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); |
---|
1917 | 3285 | return ret; |
---|
1918 | 3286 | } |
---|
1919 | 3287 | |
---|
1920 | 3288 | static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev) |
---|
1921 | 3289 | { |
---|
| 3290 | + struct hclge_vf_to_pf_msg send_msg; |
---|
| 3291 | + |
---|
1922 | 3292 | hclgevf_state_uninit(hdev); |
---|
1923 | | - hclgevf_misc_irq_uninit(hdev); |
---|
| 3293 | + |
---|
| 3294 | + hclgevf_build_send_msg(&send_msg, HCLGE_MBX_VF_UNINIT, 0); |
---|
| 3295 | + hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); |
---|
| 3296 | + |
---|
| 3297 | + if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { |
---|
| 3298 | + hclgevf_misc_irq_uninit(hdev); |
---|
| 3299 | + hclgevf_uninit_msi(hdev); |
---|
| 3300 | + } |
---|
| 3301 | + |
---|
1924 | 3302 | hclgevf_cmd_uninit(hdev); |
---|
1925 | | - hclgevf_uninit_msi(hdev); |
---|
1926 | 3303 | hclgevf_pci_uninit(hdev); |
---|
| 3304 | + hclgevf_uninit_mac_list(hdev); |
---|
1927 | 3305 | } |
---|
1928 | 3306 | |
---|
1929 | 3307 | static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev) |
---|
.. | .. |
---|
1938 | 3316 | } |
---|
1939 | 3317 | |
---|
1940 | 3318 | ret = hclgevf_init_hdev(ae_dev->priv); |
---|
1941 | | - if (ret) |
---|
| 3319 | + if (ret) { |
---|
1942 | 3320 | dev_err(&pdev->dev, "hclge device initialization failed\n"); |
---|
| 3321 | + return ret; |
---|
| 3322 | + } |
---|
1943 | 3323 | |
---|
1944 | | - return ret; |
---|
| 3324 | + return 0; |
---|
1945 | 3325 | } |
---|
1946 | 3326 | |
---|
1947 | 3327 | static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) |
---|
.. | .. |
---|
1983 | 3363 | } |
---|
1984 | 3364 | |
---|
1985 | 3365 | static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle, |
---|
1986 | | - u16 *free_tqps, u16 *max_rss_size) |
---|
| 3366 | + u16 *alloc_tqps, u16 *max_rss_size) |
---|
1987 | 3367 | { |
---|
1988 | 3368 | struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); |
---|
1989 | 3369 | |
---|
1990 | | - *free_tqps = 0; |
---|
| 3370 | + *alloc_tqps = hdev->num_tqps; |
---|
1991 | 3371 | *max_rss_size = hdev->rss_size_max; |
---|
| 3372 | +} |
---|
| 3373 | + |
---|
| 3374 | +static void hclgevf_update_rss_size(struct hnae3_handle *handle, |
---|
| 3375 | + u32 new_tqps_num) |
---|
| 3376 | +{ |
---|
| 3377 | + struct hnae3_knic_private_info *kinfo = &handle->kinfo; |
---|
| 3378 | + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); |
---|
| 3379 | + u16 max_rss_size; |
---|
| 3380 | + |
---|
| 3381 | + kinfo->req_rss_size = new_tqps_num; |
---|
| 3382 | + |
---|
| 3383 | + max_rss_size = min_t(u16, hdev->rss_size_max, |
---|
| 3384 | + hdev->num_tqps / kinfo->num_tc); |
---|
| 3385 | + |
---|
| 3386 | + /* Use the user's configuration when it is not larger than |
---|
| 3387 | + * max_rss_size, otherwise, use the maximum specification value. |
---|
| 3388 | + */ |
---|
| 3389 | + if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size && |
---|
| 3390 | + kinfo->req_rss_size <= max_rss_size) |
---|
| 3391 | + kinfo->rss_size = kinfo->req_rss_size; |
---|
| 3392 | + else if (kinfo->rss_size > max_rss_size || |
---|
| 3393 | + (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) |
---|
| 3394 | + kinfo->rss_size = max_rss_size; |
---|
| 3395 | + |
---|
| 3396 | + kinfo->num_tqps = kinfo->num_tc * kinfo->rss_size; |
---|
| 3397 | +} |
---|
| 3398 | + |
---|
| 3399 | +static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num, |
---|
| 3400 | + bool rxfh_configured) |
---|
| 3401 | +{ |
---|
| 3402 | + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); |
---|
| 3403 | + struct hnae3_knic_private_info *kinfo = &handle->kinfo; |
---|
| 3404 | + u16 cur_rss_size = kinfo->rss_size; |
---|
| 3405 | + u16 cur_tqps = kinfo->num_tqps; |
---|
| 3406 | + u32 *rss_indir; |
---|
| 3407 | + unsigned int i; |
---|
| 3408 | + int ret; |
---|
| 3409 | + |
---|
| 3410 | + hclgevf_update_rss_size(handle, new_tqps_num); |
---|
| 3411 | + |
---|
| 3412 | + ret = hclgevf_set_rss_tc_mode(hdev, kinfo->rss_size); |
---|
| 3413 | + if (ret) |
---|
| 3414 | + return ret; |
---|
| 3415 | + |
---|
| 3416 | + /* RSS indirection table has been configuared by user */ |
---|
| 3417 | + if (rxfh_configured) |
---|
| 3418 | + goto out; |
---|
| 3419 | + |
---|
| 3420 | + /* Reinitializes the rss indirect table according to the new RSS size */ |
---|
| 3421 | + rss_indir = kcalloc(HCLGEVF_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL); |
---|
| 3422 | + if (!rss_indir) |
---|
| 3423 | + return -ENOMEM; |
---|
| 3424 | + |
---|
| 3425 | + for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) |
---|
| 3426 | + rss_indir[i] = i % kinfo->rss_size; |
---|
| 3427 | + |
---|
| 3428 | + hdev->rss_cfg.rss_size = kinfo->rss_size; |
---|
| 3429 | + |
---|
| 3430 | + ret = hclgevf_set_rss(handle, rss_indir, NULL, 0); |
---|
| 3431 | + if (ret) |
---|
| 3432 | + dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n", |
---|
| 3433 | + ret); |
---|
| 3434 | + |
---|
| 3435 | + kfree(rss_indir); |
---|
| 3436 | + |
---|
| 3437 | +out: |
---|
| 3438 | + if (!ret) |
---|
| 3439 | + dev_info(&hdev->pdev->dev, |
---|
| 3440 | + "Channels changed, rss_size from %u to %u, tqps from %u to %u", |
---|
| 3441 | + cur_rss_size, kinfo->rss_size, |
---|
| 3442 | + cur_tqps, kinfo->rss_size * kinfo->num_tc); |
---|
| 3443 | + |
---|
| 3444 | + return ret; |
---|
1992 | 3445 | } |
---|
1993 | 3446 | |
---|
1994 | 3447 | static int hclgevf_get_status(struct hnae3_handle *handle) |
---|
.. | .. |
---|
2019 | 3472 | hdev->hw.mac.duplex = duplex; |
---|
2020 | 3473 | } |
---|
2021 | 3474 | |
---|
| 3475 | +static int hclgevf_gro_en(struct hnae3_handle *handle, bool enable) |
---|
| 3476 | +{ |
---|
| 3477 | + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); |
---|
| 3478 | + |
---|
| 3479 | + return hclgevf_config_gro(hdev, enable); |
---|
| 3480 | +} |
---|
| 3481 | + |
---|
| 3482 | +static void hclgevf_get_media_type(struct hnae3_handle *handle, u8 *media_type, |
---|
| 3483 | + u8 *module_type) |
---|
| 3484 | +{ |
---|
| 3485 | + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); |
---|
| 3486 | + |
---|
| 3487 | + if (media_type) |
---|
| 3488 | + *media_type = hdev->hw.mac.media_type; |
---|
| 3489 | + |
---|
| 3490 | + if (module_type) |
---|
| 3491 | + *module_type = hdev->hw.mac.module_type; |
---|
| 3492 | +} |
---|
| 3493 | + |
---|
| 3494 | +static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle) |
---|
| 3495 | +{ |
---|
| 3496 | + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); |
---|
| 3497 | + |
---|
| 3498 | + return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING); |
---|
| 3499 | +} |
---|
| 3500 | + |
---|
| 3501 | +static bool hclgevf_get_cmdq_stat(struct hnae3_handle *handle) |
---|
| 3502 | +{ |
---|
| 3503 | + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); |
---|
| 3504 | + |
---|
| 3505 | + return test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); |
---|
| 3506 | +} |
---|
| 3507 | + |
---|
| 3508 | +static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle) |
---|
| 3509 | +{ |
---|
| 3510 | + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); |
---|
| 3511 | + |
---|
| 3512 | + return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); |
---|
| 3513 | +} |
---|
| 3514 | + |
---|
| 3515 | +static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle) |
---|
| 3516 | +{ |
---|
| 3517 | + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); |
---|
| 3518 | + |
---|
| 3519 | + return hdev->rst_stats.hw_rst_done_cnt; |
---|
| 3520 | +} |
---|
| 3521 | + |
---|
| 3522 | +static void hclgevf_get_link_mode(struct hnae3_handle *handle, |
---|
| 3523 | + unsigned long *supported, |
---|
| 3524 | + unsigned long *advertising) |
---|
| 3525 | +{ |
---|
| 3526 | + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); |
---|
| 3527 | + |
---|
| 3528 | + *supported = hdev->hw.mac.supported; |
---|
| 3529 | + *advertising = hdev->hw.mac.advertising; |
---|
| 3530 | +} |
---|
| 3531 | + |
---|
| 3532 | +#define MAX_SEPARATE_NUM 4 |
---|
| 3533 | +#define SEPARATOR_VALUE 0xFFFFFFFF |
---|
| 3534 | +#define REG_NUM_PER_LINE 4 |
---|
| 3535 | +#define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32)) |
---|
| 3536 | + |
---|
| 3537 | +static int hclgevf_get_regs_len(struct hnae3_handle *handle) |
---|
| 3538 | +{ |
---|
| 3539 | + int cmdq_lines, common_lines, ring_lines, tqp_intr_lines; |
---|
| 3540 | + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); |
---|
| 3541 | + |
---|
| 3542 | + cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1; |
---|
| 3543 | + common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1; |
---|
| 3544 | + ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1; |
---|
| 3545 | + tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1; |
---|
| 3546 | + |
---|
| 3547 | + return (cmdq_lines + common_lines + ring_lines * hdev->num_tqps + |
---|
| 3548 | + tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE; |
---|
| 3549 | +} |
---|
| 3550 | + |
---|
| 3551 | +static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version, |
---|
| 3552 | + void *data) |
---|
| 3553 | +{ |
---|
| 3554 | + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); |
---|
| 3555 | + int i, j, reg_um, separator_num; |
---|
| 3556 | + u32 *reg = data; |
---|
| 3557 | + |
---|
| 3558 | + *version = hdev->fw_version; |
---|
| 3559 | + |
---|
| 3560 | + /* fetching per-VF registers values from VF PCIe register space */ |
---|
| 3561 | + reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32); |
---|
| 3562 | + separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; |
---|
| 3563 | + for (i = 0; i < reg_um; i++) |
---|
| 3564 | + *reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]); |
---|
| 3565 | + for (i = 0; i < separator_num; i++) |
---|
| 3566 | + *reg++ = SEPARATOR_VALUE; |
---|
| 3567 | + |
---|
| 3568 | + reg_um = sizeof(common_reg_addr_list) / sizeof(u32); |
---|
| 3569 | + separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; |
---|
| 3570 | + for (i = 0; i < reg_um; i++) |
---|
| 3571 | + *reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]); |
---|
| 3572 | + for (i = 0; i < separator_num; i++) |
---|
| 3573 | + *reg++ = SEPARATOR_VALUE; |
---|
| 3574 | + |
---|
| 3575 | + reg_um = sizeof(ring_reg_addr_list) / sizeof(u32); |
---|
| 3576 | + separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; |
---|
| 3577 | + for (j = 0; j < hdev->num_tqps; j++) { |
---|
| 3578 | + for (i = 0; i < reg_um; i++) |
---|
| 3579 | + *reg++ = hclgevf_read_dev(&hdev->hw, |
---|
| 3580 | + ring_reg_addr_list[i] + |
---|
| 3581 | + 0x200 * j); |
---|
| 3582 | + for (i = 0; i < separator_num; i++) |
---|
| 3583 | + *reg++ = SEPARATOR_VALUE; |
---|
| 3584 | + } |
---|
| 3585 | + |
---|
| 3586 | + reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32); |
---|
| 3587 | + separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; |
---|
| 3588 | + for (j = 0; j < hdev->num_msi_used - 1; j++) { |
---|
| 3589 | + for (i = 0; i < reg_um; i++) |
---|
| 3590 | + *reg++ = hclgevf_read_dev(&hdev->hw, |
---|
| 3591 | + tqp_intr_reg_addr_list[i] + |
---|
| 3592 | + 4 * j); |
---|
| 3593 | + for (i = 0; i < separator_num; i++) |
---|
| 3594 | + *reg++ = SEPARATOR_VALUE; |
---|
| 3595 | + } |
---|
| 3596 | +} |
---|
| 3597 | + |
---|
/**
 * hclgevf_update_port_base_vlan_info - apply a PF-initiated port-based
 * VLAN change on this VF
 * @hdev: VF device instance
 * @state: requested state (HNAE3_PORT_BASE_VLAN_DISABLE/ENABLE)
 * @port_base_vlan_info: raw VLAN config payload forwarded to the PF
 * @data_size: length of @port_base_vlan_info in bytes
 *
 * Takes the RTNL lock for the whole sequence: the client must be brought
 * down, the config echoed to the PF via mailbox, and the client brought
 * back up without racing a concurrent reset or netdev reconfiguration.
 * Bails out early (with a warning) if a reset is in progress or has
 * failed, since the client notify path is not usable then.
 */
void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
					u8 *port_base_vlan_info, u8 data_size)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hclge_vf_to_pf_msg send_msg;
	int ret;

	rtnl_lock();

	/* do nothing while a reset owns the device state */
	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
	    test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) {
		dev_warn(&hdev->pdev->dev,
			 "is resetting when updating port based vlan info\n");
		rtnl_unlock();
		return;
	}

	/* stop the client before changing VLAN config under it */
	ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret) {
		rtnl_unlock();
		return;
	}

	/* send msg to PF and wait update port based vlan info */
	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
			       HCLGE_MBX_PORT_BASE_VLAN_CFG);
	memcpy(send_msg.data, port_base_vlan_info, data_size);
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
	if (!ret) {
		/* only the explicit DISABLE value is stored verbatim; any
		 * other state is normalized to ENABLE
		 */
		if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
			nic->port_base_vlan_state = state;
		else
			nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
	}

	/* best effort: restart the client even if the mailbox send failed */
	hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
	rtnl_unlock();
}
---|
| 3636 | + |
---|
2022 | 3637 | static const struct hnae3_ae_ops hclgevf_ops = { |
---|
2023 | 3638 | .init_ae_dev = hclgevf_init_ae_dev, |
---|
2024 | 3639 | .uninit_ae_dev = hclgevf_uninit_ae_dev, |
---|
| 3640 | + .flr_prepare = hclgevf_flr_prepare, |
---|
| 3641 | + .flr_done = hclgevf_flr_done, |
---|
2025 | 3642 | .init_client_instance = hclgevf_init_client_instance, |
---|
2026 | 3643 | .uninit_client_instance = hclgevf_uninit_client_instance, |
---|
2027 | 3644 | .start = hclgevf_ae_start, |
---|
2028 | 3645 | .stop = hclgevf_ae_stop, |
---|
| 3646 | + .client_start = hclgevf_client_start, |
---|
| 3647 | + .client_stop = hclgevf_client_stop, |
---|
2029 | 3648 | .map_ring_to_vector = hclgevf_map_ring_to_vector, |
---|
2030 | 3649 | .unmap_ring_from_vector = hclgevf_unmap_ring_from_vector, |
---|
2031 | 3650 | .get_vector = hclgevf_get_vector, |
---|
2032 | 3651 | .put_vector = hclgevf_put_vector, |
---|
2033 | 3652 | .reset_queue = hclgevf_reset_tqp, |
---|
2034 | | - .set_promisc_mode = hclgevf_set_promisc_mode, |
---|
2035 | 3653 | .get_mac_addr = hclgevf_get_mac_addr, |
---|
2036 | 3654 | .set_mac_addr = hclgevf_set_mac_addr, |
---|
2037 | 3655 | .add_uc_addr = hclgevf_add_uc_addr, |
---|
2038 | 3656 | .rm_uc_addr = hclgevf_rm_uc_addr, |
---|
2039 | 3657 | .add_mc_addr = hclgevf_add_mc_addr, |
---|
2040 | 3658 | .rm_mc_addr = hclgevf_rm_mc_addr, |
---|
2041 | | - .update_mta_status = hclgevf_update_mta_status, |
---|
2042 | 3659 | .get_stats = hclgevf_get_stats, |
---|
2043 | 3660 | .update_stats = hclgevf_update_stats, |
---|
2044 | 3661 | .get_strings = hclgevf_get_strings, |
---|
.. | .. |
---|
2047 | 3664 | .get_rss_indir_size = hclgevf_get_rss_indir_size, |
---|
2048 | 3665 | .get_rss = hclgevf_get_rss, |
---|
2049 | 3666 | .set_rss = hclgevf_set_rss, |
---|
| 3667 | + .get_rss_tuple = hclgevf_get_rss_tuple, |
---|
| 3668 | + .set_rss_tuple = hclgevf_set_rss_tuple, |
---|
2050 | 3669 | .get_tc_size = hclgevf_get_tc_size, |
---|
2051 | 3670 | .get_fw_version = hclgevf_get_fw_version, |
---|
2052 | 3671 | .set_vlan_filter = hclgevf_set_vlan_filter, |
---|
2053 | 3672 | .enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag, |
---|
2054 | 3673 | .reset_event = hclgevf_reset_event, |
---|
| 3674 | + .set_default_reset_request = hclgevf_set_def_reset_request, |
---|
| 3675 | + .set_channels = hclgevf_set_channels, |
---|
2055 | 3676 | .get_channels = hclgevf_get_channels, |
---|
2056 | 3677 | .get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info, |
---|
| 3678 | + .get_regs_len = hclgevf_get_regs_len, |
---|
| 3679 | + .get_regs = hclgevf_get_regs, |
---|
2057 | 3680 | .get_status = hclgevf_get_status, |
---|
2058 | 3681 | .get_ksettings_an_result = hclgevf_get_ksettings_an_result, |
---|
| 3682 | + .get_media_type = hclgevf_get_media_type, |
---|
| 3683 | + .get_hw_reset_stat = hclgevf_get_hw_reset_stat, |
---|
| 3684 | + .ae_dev_resetting = hclgevf_ae_dev_resetting, |
---|
| 3685 | + .ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt, |
---|
| 3686 | + .set_gro_en = hclgevf_gro_en, |
---|
| 3687 | + .set_mtu = hclgevf_set_mtu, |
---|
| 3688 | + .get_global_queue_id = hclgevf_get_qid_global, |
---|
| 3689 | + .set_timer_task = hclgevf_set_timer_task, |
---|
| 3690 | + .get_link_mode = hclgevf_get_link_mode, |
---|
| 3691 | + .set_promisc_mode = hclgevf_set_promisc_mode, |
---|
| 3692 | + .request_update_promisc_mode = hclgevf_request_update_promisc_mode, |
---|
| 3693 | + .get_cmdq_stat = hclgevf_get_cmdq_stat, |
---|
2059 | 3694 | }; |
---|
2060 | 3695 | |
---|
2061 | 3696 | static struct hnae3_ae_algo ae_algovf = { |
---|
.. | .. |
---|
2067 | 3702 | { |
---|
2068 | 3703 | pr_info("%s is initializing\n", HCLGEVF_NAME); |
---|
2069 | 3704 | |
---|
| 3705 | + hclgevf_wq = alloc_workqueue("%s", 0, 0, HCLGEVF_NAME); |
---|
| 3706 | + if (!hclgevf_wq) { |
---|
| 3707 | + pr_err("%s: failed to create workqueue\n", HCLGEVF_NAME); |
---|
| 3708 | + return -ENOMEM; |
---|
| 3709 | + } |
---|
| 3710 | + |
---|
2070 | 3711 | hnae3_register_ae_algo(&ae_algovf); |
---|
2071 | 3712 | |
---|
2072 | 3713 | return 0; |
---|
.. | .. |
---|
/* Module unload: unregister from the hnae3 framework first so no new
 * work can be scheduled, then tear down the driver workqueue.
 */
static void hclgevf_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algovf);
	destroy_workqueue(hclgevf_wq);
}
---|
/* Module entry/exit points for the HNS3 VF driver. */
module_init(hclgevf_init);
module_exit(hclgevf_exit);
---|