forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
@@ -5,6 +5,14 @@
 #include "hclgevf_main.h"
 #include "hnae3.h"
 
+#define CREATE_TRACE_POINTS
+#include "hclgevf_trace.h"
+
+static int hclgevf_resp_to_errno(u16 resp_code)
+{
+	return resp_code ? -resp_code : 0;
+}
+
 static void hclgevf_reset_mbx_resp_status(struct hclgevf_dev *hdev)
 {
 	/* this function should be called with mbx_resp.mbx_mutex held
@@ -26,28 +34,31 @@
 			       u8 *resp_data, u16 resp_len)
 {
 #define HCLGEVF_MAX_TRY_TIMES	500
-#define HCLGEVF_SLEEP_USCOEND	1000
+#define HCLGEVF_SLEEP_USECOND	1000
 	struct hclgevf_mbx_resp_status *mbx_resp;
 	u16 r_code0, r_code1;
 	int i = 0;
 
 	if (resp_len > HCLGE_MBX_MAX_RESP_DATA_SIZE) {
 		dev_err(&hdev->pdev->dev,
-			"VF mbx response len(=%d) exceeds maximum(=%d)\n",
+			"VF mbx response len(=%u) exceeds maximum(=%u)\n",
 			resp_len,
 			HCLGE_MBX_MAX_RESP_DATA_SIZE);
 		return -EINVAL;
 	}
 
	while ((!hdev->mbx_resp.received_resp) && (i < HCLGEVF_MAX_TRY_TIMES)) {
-		udelay(HCLGEVF_SLEEP_USCOEND);
+		if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state))
+			return -EIO;
+
+		usleep_range(HCLGEVF_SLEEP_USECOND, HCLGEVF_SLEEP_USECOND * 2);
 		i++;
 	}
 
 	if (i >= HCLGEVF_MAX_TRY_TIMES) {
 		dev_err(&hdev->pdev->dev,
-			"VF could not get mbx resp(=%d) from PF in %d tries\n",
-			hdev->mbx_resp.received_resp, i);
+			"VF could not get mbx(%u,%u) resp(=%d) from PF in %d tries\n",
+			code0, code1, hdev->mbx_resp.received_resp, i);
 		return -EIO;
 	}
 
@@ -65,16 +76,19 @@
 
 	if (!(r_code0 == code0 && r_code1 == code1 && !mbx_resp->resp_status)) {
 		dev_err(&hdev->pdev->dev,
-			"VF could not match resp code(code0=%d,code1=%d), %d",
+			"VF could not match resp code(code0=%u,code1=%u), %d\n",
 			code0, code1, mbx_resp->resp_status);
+		dev_err(&hdev->pdev->dev,
+			"VF could not match resp r_code(r_code0=%u,r_code1=%u)\n",
+			r_code0, r_code1);
 		return -EIO;
 	}
 
 	return 0;
 }
 
-int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, u16 code, u16 subcode,
-			 const u8 *msg_data, u8 msg_len, bool need_resp,
+int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev,
+			 struct hclge_vf_to_pf_msg *send_msg, bool need_resp,
 			 u8 *resp_data, u16 resp_len)
 {
 	struct hclge_mbx_vf_to_pf_cmd *req;
@@ -83,18 +97,20 @@
 
 	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;
 
-	/* first two bytes are reserved for code & subcode */
-	if (msg_len > (HCLGE_MBX_MAX_MSG_SIZE - 2)) {
+	if (!send_msg) {
 		dev_err(&hdev->pdev->dev,
-			"VF send mbx msg fail, msg len %d exceeds max len %d\n",
-			msg_len, HCLGE_MBX_MAX_MSG_SIZE);
+			"failed to send mbx, msg is NULL\n");
 		return -EINVAL;
 	}
 
 	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
-	req->msg[0] = code;
-	req->msg[1] = subcode;
-	memcpy(&req->msg[2], msg_data, msg_len);
+	if (need_resp)
+		hnae3_set_bit(req->mbx_need_resp, HCLGE_MBX_NEED_RESP_B, 1);
+
+	memcpy(&req->msg, send_msg, sizeof(struct hclge_vf_to_pf_msg));
+
+	if (test_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state))
+		trace_hclge_vf_mbx_send(hdev, req);
 
 	/* synchronous send */
 	if (need_resp) {
@@ -109,7 +125,8 @@
 			return status;
 		}
 
-		status = hclgevf_get_mbx_resp(hdev, code, subcode, resp_data,
+		status = hclgevf_get_mbx_resp(hdev, send_msg->code,
+					      send_msg->subcode, resp_data,
 					      resp_len);
 		mutex_unlock(&hdev->mbx_resp.mbx_mutex);
 	} else {
@@ -148,14 +165,19 @@
 	crq = &hdev->hw.cmq.crq;
 
 	while (!hclgevf_cmd_crq_empty(&hdev->hw)) {
+		if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) {
+			dev_info(&hdev->pdev->dev, "vf crq need init\n");
+			return;
+		}
+
 		desc = &crq->desc[crq->next_to_use];
 		req = (struct hclge_mbx_pf_to_vf_cmd *)desc->data;
 
 		flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
 		if (unlikely(!hnae3_get_bit(flag, HCLGEVF_CMDQ_RX_OUTVLD_B))) {
 			dev_warn(&hdev->pdev->dev,
-				 "dropped invalid mailbox message, code = %d\n",
-				 req->msg[0]);
+				 "dropped invalid mailbox message, code = %u\n",
+				 req->msg.code);
 
 			/* dropping/not processing this invalid message */
 			crq->desc[crq->next_to_use].flag = 0;
@@ -163,25 +185,29 @@
 			continue;
 		}
 
+		trace_hclge_vf_mbx_get(hdev, req);
+
 		/* synchronous messages are time critical and need preferential
 		 * treatment. Therefore, we need to acknowledge all the sync
 		 * responses as quickly as possible so that waiting tasks do not
 		 * timeout and simultaneously queue the async messages for later
 		 * processing in context of mailbox task i.e. the slow path.
 		 */
-		switch (req->msg[0]) {
+		switch (req->msg.code) {
 		case HCLGE_MBX_PF_VF_RESP:
 			if (resp->received_resp)
 				dev_warn(&hdev->pdev->dev,
-					 "VF mbx resp flag not clear(%d)\n",
-					 req->msg[1]);
+					 "VF mbx resp flag not clear(%u)\n",
+					 req->msg.vf_mbx_msg_code);
 			resp->received_resp = true;
 
-			resp->origin_mbx_msg = (req->msg[1] << 16);
-			resp->origin_mbx_msg |= req->msg[2];
-			resp->resp_status = req->msg[3];
+			resp->origin_mbx_msg =
+				(req->msg.vf_mbx_msg_code << 16);
+			resp->origin_mbx_msg |= req->msg.vf_mbx_msg_subcode;
+			resp->resp_status =
+				hclgevf_resp_to_errno(req->msg.resp_status);
 
-			temp = (u8 *)&req->msg[4];
+			temp = (u8 *)req->msg.resp_data;
 			for (i = 0; i < HCLGE_MBX_MAX_RESP_DATA_SIZE; i++) {
 				resp->additional_info[i] = *temp;
 				temp++;
@@ -189,6 +215,9 @@
 			break;
 		case HCLGE_MBX_LINK_STAT_CHANGE:
 		case HCLGE_MBX_ASSERTING_RESET:
+		case HCLGE_MBX_LINK_STAT_MODE:
+		case HCLGE_MBX_PUSH_VLAN_INFO:
+		case HCLGE_MBX_PUSH_PROMISC_INFO:
 			/* set this mbx event as pending. This is required as we
 			 * might lose the interrupt event when mbx task is busy
 			 * handling. This shall be cleared when mbx task just
@@ -199,27 +228,28 @@
 			/* we will drop the async msg if we find ARQ as full
 			 * and continue with next message
 			 */
-			if (hdev->arq.count >= HCLGE_MBX_MAX_ARQ_MSG_NUM) {
+			if (atomic_read(&hdev->arq.count) >=
+			    HCLGE_MBX_MAX_ARQ_MSG_NUM) {
 				dev_warn(&hdev->pdev->dev,
-					 "Async Q full, dropping msg(%d)\n",
-					 req->msg[1]);
+					 "Async Q full, dropping msg(%u)\n",
+					 req->msg.code);
 				break;
 			}
 
 			/* tail the async message in arq */
 			msg_q = hdev->arq.msg_q[hdev->arq.tail];
-			memcpy(&msg_q[0], req->msg,
+			memcpy(&msg_q[0], &req->msg,
 			       HCLGE_MBX_MAX_ARQ_MSG_SIZE * sizeof(u16));
 			hclge_mbx_tail_ptr_move_arq(hdev->arq);
-			hdev->arq.count++;
+			atomic_inc(&hdev->arq.count);
 
 			hclgevf_mbx_task_schedule(hdev);
 
 			break;
 		default:
 			dev_err(&hdev->pdev->dev,
-				"VF received unsupported(%d) mbx msg from PF\n",
-				req->msg[0]);
+				"VF received unsupported(%u) mbx msg from PF\n",
+				req->msg.code);
 			break;
 		}
 		crq->desc[crq->next_to_use].flag = 0;
@@ -231,13 +261,23 @@
 			 crq->next_to_use);
 }
 
+static void hclgevf_parse_promisc_info(struct hclgevf_dev *hdev,
+				       u16 promisc_info)
+{
+	if (!promisc_info)
+		dev_info(&hdev->pdev->dev,
+			 "Promisc mode is closed by host for being untrusted.\n");
+}
+
 void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
 {
-	u16 link_status;
-	u16 *msg_q;
+	enum hnae3_reset_type reset_type;
+	u16 link_status, state;
+	u16 *msg_q, *vlan_info;
 	u8 duplex;
 	u32 speed;
 	u32 tail;
+	u8 idx;
 
 	/* we can safely clear it now as we are at start of the async message
 	 * processing
@@ -248,18 +288,33 @@
 
 	/* process all the async queue messages */
 	while (tail != hdev->arq.head) {
+		if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) {
+			dev_info(&hdev->pdev->dev,
+				 "vf crq need init in async\n");
+			return;
+		}
+
 		msg_q = hdev->arq.msg_q[hdev->arq.head];
 
 		switch (msg_q[0]) {
 		case HCLGE_MBX_LINK_STAT_CHANGE:
-			link_status = le16_to_cpu(msg_q[1]);
+			link_status = msg_q[1];
 			memcpy(&speed, &msg_q[2], sizeof(speed));
-			duplex = (u8)le16_to_cpu(msg_q[4]);
+			duplex = (u8)msg_q[4];
 
 			/* update upper layer with new link status */
 			hclgevf_update_link_status(hdev, link_status);
 			hclgevf_update_speed_duplex(hdev, speed, duplex);
 
+			break;
+		case HCLGE_MBX_LINK_STAT_MODE:
+			idx = (u8)msg_q[1];
+			if (idx)
+				memcpy(&hdev->hw.mac.supported, &msg_q[2],
+				       sizeof(unsigned long));
+			else
+				memcpy(&hdev->hw.mac.advertising, &msg_q[2],
+				       sizeof(unsigned long));
 			break;
 		case HCLGE_MBX_ASSERTING_RESET:
 			/* PF has asserted reset hence VF should go in pending
@@ -267,20 +322,30 @@
 			 * has been completely reset. After this stack should
 			 * eventually be re-initialized.
 			 */
-			hdev->nic.reset_level = HNAE3_VF_RESET;
+			reset_type = (enum hnae3_reset_type)msg_q[1];
+			set_bit(reset_type, &hdev->reset_pending);
 			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
 			hclgevf_reset_task_schedule(hdev);
 
 			break;
+		case HCLGE_MBX_PUSH_VLAN_INFO:
+			state = msg_q[1];
+			vlan_info = &msg_q[1];
+			hclgevf_update_port_base_vlan_info(hdev, state,
+							   (u8 *)vlan_info, 8);
+			break;
+		case HCLGE_MBX_PUSH_PROMISC_INFO:
+			hclgevf_parse_promisc_info(hdev, msg_q[1]);
+			break;
 		default:
 			dev_err(&hdev->pdev->dev,
-				"fetched unsupported(%d) message from arq\n",
+				"fetched unsupported(%u) message from arq\n",
 				msg_q[0]);
 			break;
 		}
 
 		hclge_mbx_head_ptr_move_arq(hdev->arq);
-		hdev->arq.count--;
+		atomic_dec(&hdev->arq.count);
 		msg_q = hdev->arq.msg_q[hdev->arq.head];
 	}
 }
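
For reference, a minimal caller-side sketch of the reworked send path. This is not part of the patch: the function name hclgevf_example_get_qinfo is hypothetical, the opcode is assumed, and the field layout of struct hclge_vf_to_pf_msg (defined in hclge_mbx.h, not shown here) is inferred from its use above as a code byte, a subcode byte, and a payload that together fill the fixed-size req->msg area — which is why the old msg_len bounds check could be replaced by a single fixed-size memcpy().

/* Hypothetical caller built against the new hclgevf_send_mbx_msg()
 * signature; opcode and struct layout are assumptions, see above.
 */
static int hclgevf_example_get_qinfo(struct hclgevf_dev *hdev)
{
	struct hclge_vf_to_pf_msg send_msg;
	u8 resp_data[HCLGE_MBX_MAX_RESP_DATA_SIZE];

	/* zero first: the whole struct is copied into req->msg verbatim */
	memset(&send_msg, 0, sizeof(send_msg));
	send_msg.code = HCLGE_MBX_GET_QINFO;	/* assumed opcode */
	send_msg.subcode = 0;

	/* need_resp=true: takes mbx_resp.mbx_mutex, then polls for the PF
	 * reply via hclgevf_get_mbx_resp() for up to 500 tries of 1-2 ms
	 * each (usleep_range), bailing out early if the command queue has
	 * been disabled.
	 */
	return hclgevf_send_mbx_msg(hdev, &send_msg, true,
				    resp_data, sizeof(resp_data));
}

Passing the message as a struct rather than as (code, subcode, buffer, length) lets the mailbox layer copy one fixed-size object, and the new NULL check replaces the length validation that the old byte-packed interface required.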