hc
2023-12-09 b22da3d8526a935aa31e086e63f60ff3246cb61c
kernel/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -4,6 +4,7 @@
 #ifndef __HCLGEVF_MAIN_H
 #define __HCLGEVF_MAIN_H
 #include <linux/fs.h>
+#include <linux/if_vlan.h>
 #include <linux/types.h>
 #include "hclge_mbx.h"
 #include "hclgevf_cmd.h"
@@ -12,9 +13,12 @@
 #define HCLGEVF_MOD_VERSION "1.0"
 #define HCLGEVF_DRIVER_NAME "hclgevf"
 
+#define HCLGEVF_MAX_VLAN_ID 4095
 #define HCLGEVF_MISC_VECTOR_NUM 0
 
 #define HCLGEVF_INVALID_VPORT 0xffff
+#define HCLGEVF_GENERAL_TASK_INTERVAL 5
+#define HCLGEVF_KEEP_ALIVE_TASK_INTERVAL 2
 
 /* This number in actual depends upon the total number of VFs
  * created by physical function. But the maximum number of
@@ -27,15 +31,82 @@
 #define HCLGEVF_VECTOR_REG_OFFSET 0x4
 #define HCLGEVF_VECTOR_VF_OFFSET 0x100000
 
+/* bar registers for cmdq */
+#define HCLGEVF_CMDQ_TX_ADDR_L_REG 0x27000
+#define HCLGEVF_CMDQ_TX_ADDR_H_REG 0x27004
+#define HCLGEVF_CMDQ_TX_DEPTH_REG 0x27008
+#define HCLGEVF_CMDQ_TX_TAIL_REG 0x27010
+#define HCLGEVF_CMDQ_TX_HEAD_REG 0x27014
+#define HCLGEVF_CMDQ_RX_ADDR_L_REG 0x27018
+#define HCLGEVF_CMDQ_RX_ADDR_H_REG 0x2701C
+#define HCLGEVF_CMDQ_RX_DEPTH_REG 0x27020
+#define HCLGEVF_CMDQ_RX_TAIL_REG 0x27024
+#define HCLGEVF_CMDQ_RX_HEAD_REG 0x27028
+#define HCLGEVF_CMDQ_INTR_EN_REG 0x27108
+#define HCLGEVF_CMDQ_INTR_GEN_REG 0x2710C
+
+/* bar registers for common func */
+#define HCLGEVF_GRO_EN_REG 0x28000
+
+/* bar registers for rcb */
+#define HCLGEVF_RING_RX_ADDR_L_REG 0x80000
+#define HCLGEVF_RING_RX_ADDR_H_REG 0x80004
+#define HCLGEVF_RING_RX_BD_NUM_REG 0x80008
+#define HCLGEVF_RING_RX_BD_LENGTH_REG 0x8000C
+#define HCLGEVF_RING_RX_MERGE_EN_REG 0x80014
+#define HCLGEVF_RING_RX_TAIL_REG 0x80018
+#define HCLGEVF_RING_RX_HEAD_REG 0x8001C
+#define HCLGEVF_RING_RX_FBD_NUM_REG 0x80020
+#define HCLGEVF_RING_RX_OFFSET_REG 0x80024
+#define HCLGEVF_RING_RX_FBD_OFFSET_REG 0x80028
+#define HCLGEVF_RING_RX_STASH_REG 0x80030
+#define HCLGEVF_RING_RX_BD_ERR_REG 0x80034
+#define HCLGEVF_RING_TX_ADDR_L_REG 0x80040
+#define HCLGEVF_RING_TX_ADDR_H_REG 0x80044
+#define HCLGEVF_RING_TX_BD_NUM_REG 0x80048
+#define HCLGEVF_RING_TX_PRIORITY_REG 0x8004C
+#define HCLGEVF_RING_TX_TC_REG 0x80050
+#define HCLGEVF_RING_TX_MERGE_EN_REG 0x80054
+#define HCLGEVF_RING_TX_TAIL_REG 0x80058
+#define HCLGEVF_RING_TX_HEAD_REG 0x8005C
+#define HCLGEVF_RING_TX_FBD_NUM_REG 0x80060
+#define HCLGEVF_RING_TX_OFFSET_REG 0x80064
+#define HCLGEVF_RING_TX_EBD_NUM_REG 0x80068
+#define HCLGEVF_RING_TX_EBD_OFFSET_REG 0x80070
+#define HCLGEVF_RING_TX_BD_ERR_REG 0x80074
+#define HCLGEVF_RING_EN_REG 0x80090
+
+/* bar registers for tqp interrupt */
+#define HCLGEVF_TQP_INTR_CTRL_REG 0x20000
+#define HCLGEVF_TQP_INTR_GL0_REG 0x20100
+#define HCLGEVF_TQP_INTR_GL1_REG 0x20200
+#define HCLGEVF_TQP_INTR_GL2_REG 0x20300
+#define HCLGEVF_TQP_INTR_RL_REG 0x20900
+
 /* Vector0 interrupt CMDQ event source register(RW) */
 #define HCLGEVF_VECTOR0_CMDQ_SRC_REG 0x27100
+/* Vector0 interrupt CMDQ event status register(RO) */
+#define HCLGEVF_VECTOR0_CMDQ_STATE_REG 0x27104
 /* CMDQ register bits for RX event(=MBX event) */
 #define HCLGEVF_VECTOR0_RX_CMDQ_INT_B 1
+/* RST register bits for RESET event */
+#define HCLGEVF_VECTOR0_RST_INT_B 2
 
 #define HCLGEVF_TQP_RESET_TRY_TIMES 10
 /* Reset related Registers */
-#define HCLGEVF_FUN_RST_ING 0x20C00
-#define HCLGEVF_FUN_RST_ING_B 0
+#define HCLGEVF_RST_ING 0x20C00
+#define HCLGEVF_FUN_RST_ING_BIT BIT(0)
+#define HCLGEVF_GLOBAL_RST_ING_BIT BIT(5)
+#define HCLGEVF_CORE_RST_ING_BIT BIT(6)
+#define HCLGEVF_IMP_RST_ING_BIT BIT(7)
+#define HCLGEVF_RST_ING_BITS \
+	(HCLGEVF_FUN_RST_ING_BIT | HCLGEVF_GLOBAL_RST_ING_BIT | \
+	 HCLGEVF_CORE_RST_ING_BIT | HCLGEVF_IMP_RST_ING_BIT)
+
+#define HCLGEVF_VF_RST_ING 0x07008
+#define HCLGEVF_VF_RST_ING_BIT BIT(16)
+
+#define HCLGEVF_WAIT_RESET_DONE 100
 
 #define HCLGEVF_RSS_IND_TBL_SIZE 512
 #define HCLGEVF_RSS_SET_BITMAP_MSK 0xffff
@@ -46,30 +117,53 @@
 #define HCLGEVF_RSS_HASH_ALGO_MASK 0xf
 #define HCLGEVF_RSS_CFG_TBL_NUM \
 	(HCLGEVF_RSS_IND_TBL_SIZE / HCLGEVF_RSS_CFG_TBL_SIZE)
+#define HCLGEVF_RSS_INPUT_TUPLE_OTHER GENMASK(3, 0)
+#define HCLGEVF_RSS_INPUT_TUPLE_SCTP GENMASK(4, 0)
+#define HCLGEVF_D_PORT_BIT BIT(0)
+#define HCLGEVF_S_PORT_BIT BIT(1)
+#define HCLGEVF_D_IP_BIT BIT(2)
+#define HCLGEVF_S_IP_BIT BIT(3)
+#define HCLGEVF_V_TAG_BIT BIT(4)
+#define HCLGEVF_RSS_INPUT_TUPLE_SCTP_NO_PORT \
+	(HCLGEVF_D_IP_BIT | HCLGEVF_S_IP_BIT | HCLGEVF_V_TAG_BIT)
 
-#define HCLGEVF_MTA_TBL_SIZE 4096
-#define HCLGEVF_MTA_TYPE_SEL_MAX 4
+#define HCLGEVF_STATS_TIMER_INTERVAL 36U
+
+enum hclgevf_evt_cause {
+	HCLGEVF_VECTOR0_EVENT_RST,
+	HCLGEVF_VECTOR0_EVENT_MBX,
+	HCLGEVF_VECTOR0_EVENT_OTHER,
+};
 
 /* states of hclgevf device & tasks */
 enum hclgevf_states {
 	/* device states */
 	HCLGEVF_STATE_DOWN,
 	HCLGEVF_STATE_DISABLED,
+	HCLGEVF_STATE_IRQ_INITED,
+	HCLGEVF_STATE_REMOVING,
+	HCLGEVF_STATE_NIC_REGISTERED,
+	HCLGEVF_STATE_ROCE_REGISTERED,
 	/* task states */
-	HCLGEVF_STATE_SERVICE_SCHED,
 	HCLGEVF_STATE_RST_SERVICE_SCHED,
 	HCLGEVF_STATE_RST_HANDLING,
 	HCLGEVF_STATE_MBX_SERVICE_SCHED,
 	HCLGEVF_STATE_MBX_HANDLING,
+	HCLGEVF_STATE_CMD_DISABLE,
+	HCLGEVF_STATE_LINK_UPDATING,
+	HCLGEVF_STATE_PROMISC_CHANGED,
+	HCLGEVF_STATE_RST_FAIL,
 };
 
-#define HCLGEVF_MPF_ENBALE 1
-
 struct hclgevf_mac {
+	u8 media_type;
+	u8 module_type;
 	u8 mac_addr[ETH_ALEN];
 	int link;
 	u8 duplex;
 	u32 speed;
+	u64 supported;
+	u64 advertising;
 };
 
 struct hclgevf_hw {
@@ -108,17 +202,63 @@
 	u32 numa_node_map;
 };
 
+struct hclgevf_rss_tuple_cfg {
+	u8 ipv4_tcp_en;
+	u8 ipv4_udp_en;
+	u8 ipv4_sctp_en;
+	u8 ipv4_fragment_en;
+	u8 ipv6_tcp_en;
+	u8 ipv6_udp_en;
+	u8 ipv6_sctp_en;
+	u8 ipv6_fragment_en;
+};
+
 struct hclgevf_rss_cfg {
 	u8 rss_hash_key[HCLGEVF_RSS_KEY_SIZE]; /* user configured hash keys */
 	u32 hash_algo;
 	u32 rss_size;
 	u8 hw_tc_map;
 	u8 rss_indirection_tbl[HCLGEVF_RSS_IND_TBL_SIZE]; /* shadow table */
+	struct hclgevf_rss_tuple_cfg rss_tuple_sets;
 };
 
 struct hclgevf_misc_vector {
 	u8 __iomem *addr;
 	int vector_irq;
+	char name[HNAE3_INT_NAME_LEN];
+};
+
+struct hclgevf_rst_stats {
+	u32 rst_cnt; /* the number of reset */
+	u32 vf_func_rst_cnt; /* the number of VF function reset */
+	u32 flr_rst_cnt; /* the number of FLR */
+	u32 vf_rst_cnt; /* the number of VF reset */
+	u32 rst_done_cnt; /* the number of reset completed */
+	u32 hw_rst_done_cnt; /* the number of HW reset completed */
+	u32 rst_fail_cnt; /* the number of VF reset fail */
+};
+
+enum HCLGEVF_MAC_ADDR_TYPE {
+	HCLGEVF_MAC_ADDR_UC,
+	HCLGEVF_MAC_ADDR_MC
+};
+
+enum HCLGEVF_MAC_NODE_STATE {
+	HCLGEVF_MAC_TO_ADD,
+	HCLGEVF_MAC_TO_DEL,
+	HCLGEVF_MAC_ACTIVE
+};
+
+struct hclgevf_mac_addr_node {
+	struct list_head node;
+	enum HCLGEVF_MAC_NODE_STATE state;
+	u8 mac_addr[ETH_ALEN];
+};
+
+struct hclgevf_mac_table_cfg {
+	spinlock_t mac_list_lock; /* protect mac address need to add/detele */
+	struct list_head uc_mac_list;
+	struct list_head mc_mac_list;
 };
 
 struct hclgevf_dev {
@@ -128,14 +268,22 @@
 	struct hclgevf_misc_vector misc_vector;
 	struct hclgevf_rss_cfg rss_cfg;
 	unsigned long state;
+	unsigned long flr_state;
+	unsigned long default_reset_request;
+	unsigned long last_reset_time;
+	enum hnae3_reset_type reset_level;
+	unsigned long reset_pending;
+	enum hnae3_reset_type reset_type;
 
 #define HCLGEVF_RESET_REQUESTED 0
 #define HCLGEVF_RESET_PENDING 1
 	unsigned long reset_state; /* requested, pending */
+	struct hclgevf_rst_stats rst_stats;
 	u32 reset_attempts;
+	struct semaphore reset_sem; /* protect reset process */
 
 	u32 fw_version;
-	u16 num_tqps; /* num task queue pairs of this PF */
+	u16 num_tqps; /* num task queue pairs of this VF */
 
 	u16 alloc_rss_size; /* allocated RSS task queue */
 	u16 rss_size_max; /* HW defined max RSS task queue */
@@ -143,12 +291,15 @@
 	u16 num_alloc_vport; /* num vports this driver supports */
 	u32 numa_node_mask;
 	u16 rx_buf_len;
-	u16 num_desc;
+	u16 num_tx_desc; /* desc num of per tx queue */
+	u16 num_rx_desc; /* desc num of per rx queue */
 	u8 hw_tc_map;
+	u8 has_pf_mac;
 
 	u16 num_msi;
 	u16 num_msi_left;
 	u16 num_msi_used;
+	u16 num_nic_msix; /* Num of nic vectors for this VF */
 	u16 num_roce_msix; /* Num of roce vectors for this VF */
 	u16 roce_base_msix_offset;
 	int roce_base_vector;
@@ -156,16 +307,15 @@
 	u16 *vector_status;
 	int *vector_irq;
 
-	bool accept_mta_mc; /* whether to accept mta filter multicast */
-	u8 mta_mac_sel_type;
+	unsigned long vlan_del_fail_bmap[BITS_TO_LONGS(VLAN_N_VID)];
+
+	struct hclgevf_mac_table_cfg mac_table;
+
 	bool mbx_event_pending;
 	struct hclgevf_mbx_resp_status mbx_resp; /* mailbox response */
 	struct hclgevf_mbx_arq_ring arq; /* mailbox async rx queue */
 
-	struct timer_list service_timer;
-	struct work_struct service_task;
-	struct work_struct rst_service_task;
-	struct work_struct mbx_service_task;
+	struct delayed_work service_task;
 
 	struct hclgevf_tqp *htqp;
 
@@ -175,24 +325,17 @@
 	struct hnae3_client *nic_client;
 	struct hnae3_client *roce_client;
 	u32 flag;
+	unsigned long serv_processed_cnt;
+	unsigned long last_serv_processed;
 };
 
-static inline bool hclgevf_dev_ongoing_reset(struct hclgevf_dev *hdev)
+static inline bool hclgevf_is_reset_pending(struct hclgevf_dev *hdev)
 {
-	return (hdev &&
-		(test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) &&
-		(hdev->nic.reset_level == HNAE3_VF_RESET));
+	return !!hdev->reset_pending;
 }
 
-static inline bool hclgevf_dev_ongoing_full_reset(struct hclgevf_dev *hdev)
-{
-	return (hdev &&
-		(test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) &&
-		(hdev->nic.reset_level == HNAE3_VF_FULL_RESET));
-}
-
-int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, u16 code, u16 subcode,
-			 const u8 *msg_data, u8 msg_len, bool need_resp,
+int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev,
+			 struct hclge_vf_to_pf_msg *send_msg, bool need_resp,
 			 u8 *resp_data, u16 resp_len);
 void hclgevf_mbx_handler(struct hclgevf_dev *hdev);
 void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev);
@@ -202,4 +345,6 @@
 			 u8 duplex);
 void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev);
 void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev);
+void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
+					u8 *port_base_vlan_info, u8 data_size);
 #endif