forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
....@@ -40,9 +40,11 @@
4040 #include <linux/skbuff.h>
4141 #include <linux/inetdevice.h>
4242 #include <linux/atomic.h>
43
+#include <net/tls.h>
4344 #include "cxgb4.h"
4445
4546 #define MAX_ULD_QSETS 16
47
+#define MAX_ULD_NPORTS 4
4648
4749 /* ulp_mem_io + ulptx_idata + payload + padding */
4850 #define MAX_IMM_ULPTX_WR_LEN (32 + 8 + 256 + 8)
....@@ -92,23 +94,35 @@
9294 union aopen_entry *next;
9395 };
9496
97
/*
 * One slot of the ETHOFLD TID (eotid) table: an opaque, caller-owned
 * cookie installed by cxgb4_alloc_eotid() and read back through
 * cxgb4_lookup_eotid().
 */
struct eotid_entry {
	void *data;	/* caller context; reset to NULL when the eotid is freed */
};
100
+
95101 /*
96102 * Holds the size, base address, free list start, etc of the TID, server TID,
97103 * and active-open TID tables. The tables themselves are allocated dynamically.
98104 */
99105 struct tid_info {
100106 void **tid_tab;
107
+ unsigned int tid_base;
101108 unsigned int ntids;
102109
103110 struct serv_entry *stid_tab;
104111 unsigned long *stid_bmap;
105112 unsigned int nstids;
106113 unsigned int stid_base;
114
+
115
+ unsigned int nhash;
107116 unsigned int hash_base;
108117
109118 union aopen_entry *atid_tab;
110119 unsigned int natids;
111120 unsigned int atid_base;
121
+
122
+ struct filter_entry *hpftid_tab;
123
+ unsigned long *hpftid_bmap;
124
+ unsigned int nhpftids;
125
+ unsigned int hpftid_base;
112126
113127 struct filter_entry *ftid_tab;
114128 unsigned long *ftid_bmap;
....@@ -129,18 +143,35 @@
129143 unsigned int v6_stids_in_use;
130144 unsigned int sftids_in_use;
131145
146
+ /* ETHOFLD range */
147
+ struct eotid_entry *eotid_tab;
148
+ unsigned long *eotid_bmap;
149
+ unsigned int eotid_base;
150
+ unsigned int neotids;
151
+
132152 /* TIDs in the TCAM */
133153 atomic_t tids_in_use;
134154 /* TIDs in the HASH */
135155 atomic_t hash_tids_in_use;
136156 atomic_t conns_in_use;
157
+ /* ETHOFLD TIDs used for rate limiting */
158
+ atomic_t eotids_in_use;
159
+
137160 /* lock for setting/clearing filter bitmap */
138161 spinlock_t ftid_lock;
162
+
163
+ unsigned int tc_hash_tids_max_prio;
139164 };
140165
141166 static inline void *lookup_tid(const struct tid_info *t, unsigned int tid)
142167 {
168
+ tid -= t->tid_base;
143169 return tid < t->ntids ? t->tid_tab[tid] : NULL;
170
+}
171
+
172
+static inline bool tid_out_of_range(const struct tid_info *t, unsigned int tid)
173
+{
174
+ return ((tid - t->tid_base) >= t->ntids);
144175 }
145176
146177 static inline void *lookup_atid(const struct tid_info *t, unsigned int atid)
....@@ -164,7 +195,7 @@
164195 static inline void cxgb4_insert_tid(struct tid_info *t, void *data,
165196 unsigned int tid, unsigned short family)
166197 {
167
- t->tid_tab[tid] = data;
198
+ t->tid_tab[tid - t->tid_base] = data;
168199 if (t->hash_base && (tid >= t->hash_base)) {
169200 if (family == AF_INET6)
170201 atomic_add(2, &t->hash_tids_in_use);
....@@ -177,6 +208,37 @@
177208 atomic_inc(&t->tids_in_use);
178209 }
179210 atomic_inc(&t->conns_in_use);
211
+}
212
+
213
+static inline struct eotid_entry *cxgb4_lookup_eotid(struct tid_info *t,
214
+ u32 eotid)
215
+{
216
+ return eotid < t->neotids ? &t->eotid_tab[eotid] : NULL;
217
+}
218
+
219
+static inline int cxgb4_get_free_eotid(struct tid_info *t)
220
+{
221
+ int eotid;
222
+
223
+ eotid = find_first_zero_bit(t->eotid_bmap, t->neotids);
224
+ if (eotid >= t->neotids)
225
+ eotid = -1;
226
+
227
+ return eotid;
228
+}
229
+
230
/*
 * cxgb4_alloc_eotid - claim an ETHOFLD TID and attach driver data to it.
 * @t: TID table state
 * @eotid: index previously obtained from cxgb4_get_free_eotid();
 *         not range-checked here
 * @data: caller-owned cookie retrievable via cxgb4_lookup_eotid()
 *
 * Marks the slot busy in the bitmap, records the cookie, and bumps the
 * eotids_in_use counter (used for rate limiting accounting, per the
 * tid_info field comment).  NOTE(review): get-free and alloc are separate
 * steps with no lock in between -- presumably the caller serializes;
 * confirm.
 */
static inline void cxgb4_alloc_eotid(struct tid_info *t, u32 eotid, void *data)
{
	set_bit(eotid, t->eotid_bmap);
	t->eotid_tab[eotid].data = data;
	atomic_inc(&t->eotids_in_use);
}
236
+
237
/*
 * cxgb4_free_eotid - release an ETHOFLD TID claimed by cxgb4_alloc_eotid().
 * @t: TID table state
 * @eotid: zero-based eotid index; not range-checked here
 *
 * Clears the bitmap bit, drops the stored cookie (the memory it points to
 * remains the caller's responsibility), and decrements eotids_in_use.
 */
static inline void cxgb4_free_eotid(struct tid_info *t, u32 eotid)
{
	clear_bit(eotid, t->eotid_bmap);
	t->eotid_tab[eotid].data = NULL;
	atomic_dec(&t->eotids_in_use);
}
181243
182244 int cxgb4_alloc_atid(struct tid_info *t, void *data);
....@@ -213,9 +275,14 @@
213275 u32 tid; /* to store tid */
214276 };
215277
278
/*
 * Shared kTLS state for the chcr driver.
 * NOTE(review): presumably counts active kTLS users so teardown can be
 * deferred until the last reference drops -- confirm at the
 * refcount_inc/dec call sites.
 */
struct chcr_ktls {
	refcount_t ktls_refcount;
};
281
+
216282 struct ch_filter_specification;
217283
218
-int cxgb4_get_free_ftid(struct net_device *dev, int family);
284
+int cxgb4_get_free_ftid(struct net_device *dev, u8 family, bool hash_en,
285
+ u32 tc_prio);
219286 int __cxgb4_set_filter(struct net_device *dev, int filter_id,
220287 struct ch_filter_specification *fs,
221288 struct filter_ctx *ctx);
....@@ -240,7 +307,9 @@
240307 CXGB4_ULD_ISCSI,
241308 CXGB4_ULD_ISCSIT,
242309 CXGB4_ULD_CRYPTO,
310
+ CXGB4_ULD_IPSEC,
243311 CXGB4_ULD_TLS,
312
+ CXGB4_ULD_KTLS,
244313 CXGB4_ULD_MAX
245314 };
246315
....@@ -271,6 +340,7 @@
271340 CXGB4_CONTROL_DB_DROP,
272341 };
273342
343
+struct adapter;
274344 struct pci_dev;
275345 struct l2t_data;
276346 struct net_device;
....@@ -295,7 +365,35 @@
295365 struct cxgb4_range ocq;
296366 struct cxgb4_range key;
297367 unsigned int ncrypto_fc;
368
+ struct cxgb4_range ppod_edram;
298369 };
370
+
371
+#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
372
/*
 * Per-port kTLS TX debug counters.  atomic64_t allows lock-free updates
 * from the transmit path.
 */
struct ch_ktls_port_stats_debug {
	atomic64_t ktls_tx_connection_open;
	atomic64_t ktls_tx_connection_fail;
	atomic64_t ktls_tx_connection_close;
	atomic64_t ktls_tx_encrypted_packets;
	atomic64_t ktls_tx_encrypted_bytes;
	atomic64_t ktls_tx_ctx;
	atomic64_t ktls_tx_ooo;		/* out-of-order TX observations */
	atomic64_t ktls_tx_skip_no_sync_data;
	atomic64_t ktls_tx_drop_no_sync_data;
	atomic64_t ktls_tx_drop_bypass_req;
};
384
+
385
/*
 * Adapter-wide kTLS TX debug statistics, with a per-port breakdown in
 * ktls_port[] (one slot per port, bounded by MAX_ULD_NPORTS).
 */
struct ch_ktls_stats_debug {
	struct ch_ktls_port_stats_debug ktls_port[MAX_ULD_NPORTS];
	atomic64_t ktls_tx_send_records;
	atomic64_t ktls_tx_end_pkts;
	atomic64_t ktls_tx_start_pkts;
	atomic64_t ktls_tx_middle_pkts;
	atomic64_t ktls_tx_retransmit_pkts;
	atomic64_t ktls_tx_complete_pkts;
	atomic64_t ktls_tx_trimmed_pkts;
	atomic64_t ktls_tx_fallback;	/* TX handed back to the non-offload path */
};
396
+#endif
299397
300398 struct chcr_stats_debug {
301399 atomic_t cipher_rqst;
....@@ -304,11 +402,16 @@
304402 atomic_t complete;
305403 atomic_t error;
306404 atomic_t fallback;
307
- atomic_t ipsec_cnt;
308405 atomic_t tls_pdu_tx;
309406 atomic_t tls_pdu_rx;
310407 atomic_t tls_key;
311408 };
409
+
410
+#if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
411
/*
 * Inline-IPsec debug statistics (split out of chcr_stats_debug, which
 * previously carried ipsec_cnt directly).
 */
struct ch_ipsec_stats_debug {
	atomic_t ipsec_cnt;
};
414
+#endif
312415
313416 #define OCQ_WIN_OFFSET(pdev, vres) \
314417 (pci_resource_len((pdev), 2) - roundup_pow_of_two((vres)->ocq.size))
....@@ -339,6 +442,7 @@
339442 unsigned int cclk_ps; /* Core clock period in psec */
340443 unsigned short udb_density; /* # of user DB/page */
341444 unsigned short ucq_density; /* # of user CQs/page */
445
+ unsigned int sge_host_page_size; /* SGE host page size */
342446 unsigned short filt_mode; /* filter optional components */
343447 unsigned short tx_modq[NCHAN]; /* maps each tx channel to a */
344448 /* scheduler queue */
....@@ -385,9 +489,21 @@
385489 struct napi_struct *napi);
386490 void (*lro_flush)(struct t4_lro_mgr *);
387491 int (*tx_handler)(struct sk_buff *skb, struct net_device *dev);
492
+#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
493
+ const struct tlsdev_ops *tlsdev_ops;
494
+#endif
495
+#if IS_ENABLED(CONFIG_XFRM_OFFLOAD)
496
+ const struct xfrmdev_ops *xfrmdev_ops;
497
+#endif
388498 };
389499
390
-int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p);
500
+static inline bool cxgb4_is_ktls_skb(struct sk_buff *skb)
501
+{
502
+ return skb->sk && tls_is_sk_tx_device_offloaded(skb->sk);
503
+}
504
+
505
+void cxgb4_uld_enable(struct adapter *adap);
506
+void cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p);
391507 int cxgb4_unregister_uld(enum cxgb4_uld type);
392508 int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb);
393509 int cxgb4_immdata_send(struct net_device *dev, unsigned int idx,
....@@ -395,6 +511,7 @@
395511 int cxgb4_crypto_send(struct net_device *dev, struct sk_buff *skb);
396512 unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo);
397513 unsigned int cxgb4_port_chan(const struct net_device *dev);
514
+unsigned int cxgb4_port_e2cchan(const struct net_device *dev);
398515 unsigned int cxgb4_port_viid(const struct net_device *dev);
399516 unsigned int cxgb4_tp_smt_idx(enum chip_type chip, unsigned int viid);
400517 unsigned int cxgb4_port_idx(const struct net_device *dev);