2024-05-10 37f49e37ab4cb5d0bc4c60eb5c6d4dd57db767bb
kernel/drivers/crypto/chelsio/chcr_core.c
@@ -26,13 +26,10 @@
 #include "chcr_core.h"
 #include "cxgb4_uld.h"
 
-static LIST_HEAD(uld_ctx_list);
-static DEFINE_MUTEX(dev_mutex);
-static atomic_t dev_count;
-static struct uld_ctx *ctx_rr;
+static struct chcr_driver_data drv_data;
 
-typedef int (*chcr_handler_func)(struct chcr_dev *dev, unsigned char *input);
-static int cpl_fw6_pld_handler(struct chcr_dev *dev, unsigned char *input);
+typedef int (*chcr_handler_func)(struct adapter *adap, unsigned char *input);
+static int cpl_fw6_pld_handler(struct adapter *adap, unsigned char *input);
 static void *chcr_uld_add(const struct cxgb4_lld_info *lld);
 static int chcr_uld_state_change(void *handle, enum cxgb4_state state);
 
@@ -43,15 +40,35 @@
 static struct cxgb4_uld_info chcr_uld_info = {
 	.name = DRV_MODULE_NAME,
 	.nrxq = MAX_ULD_QSETS,
-	.ntxq = MAX_ULD_QSETS,
+	/* Max ntxq will be derived from fw config file*/
 	.rxq_size = 1024,
 	.add = chcr_uld_add,
 	.state_change = chcr_uld_state_change,
 	.rx_handler = chcr_uld_rx_handler,
-#ifdef CONFIG_CHELSIO_IPSEC_INLINE
-	.tx_handler = chcr_uld_tx_handler,
-#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
 };
+
+static void detach_work_fn(struct work_struct *work)
+{
+	struct chcr_dev *dev;
+
+	dev = container_of(work, struct chcr_dev, detach_work.work);
+
+	if (atomic_read(&dev->inflight)) {
+		dev->wqretry--;
+		if (dev->wqretry) {
+			pr_debug("Request Inflight Count %d\n",
+				 atomic_read(&dev->inflight));
+
+			schedule_delayed_work(&dev->detach_work, WQ_DETACH_TM);
+		} else {
+			WARN(1, "CHCR:%d request Still Pending\n",
+			     atomic_read(&dev->inflight));
+			complete(&dev->detach_comp);
+		}
+	} else {
+		complete(&dev->detach_comp);
+	}
+}
 
 struct uld_ctx *assign_chcr_device(void)
 {
@@ -63,67 +80,78 @@
 	 * Although One session must use the same device to
 	 * maintain request-response ordering.
 	 */
-	mutex_lock(&dev_mutex);
-	if (!list_empty(&uld_ctx_list)) {
-		u_ctx = ctx_rr;
-		if (list_is_last(&ctx_rr->entry, &uld_ctx_list))
-			ctx_rr = list_first_entry(&uld_ctx_list,
-						  struct uld_ctx,
-						  entry);
+	mutex_lock(&drv_data.drv_mutex);
+	if (!list_empty(&drv_data.act_dev)) {
+		u_ctx = drv_data.last_dev;
+		if (list_is_last(&drv_data.last_dev->entry, &drv_data.act_dev))
+			drv_data.last_dev = list_first_entry(&drv_data.act_dev,
+						  struct uld_ctx, entry);
 		else
-			ctx_rr = list_next_entry(ctx_rr, entry);
+			drv_data.last_dev =
+				list_next_entry(drv_data.last_dev, entry);
 	}
-	mutex_unlock(&dev_mutex);
+	mutex_unlock(&drv_data.drv_mutex);
 	return u_ctx;
 }
 
-static int chcr_dev_add(struct uld_ctx *u_ctx)
+static void chcr_dev_add(struct uld_ctx *u_ctx)
 {
 	struct chcr_dev *dev;
 
-	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
-	if (!dev)
-		return -ENXIO;
-
-	spin_lock_init(&dev->lock_chcr_dev);
-	u_ctx->dev = dev;
-	dev->u_ctx = u_ctx;
-	atomic_inc(&dev_count);
-	mutex_lock(&dev_mutex);
-	list_add_tail(&u_ctx->entry, &uld_ctx_list);
-	if (!ctx_rr)
-		ctx_rr = u_ctx;
-	mutex_unlock(&dev_mutex);
-	return 0;
+	dev = &u_ctx->dev;
+	dev->state = CHCR_ATTACH;
+	atomic_set(&dev->inflight, 0);
+	mutex_lock(&drv_data.drv_mutex);
+	list_move(&u_ctx->entry, &drv_data.act_dev);
+	if (!drv_data.last_dev)
+		drv_data.last_dev = u_ctx;
+	mutex_unlock(&drv_data.drv_mutex);
 }
 
-static int chcr_dev_remove(struct uld_ctx *u_ctx)
+static void chcr_dev_init(struct uld_ctx *u_ctx)
 {
-	if (ctx_rr == u_ctx) {
-		if (list_is_last(&ctx_rr->entry, &uld_ctx_list))
-			ctx_rr = list_first_entry(&uld_ctx_list,
-						  struct uld_ctx,
-						  entry);
+	struct chcr_dev *dev;
+
+	dev = &u_ctx->dev;
+	spin_lock_init(&dev->lock_chcr_dev);
+	INIT_DELAYED_WORK(&dev->detach_work, detach_work_fn);
+	init_completion(&dev->detach_comp);
+	dev->state = CHCR_INIT;
+	dev->wqretry = WQ_RETRY;
+	atomic_inc(&drv_data.dev_count);
+	atomic_set(&dev->inflight, 0);
+	mutex_lock(&drv_data.drv_mutex);
+	list_add_tail(&u_ctx->entry, &drv_data.inact_dev);
+	mutex_unlock(&drv_data.drv_mutex);
+}
+
+static int chcr_dev_move(struct uld_ctx *u_ctx)
+{
+	mutex_lock(&drv_data.drv_mutex);
+	if (drv_data.last_dev == u_ctx) {
+		if (list_is_last(&drv_data.last_dev->entry, &drv_data.act_dev))
+			drv_data.last_dev = list_first_entry(&drv_data.act_dev,
+						  struct uld_ctx, entry);
 		else
-			ctx_rr = list_next_entry(ctx_rr, entry);
+			drv_data.last_dev =
+				list_next_entry(drv_data.last_dev, entry);
 	}
-	list_del(&u_ctx->entry);
-	if (list_empty(&uld_ctx_list))
-		ctx_rr = NULL;
-	kfree(u_ctx->dev);
-	u_ctx->dev = NULL;
-	atomic_dec(&dev_count);
+	list_move(&u_ctx->entry, &drv_data.inact_dev);
+	if (list_empty(&drv_data.act_dev))
+		drv_data.last_dev = NULL;
+	atomic_dec(&drv_data.dev_count);
+	mutex_unlock(&drv_data.drv_mutex);
+
 	return 0;
 }
 
-static int cpl_fw6_pld_handler(struct chcr_dev *dev,
+static int cpl_fw6_pld_handler(struct adapter *adap,
 			       unsigned char *input)
 {
 	struct crypto_async_request *req;
 	struct cpl_fw6_pld *fw6_pld;
 	u32 ack_err_status = 0;
 	int error_status = 0;
-	struct adapter *adap = padap(dev);
 
 	fw6_pld = (struct cpl_fw6_pld *)input;
 	req = (struct crypto_async_request *)(uintptr_t)be64_to_cpu(
@@ -131,12 +159,8 @@
 
 	ack_err_status =
 		ntohl(*(__be32 *)((unsigned char *)&fw6_pld->data[0] + 4));
-	if (ack_err_status) {
-		if (CHK_MAC_ERR_BIT(ack_err_status) ||
-		    CHK_PAD_ERR_BIT(ack_err_status))
-			error_status = -EBADMSG;
-		atomic_inc(&adap->chcr_stats.error);
-	}
+	if (CHK_MAC_ERR_BIT(ack_err_status) || CHK_PAD_ERR_BIT(ack_err_status))
+		error_status = -EBADMSG;
 	/* call completion callback with failure status */
 	if (req) {
 		error_status = chcr_handle_resp(req, input, error_status);
@@ -144,6 +168,9 @@
 		pr_err("Incorrect request address from the firmware\n");
 		return -EFAULT;
 	}
+	if (error_status)
+		atomic_inc(&adap->chcr_stats.error);
+
 	return 0;
 }
 
@@ -157,6 +184,7 @@
 	struct uld_ctx *u_ctx;
 
 	/* Create the device and add it in the device list */
+	pr_info_once("%s - version %s\n", DRV_DESC, DRV_VERSION);
 	if (!(lld->ulp_crypto & ULP_CRYPTO_LOOKASIDE))
 		return ERR_PTR(-EOPNOTSUPP);
 
@@ -167,10 +195,7 @@
 		goto out;
 	}
 	u_ctx->lldi = *lld;
-#ifdef CONFIG_CHELSIO_IPSEC_INLINE
-	if (lld->crypto & ULP_CRYPTO_IPSEC_INLINE)
-		chcr_add_xfrmops(lld);
-#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
+	chcr_dev_init(u_ctx);
 out:
 	return u_ctx;
 }
@@ -179,27 +204,39 @@
 			  const struct pkt_gl *pgl)
 {
 	struct uld_ctx *u_ctx = (struct uld_ctx *)handle;
-	struct chcr_dev *dev = u_ctx->dev;
+	struct chcr_dev *dev = &u_ctx->dev;
+	struct adapter *adap = padap(dev);
 	const struct cpl_fw6_pld *rpl = (struct cpl_fw6_pld *)rsp;
 
-	if (rpl->opcode != CPL_FW6_PLD) {
-		pr_err("Unsupported opcode\n");
+	if (!work_handlers[rpl->opcode]) {
+		pr_err("Unsupported opcode %d received\n", rpl->opcode);
 		return 0;
 	}
 
 	if (!pgl)
-		work_handlers[rpl->opcode](dev, (unsigned char *)&rsp[1]);
+		work_handlers[rpl->opcode](adap, (unsigned char *)&rsp[1]);
 	else
-		work_handlers[rpl->opcode](dev, pgl->va);
+		work_handlers[rpl->opcode](adap, pgl->va);
 	return 0;
 }
 
-#ifdef CONFIG_CHELSIO_IPSEC_INLINE
-int chcr_uld_tx_handler(struct sk_buff *skb, struct net_device *dev)
+static void chcr_detach_device(struct uld_ctx *u_ctx)
 {
-	return chcr_ipsec_xmit(skb, dev);
+	struct chcr_dev *dev = &u_ctx->dev;
+
+	if (dev->state == CHCR_DETACH) {
+		pr_debug("Detached Event received for already detach device\n");
+		return;
+	}
+	dev->state = CHCR_DETACH;
+	if (atomic_read(&dev->inflight) != 0) {
+		schedule_delayed_work(&dev->detach_work, WQ_DETACH_TM);
+		wait_for_completion(&dev->detach_comp);
+	}
+
+	// Move u_ctx to inactive_dev list
+	chcr_dev_move(u_ctx);
 }
-#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
 
 static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
 {
@@ -208,22 +245,17 @@
 
 	switch (state) {
 	case CXGB4_STATE_UP:
-		if (!u_ctx->dev) {
-			ret = chcr_dev_add(u_ctx);
-			if (ret != 0)
-				return ret;
+		if (u_ctx->dev.state != CHCR_INIT) {
+			// ALready Initialised.
+			return 0;
 		}
-		if (atomic_read(&dev_count) == 1)
-			ret = start_crypto();
+		chcr_dev_add(u_ctx);
+		ret = start_crypto();
 		break;
 
 	case CXGB4_STATE_DETACH:
-		if (u_ctx->dev) {
-			mutex_lock(&dev_mutex);
-			chcr_dev_remove(u_ctx);
-			mutex_unlock(&dev_mutex);
-		}
-		if (!atomic_read(&dev_count))
+		chcr_detach_device(u_ctx);
+		if (!atomic_read(&drv_data.dev_count))
 			stop_crypto();
 		break;
 
@@ -237,8 +269,12 @@
 
 static int __init chcr_crypto_init(void)
 {
-	if (cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info))
-		pr_err("ULD register fail: No chcr crypto support in cxgb4\n");
+	INIT_LIST_HEAD(&drv_data.act_dev);
+	INIT_LIST_HEAD(&drv_data.inact_dev);
+	atomic_set(&drv_data.dev_count, 0);
+	mutex_init(&drv_data.drv_mutex);
+	drv_data.last_dev = NULL;
+	cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info);
 
 	return 0;
 }
@@ -246,19 +282,25 @@
 static void __exit chcr_crypto_exit(void)
 {
 	struct uld_ctx *u_ctx, *tmp;
+	struct adapter *adap;
 
-	if (atomic_read(&dev_count))
-		stop_crypto();
-
+	stop_crypto();
+	cxgb4_unregister_uld(CXGB4_ULD_CRYPTO);
 	/* Remove all devices from list */
-	mutex_lock(&dev_mutex);
-	list_for_each_entry_safe(u_ctx, tmp, &uld_ctx_list, entry) {
-		if (u_ctx->dev)
-			chcr_dev_remove(u_ctx);
+	mutex_lock(&drv_data.drv_mutex);
+	list_for_each_entry_safe(u_ctx, tmp, &drv_data.act_dev, entry) {
+		adap = padap(&u_ctx->dev);
+		memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats));
+		list_del(&u_ctx->entry);
 		kfree(u_ctx);
 	}
-	mutex_unlock(&dev_mutex);
-	cxgb4_unregister_uld(CXGB4_ULD_CRYPTO);
+	list_for_each_entry_safe(u_ctx, tmp, &drv_data.inact_dev, entry) {
+		adap = padap(&u_ctx->dev);
+		memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats));
+		list_del(&u_ctx->entry);
+		kfree(u_ctx);
+	}
+	mutex_unlock(&drv_data.drv_mutex);
 }
 
 module_init(chcr_crypto_init);
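
Note: the drv_data bookkeeping and the new chcr_dev fields used above are declared in chcr_core.h, which is not part of this diff. The following is only a rough sketch of the assumed layout, inferred from the accesses in this patch (field names, types and ordering in the real header may differ; CHCR_INIT/CHCR_ATTACH/CHCR_DETACH, WQ_RETRY and WQ_DETACH_TM are likewise assumed to come from that header):

/* Sketch only -- inferred from this patch, not copied from chcr_core.h. */
struct chcr_driver_data {
	struct list_head act_dev;	/* uld_ctx entries currently live (CXGB4_STATE_UP) */
	struct list_head inact_dev;	/* initialised or detached uld_ctx entries */
	atomic_t dev_count;		/* devices initialised and not yet detached */
	struct mutex drv_mutex;		/* protects both lists and last_dev */
	struct uld_ctx *last_dev;	/* round-robin cursor for assign_chcr_device() */
};

struct chcr_dev {
	spinlock_t lock_chcr_dev;
	int state;			/* CHCR_INIT, CHCR_ATTACH or CHCR_DETACH */
	atomic_t inflight;		/* outstanding requests, drained on detach */
	int wqretry;			/* detach_work retries left, starts at WQ_RETRY */
	struct delayed_work detach_work;
	struct completion detach_comp;
};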