@@ -26,13 +26,10 @@
 #include "chcr_core.h"
 #include "cxgb4_uld.h"
 
-static LIST_HEAD(uld_ctx_list);
-static DEFINE_MUTEX(dev_mutex);
-static atomic_t dev_count;
-static struct uld_ctx *ctx_rr;
+static struct chcr_driver_data drv_data;
 
-typedef int (*chcr_handler_func)(struct chcr_dev *dev, unsigned char *input);
-static int cpl_fw6_pld_handler(struct chcr_dev *dev, unsigned char *input);
+typedef int (*chcr_handler_func)(struct adapter *adap, unsigned char *input);
+static int cpl_fw6_pld_handler(struct adapter *adap, unsigned char *input);
 static void *chcr_uld_add(const struct cxgb4_lld_info *lld);
 static int chcr_uld_state_change(void *handle, enum cxgb4_state state);
 
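The four file-scope globals above collapse into the single drv_data singleton. Its definition is not part of this diff (it lives in chcr_core.h); the following is a minimal sketch reconstructed from the fields this file accesses, so the field comments are interpretation, not quoted source:

/* Sketch of struct chcr_driver_data, reconstructed from usage below;
 * the authoritative definition is in chcr_core.h.
 */
struct chcr_driver_data {
        struct list_head act_dev;   /* devices currently in CXGB4_STATE_UP */
        struct list_head inact_dev; /* probed-but-unattached or detached devices */
        atomic_t dev_count;         /* initialised devices; gates stop_crypto() */
        struct mutex drv_mutex;     /* protects both lists and last_dev */
        struct uld_ctx *last_dev;   /* round-robin cursor for assign_chcr_device() */
};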
@@ -43,15 +40,35 @@
 static struct cxgb4_uld_info chcr_uld_info = {
         .name = DRV_MODULE_NAME,
         .nrxq = MAX_ULD_QSETS,
-        .ntxq = MAX_ULD_QSETS,
+        /* Max ntxq will be derived from fw config file */
         .rxq_size = 1024,
         .add = chcr_uld_add,
         .state_change = chcr_uld_state_change,
         .rx_handler = chcr_uld_rx_handler,
-#ifdef CONFIG_CHELSIO_IPSEC_INLINE
-        .tx_handler = chcr_uld_tx_handler,
-#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
 };
+
+static void detach_work_fn(struct work_struct *work)
+{
+        struct chcr_dev *dev;
+
+        dev = container_of(work, struct chcr_dev, detach_work.work);
+
+        if (atomic_read(&dev->inflight)) {
+                dev->wqretry--;
+                if (dev->wqretry) {
+                        pr_debug("Request inflight count %d\n",
+                                 atomic_read(&dev->inflight));
+
+                        schedule_delayed_work(&dev->detach_work, WQ_DETACH_TM);
+                } else {
+                        WARN(1, "CHCR: %d requests still pending\n",
+                             atomic_read(&dev->inflight));
+                        complete(&dev->detach_comp);
+                }
+        } else {
+                complete(&dev->detach_comp);
+        }
+}
 
 struct uld_ctx *assign_chcr_device(void)
 {
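The detach_work_fn() added above re-arms itself until the device's inflight counter drains (or the retry budget runs out), then signals the waiter in chcr_detach_device() further down. The per-device fields and the two constants it uses are declared in chcr_core.h and are not shown in this diff; a sketch consistent with their usage here, with the numeric values of WQ_RETRY and WQ_DETACH_TM assumed:

/* Sketch only -- the real declarations live in chcr_core.h. */
enum chcr_state {
        CHCR_INIT = 0,  /* probed, parked on drv_data.inact_dev */
        CHCR_ATTACH,    /* CXGB4_STATE_UP, on drv_data.act_dev */
        CHCR_DETACH,    /* shutting down, no new work accepted */
};

#define WQ_RETRY        5                       /* assumed poll budget */
#define WQ_DETACH_TM    (msecs_to_jiffies(50))  /* assumed poll interval */

struct chcr_dev {
        spinlock_t lock_chcr_dev;
        enum chcr_state state;
        atomic_t inflight;               /* requests posted but not yet completed */
        int wqretry;                     /* detach_work_fn() polls remaining */
        struct delayed_work detach_work; /* runs detach_work_fn() */
        struct completion detach_comp;   /* completed once inflight drains */
};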
@@ -63,67 +80,78 @@
          * Although One session must use the same device to
          * maintain request-response ordering.
          */
-        mutex_lock(&dev_mutex);
-        if (!list_empty(&uld_ctx_list)) {
-                u_ctx = ctx_rr;
-                if (list_is_last(&ctx_rr->entry, &uld_ctx_list))
-                        ctx_rr = list_first_entry(&uld_ctx_list,
-                                                  struct uld_ctx,
-                                                  entry);
+        mutex_lock(&drv_data.drv_mutex);
+        if (!list_empty(&drv_data.act_dev)) {
+                u_ctx = drv_data.last_dev;
+                if (list_is_last(&drv_data.last_dev->entry, &drv_data.act_dev))
+                        drv_data.last_dev = list_first_entry(&drv_data.act_dev,
+                                                struct uld_ctx, entry);
                 else
-                        ctx_rr = list_next_entry(ctx_rr, entry);
+                        drv_data.last_dev =
+                                list_next_entry(drv_data.last_dev, entry);
         }
-        mutex_unlock(&dev_mutex);
+        mutex_unlock(&drv_data.drv_mutex);
         return u_ctx;
 }
 
-static int chcr_dev_add(struct uld_ctx *u_ctx)
+static void chcr_dev_add(struct uld_ctx *u_ctx)
 {
         struct chcr_dev *dev;
 
-        dev = kzalloc(sizeof(*dev), GFP_KERNEL);
-        if (!dev)
-                return -ENXIO;
-
-        spin_lock_init(&dev->lock_chcr_dev);
-        u_ctx->dev = dev;
-        dev->u_ctx = u_ctx;
-        atomic_inc(&dev_count);
-        mutex_lock(&dev_mutex);
-        list_add_tail(&u_ctx->entry, &uld_ctx_list);
-        if (!ctx_rr)
-                ctx_rr = u_ctx;
-        mutex_unlock(&dev_mutex);
-        return 0;
+        dev = &u_ctx->dev;
+        dev->state = CHCR_ATTACH;
+        atomic_set(&dev->inflight, 0);
+        mutex_lock(&drv_data.drv_mutex);
+        list_move(&u_ctx->entry, &drv_data.act_dev);
+        if (!drv_data.last_dev)
+                drv_data.last_dev = u_ctx;
+        mutex_unlock(&drv_data.drv_mutex);
 }
 
-static int chcr_dev_remove(struct uld_ctx *u_ctx)
+static void chcr_dev_init(struct uld_ctx *u_ctx)
 {
-        if (ctx_rr == u_ctx) {
-                if (list_is_last(&ctx_rr->entry, &uld_ctx_list))
-                        ctx_rr = list_first_entry(&uld_ctx_list,
-                                                  struct uld_ctx,
-                                                  entry);
+        struct chcr_dev *dev;
+
+        dev = &u_ctx->dev;
+        spin_lock_init(&dev->lock_chcr_dev);
+        INIT_DELAYED_WORK(&dev->detach_work, detach_work_fn);
+        init_completion(&dev->detach_comp);
+        dev->state = CHCR_INIT;
+        dev->wqretry = WQ_RETRY;
+        atomic_inc(&drv_data.dev_count);
+        atomic_set(&dev->inflight, 0);
+        mutex_lock(&drv_data.drv_mutex);
+        list_add_tail(&u_ctx->entry, &drv_data.inact_dev);
+        mutex_unlock(&drv_data.drv_mutex);
+}
+
+static int chcr_dev_move(struct uld_ctx *u_ctx)
+{
+        mutex_lock(&drv_data.drv_mutex);
+        if (drv_data.last_dev == u_ctx) {
+                if (list_is_last(&drv_data.last_dev->entry, &drv_data.act_dev))
+                        drv_data.last_dev = list_first_entry(&drv_data.act_dev,
                                                struct uld_ctx, entry);
                 else
-                        ctx_rr = list_next_entry(ctx_rr, entry);
+                        drv_data.last_dev =
+                                list_next_entry(drv_data.last_dev, entry);
         }
-        list_del(&u_ctx->entry);
-        if (list_empty(&uld_ctx_list))
-                ctx_rr = NULL;
-        kfree(u_ctx->dev);
-        u_ctx->dev = NULL;
-        atomic_dec(&dev_count);
+        list_move(&u_ctx->entry, &drv_data.inact_dev);
+        if (list_empty(&drv_data.act_dev))
+                drv_data.last_dev = NULL;
+        atomic_dec(&drv_data.dev_count);
+        mutex_unlock(&drv_data.drv_mutex);
+
         return 0;
 }
 
-static int cpl_fw6_pld_handler(struct chcr_dev *dev,
+static int cpl_fw6_pld_handler(struct adapter *adap,
                                unsigned char *input)
 {
         struct crypto_async_request *req;
         struct cpl_fw6_pld *fw6_pld;
         u32 ack_err_status = 0;
         int error_status = 0;
-        struct adapter *adap = padap(dev);
 
         fw6_pld = (struct cpl_fw6_pld *)input;
         req = (struct crypto_async_request *)(uintptr_t)be64_to_cpu(
@@ -131,12 +159,8 @@
 
         ack_err_status =
                 ntohl(*(__be32 *)((unsigned char *)&fw6_pld->data[0] + 4));
-        if (ack_err_status) {
-                if (CHK_MAC_ERR_BIT(ack_err_status) ||
-                    CHK_PAD_ERR_BIT(ack_err_status))
-                        error_status = -EBADMSG;
-                atomic_inc(&adap->chcr_stats.error);
-        }
+        if (CHK_MAC_ERR_BIT(ack_err_status) || CHK_PAD_ERR_BIT(ack_err_status))
+                error_status = -EBADMSG;
         /* call completion callback with failure status */
         if (req) {
                 error_status = chcr_handle_resp(req, input, error_status);
@@ -144,6 +168,9 @@
                 pr_err("Incorrect request address from the firmware\n");
                 return -EFAULT;
         }
+        if (error_status)
+                atomic_inc(&adap->chcr_stats.error);
+
         return 0;
 }
 
@@ -157,6 +184,7 @@
         struct uld_ctx *u_ctx;
 
         /* Create the device and add it in the device list */
+        pr_info_once("%s - version %s\n", DRV_DESC, DRV_VERSION);
         if (!(lld->ulp_crypto & ULP_CRYPTO_LOOKASIDE))
                 return ERR_PTR(-EOPNOTSUPP);
 
@@ -167,10 +195,7 @@
                 goto out;
         }
         u_ctx->lldi = *lld;
-#ifdef CONFIG_CHELSIO_IPSEC_INLINE
-        if (lld->crypto & ULP_CRYPTO_IPSEC_INLINE)
-                chcr_add_xfrmops(lld);
-#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
+        chcr_dev_init(u_ctx);
 out:
         return u_ctx;
 }
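With chcr_dev now embedded in uld_ctx instead of allocated separately, the rx handler in the next hunk can recover the adapter with padap() on the embedded device. padap() itself is defined in chcr_core.h and is not shown here; a plausible container_of-based sketch of what it must do, with the helper name and exact form assumed:

/* Hypothetical sketch of the padap() accessor used below; the real
 * definition lives in chcr_core.h.
 */
static inline struct adapter *padap_sketch(struct chcr_dev *dev)
{
        struct uld_ctx *u_ctx = container_of(dev, struct uld_ctx, dev);

        return pci_get_drvdata(u_ctx->lldi.pdev);
}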
@@ -179,27 +204,39 @@
                            const struct pkt_gl *pgl)
 {
         struct uld_ctx *u_ctx = (struct uld_ctx *)handle;
-        struct chcr_dev *dev = u_ctx->dev;
+        struct chcr_dev *dev = &u_ctx->dev;
+        struct adapter *adap = padap(dev);
         const struct cpl_fw6_pld *rpl = (struct cpl_fw6_pld *)rsp;
 
-        if (rpl->opcode != CPL_FW6_PLD) {
-                pr_err("Unsupported opcode\n");
+        if (!work_handlers[rpl->opcode]) {
+                pr_err("Unsupported opcode %d received\n", rpl->opcode);
                 return 0;
         }
 
         if (!pgl)
-                work_handlers[rpl->opcode](dev, (unsigned char *)&rsp[1]);
+                work_handlers[rpl->opcode](adap, (unsigned char *)&rsp[1]);
         else
-                work_handlers[rpl->opcode](dev, pgl->va);
+                work_handlers[rpl->opcode](adap, pgl->va);
         return 0;
 }
 
-#ifdef CONFIG_CHELSIO_IPSEC_INLINE
-int chcr_uld_tx_handler(struct sk_buff *skb, struct net_device *dev)
+static void chcr_detach_device(struct uld_ctx *u_ctx)
 {
-        return chcr_ipsec_xmit(skb, dev);
+        struct chcr_dev *dev = &u_ctx->dev;
+
+        if (dev->state == CHCR_DETACH) {
+                pr_debug("Detach event received for already detached device\n");
+                return;
+        }
+        dev->state = CHCR_DETACH;
+        if (atomic_read(&dev->inflight) != 0) {
+                schedule_delayed_work(&dev->detach_work, WQ_DETACH_TM);
+                wait_for_completion(&dev->detach_comp);
+        }
+
+        /* Move u_ctx to the inact_dev list */
+        chcr_dev_move(u_ctx);
 }
-#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
 
 static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
 {
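chcr_detach_device() can only drain if every submission path increments inflight before posting a request, refuses new work once the state is CHCR_DETACH, and decrements on completion. Those helpers belong to the request paths and are not in this file; a sketch of the accounting contract this teardown logic assumes, with the helper names hypothetical:

/* Hypothetical helpers illustrating the inflight contract; the actual
 * increment/decrement sites live in the submission/completion paths.
 */
static inline int chcr_inc_wrcount(struct chcr_dev *dev)
{
        if (dev->state == CHCR_DETACH)
                return 1;       /* device is draining: reject new requests */
        atomic_inc(&dev->inflight);
        return 0;
}

static inline void chcr_dec_wrcount(struct chcr_dev *dev)
{
        atomic_dec(&dev->inflight);     /* completion side: lets detach finish */
}

Without the CHCR_DETACH check, a request posted between the state change and wait_for_completion() could keep inflight from ever reaching zero.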
@@ -208,22 +245,17 @@
 
         switch (state) {
         case CXGB4_STATE_UP:
-                if (!u_ctx->dev) {
-                        ret = chcr_dev_add(u_ctx);
-                        if (ret != 0)
-                                return ret;
+                if (u_ctx->dev.state != CHCR_INIT) {
+                        /* Already initialised */
+                        return 0;
                 }
-                if (atomic_read(&dev_count) == 1)
-                        ret = start_crypto();
+                chcr_dev_add(u_ctx);
+                ret = start_crypto();
                 break;
 
         case CXGB4_STATE_DETACH:
-                if (u_ctx->dev) {
-                        mutex_lock(&dev_mutex);
-                        chcr_dev_remove(u_ctx);
-                        mutex_unlock(&dev_mutex);
-                }
-                if (!atomic_read(&dev_count))
+                chcr_detach_device(u_ctx);
+                if (!atomic_read(&drv_data.dev_count))
                         stop_crypto();
                 break;
 
@@ -237,8 +269,12 @@
 
 static int __init chcr_crypto_init(void)
 {
-        if (cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info))
-                pr_err("ULD register fail: No chcr crypto support in cxgb4\n");
+        INIT_LIST_HEAD(&drv_data.act_dev);
+        INIT_LIST_HEAD(&drv_data.inact_dev);
+        atomic_set(&drv_data.dev_count, 0);
+        mutex_init(&drv_data.drv_mutex);
+        drv_data.last_dev = NULL;
+        cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info);
 
         return 0;
 }
@@ -246,19 +282,25 @@
 static void __exit chcr_crypto_exit(void)
 {
         struct uld_ctx *u_ctx, *tmp;
+        struct adapter *adap;
 
-        if (atomic_read(&dev_count))
-                stop_crypto();
-
+        stop_crypto();
+        cxgb4_unregister_uld(CXGB4_ULD_CRYPTO);
         /* Remove all devices from list */
-        mutex_lock(&dev_mutex);
-        list_for_each_entry_safe(u_ctx, tmp, &uld_ctx_list, entry) {
-                if (u_ctx->dev)
-                        chcr_dev_remove(u_ctx);
+        mutex_lock(&drv_data.drv_mutex);
+        list_for_each_entry_safe(u_ctx, tmp, &drv_data.act_dev, entry) {
+                adap = padap(&u_ctx->dev);
+                memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats));
+                list_del(&u_ctx->entry);
                 kfree(u_ctx);
         }
-        mutex_unlock(&dev_mutex);
-        cxgb4_unregister_uld(CXGB4_ULD_CRYPTO);
+        list_for_each_entry_safe(u_ctx, tmp, &drv_data.inact_dev, entry) {
+                adap = padap(&u_ctx->dev);
+                memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats));
+                list_del(&u_ctx->entry);
+                kfree(u_ctx);
+        }
+        mutex_unlock(&drv_data.drv_mutex);
 }
 
 module_init(chcr_crypto_init);
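Note the teardown order in chcr_crypto_exit(): the ULD is unregistered before the context lists are walked, so cxgb4 can no longer invoke chcr_uld_add() or the state-change callback while contexts are being freed. Both lists must now be emptied, since detached devices persist on inact_dev rather than being freed at detach time as chcr_dev_remove() used to do.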