| .. | .. |
|---|
| 13 | 13 | #define FDATA_SIZE 32 |
|---|
| 14 | 14 | /* Base destination port for the solicited requests */ |
|---|
| 15 | 15 | #define SOLICIT_BASE_DPORT 256 |
|---|
| 16 | | -#define PENDING_SIG 0xFFFFFFFFFFFFFFFFUL |
|---|
| 17 | 16 | |
|---|
| 18 | 17 | #define REQ_NOT_POSTED 1 |
|---|
| 19 | 18 | #define REQ_BACKLOG 2 |
|---|
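The `PENDING_SIG` sentinel is removed from this file, yet the new `sr_completed()` helper further down still compares both response words against it, so the define presumably moves to a shared header (which header is an assumption). For reference, a sketch of the sentinel as it stood:

```c
/* as deleted above; presumably relocated to a shared header so that
 * callers can pre-fill their ORH/completion words before submitting */
#define PENDING_SIG	0xFFFFFFFFFFFFFFFFUL
```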
| .. | .. |
|---|
| 52 | 51 | return index; |
|---|
| 53 | 52 | } |
|---|
| 54 | 53 | |
|---|
| 55 | | -/** |
|---|
| 56 | | - * dma_free_sglist - unmap and free the sg lists. |
|---|
| 57 | | - * @ndev: N5 device |
|---|
| 58 | | - * @sgtbl: SG table |
|---|
| 59 | | - */ |
|---|
| 60 | 54 | static void softreq_unmap_sgbufs(struct nitrox_softreq *sr) |
|---|
| 61 | 55 | { |
|---|
| 62 | 56 | struct nitrox_device *ndev = sr->ndev; |
|---|
| 63 | 57 | struct device *dev = DEV(ndev); |
|---|
| 64 | | - struct nitrox_sglist *sglist; |
|---|
| 65 | 58 | |
|---|
| 66 | | - /* unmap in sgbuf */ |
|---|
| 67 | | - sglist = sr->in.sglist; |
|---|
| 68 | | - if (!sglist) |
|---|
| 69 | | - goto out_unmap; |
|---|
| 70 | 59 | |
|---|
| 71 | | - /* unmap iv */ |
|---|
| 72 | | - dma_unmap_single(dev, sglist->dma, sglist->len, DMA_BIDIRECTIONAL); |
|---|
| 73 | | - /* unmpa src sglist */ |
|---|
| 74 | | - dma_unmap_sg(dev, sr->in.buf, (sr->in.map_bufs_cnt - 1), sr->in.dir); |
|---|
| 75 | | - /* unamp gather component */ |
|---|
| 76 | | - dma_unmap_single(dev, sr->in.dma, sr->in.len, DMA_TO_DEVICE); |
|---|
| 77 | | - kfree(sr->in.sglist); |
|---|
| 60 | + dma_unmap_sg(dev, sr->in.sg, sr->in.sgmap_cnt, DMA_BIDIRECTIONAL); |
|---|
| 61 | + dma_unmap_single(dev, sr->in.sgcomp_dma, sr->in.sgcomp_len, |
|---|
| 62 | + DMA_TO_DEVICE); |
|---|
| 78 | 63 | kfree(sr->in.sgcomp); |
|---|
| 79 | | - sr->in.sglist = NULL; |
|---|
| 80 | | - sr->in.buf = NULL; |
|---|
| 81 | | - sr->in.map_bufs_cnt = 0; |
|---|
| 64 | + sr->in.sg = NULL; |
|---|
| 65 | + sr->in.sgmap_cnt = 0; |
|---|
| 82 | 66 | |
|---|
| 83 | | -out_unmap: |
|---|
| 84 | | - /* unmap out sgbuf */ |
|---|
| 85 | | - sglist = sr->out.sglist; |
|---|
| 86 | | - if (!sglist) |
|---|
| 87 | | - return; |
|---|
| 88 | | - |
|---|
| 89 | | - /* unmap orh */ |
|---|
| 90 | | - dma_unmap_single(dev, sr->resp.orh_dma, ORH_HLEN, sr->out.dir); |
|---|
| 91 | | - |
|---|
| 92 | | - /* unmap dst sglist */ |
|---|
| 93 | | - if (!sr->inplace) { |
|---|
| 94 | | - dma_unmap_sg(dev, sr->out.buf, (sr->out.map_bufs_cnt - 3), |
|---|
| 95 | | - sr->out.dir); |
|---|
| 96 | | - } |
|---|
| 97 | | - /* unmap completion */ |
|---|
| 98 | | - dma_unmap_single(dev, sr->resp.completion_dma, COMP_HLEN, sr->out.dir); |
|---|
| 99 | | - |
|---|
| 100 | | - /* unmap scatter component */ |
|---|
| 101 | | - dma_unmap_single(dev, sr->out.dma, sr->out.len, DMA_TO_DEVICE); |
|---|
| 102 | | - kfree(sr->out.sglist); |
|---|
| 67 | + dma_unmap_sg(dev, sr->out.sg, sr->out.sgmap_cnt, |
|---|
| 68 | + DMA_BIDIRECTIONAL); |
|---|
| 69 | + dma_unmap_single(dev, sr->out.sgcomp_dma, sr->out.sgcomp_len, |
|---|
| 70 | + DMA_TO_DEVICE); |
|---|
| 103 | 71 | kfree(sr->out.sgcomp); |
|---|
| 104 | | - sr->out.sglist = NULL; |
|---|
| 105 | | - sr->out.buf = NULL; |
|---|
| 106 | | - sr->out.map_bufs_cnt = 0; |
|---|
| 72 | + sr->out.sg = NULL; |
|---|
| 73 | + sr->out.sgmap_cnt = 0; |
|---|
| 107 | 74 | } |
|---|
| 108 | 75 | |
|---|
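The rewritten `softreq_unmap_sgbufs()` tears down exactly two mappings per direction: the caller's scatterlist (`dma_unmap_sg()`) and the single device-visible SG-component buffer (`dma_unmap_single()`), replacing the old IV/src/gather bookkeeping. A sketch of the `nitrox_sgtable` fields this relies on, inferred purely from their use in this patch (the real declaration may differ):

```c
/* inferred from usage in this patch; exact types/order are assumptions */
struct nitrox_sgtable {
	struct scatterlist *sg;        /* caller's scatterlist */
	int sgmap_cnt;                 /* entries returned by dma_map_sg() */
	int total_bytes;               /* sum of sg_dma_len() over the list */
	size_t sgcomp_len;             /* bytes in the device SG component */
	dma_addr_t sgcomp_dma;         /* its dma_map_single() handle */
	struct nitrox_sgcomp *sgcomp;  /* device-format component array */
};
```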
| 109 | 76 | static void softreq_destroy(struct nitrox_softreq *sr) |
|---|
| .. | .. |
|---|
| 116 | 83 | * create_sg_component - create SG components for N5 device. |
|---|
| 117 | 84 | * @sr: Request structure |
|---|
| 118 | 85 | * @sgtbl: SG table |
|---|
| 119 | | - * @nr_comp: total number of components required |
|---|
| 86 | + * @map_nents: number of dma mapped entries |
|---|
| 120 | 87 | * |
|---|
| 121 | 88 | * Component structure |
|---|
| 122 | 89 | * |
|---|
| .. | .. |
|---|
| 140 | 107 | { |
|---|
| 141 | 108 | struct nitrox_device *ndev = sr->ndev; |
|---|
| 142 | 109 | struct nitrox_sgcomp *sgcomp; |
|---|
| 143 | | - struct nitrox_sglist *sglist; |
|---|
| 110 | + struct scatterlist *sg; |
|---|
| 144 | 111 | dma_addr_t dma; |
|---|
| 145 | 112 | size_t sz_comp; |
|---|
| 146 | 113 | int i, j, nr_sgcomp; |
|---|
| .. | .. |
|---|
| 154 | 121 | return -ENOMEM; |
|---|
| 155 | 122 | |
|---|
| 156 | 123 | sgtbl->sgcomp = sgcomp; |
|---|
| 157 | | - sgtbl->nr_sgcomp = nr_sgcomp; |
|---|
| 158 | 124 | |
|---|
| 159 | | - sglist = sgtbl->sglist; |
|---|
| 125 | + sg = sgtbl->sg; |
|---|
| 160 | 126 | /* populate device sg component */ |
|---|
| 161 | 127 | for (i = 0; i < nr_sgcomp; i++) { |
|---|
| 162 | | - for (j = 0; j < 4; j++) { |
|---|
| 163 | | - sgcomp->len[j] = cpu_to_be16(sglist->len); |
|---|
| 164 | | - sgcomp->dma[j] = cpu_to_be64(sglist->dma); |
|---|
| 165 | | - sglist++; |
|---|
| 128 | + for (j = 0; j < 4 && sg; j++) { |
|---|
| 129 | + sgcomp[i].len[j] = cpu_to_be16(sg_dma_len(sg)); |
|---|
| 130 | + sgcomp[i].dma[j] = cpu_to_be64(sg_dma_address(sg)); |
|---|
| 131 | + sg = sg_next(sg); |
|---|
| 166 | 132 | } |
|---|
| 167 | | - sgcomp++; |
|---|
| 168 | 133 | } |
|---|
| 169 | 134 | /* map the device sg component */ |
|---|
| 170 | 135 | dma = dma_map_single(DEV(ndev), sgtbl->sgcomp, sz_comp, DMA_TO_DEVICE); |
|---|
| .. | .. |
|---|
| 174 | 139 | return -ENOMEM; |
|---|
| 175 | 140 | } |
|---|
| 176 | 141 | |
|---|
| 177 | | - sgtbl->dma = dma; |
|---|
| 178 | | - sgtbl->len = sz_comp; |
|---|
| 142 | + sgtbl->sgcomp_dma = dma; |
|---|
| 143 | + sgtbl->sgcomp_len = sz_comp; |
|---|
| 179 | 144 | |
|---|
| 180 | 145 | return 0; |
|---|
| 181 | 146 | } |
|---|
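Each component packs four (length, address) pairs, so `map_nents` mapped entries presumably need `roundup(map_nents, 4) / 4` components (the computation sits in the elided lines); the new `j < 4 && sg` guard stops cleanly when the chained scatterlist runs out, leaving the remaining `kzalloc()`-ed pairs zeroed. The component layout, as used above (the declaration itself is an assumption):

```c
/* one N5 SG component: four big-endian lengths followed by four
 * big-endian DMA addresses, filled by create_sg_component() */
struct nitrox_sgcomp {
	__be16 len[4];
	__be64 dma[4];
};
/* e.g. 6 mapped entries -> 2 components, the second one half zeroed */
```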
| .. | .. |
|---|
| 193 | 158 | { |
|---|
| 194 | 159 | struct device *dev = DEV(sr->ndev); |
|---|
| 195 | 160 | struct scatterlist *sg = req->src; |
|---|
| 196 | | - struct nitrox_sglist *glist; |
|---|
| 197 | 161 | int i, nents, ret = 0; |
|---|
| 198 | | - dma_addr_t dma; |
|---|
| 199 | | - size_t sz; |
|---|
| 200 | 162 | |
|---|
| 201 | | - nents = sg_nents(req->src); |
|---|
| 163 | + nents = dma_map_sg(dev, req->src, sg_nents(req->src), |
|---|
| 164 | + DMA_BIDIRECTIONAL); |
|---|
| 165 | + if (!nents) |
|---|
| 166 | + return -EINVAL; |
|---|
| 202 | 167 | |
|---|
| 203 | | - /* creater gather list IV and src entries */ |
|---|
| 204 | | - sz = roundup((1 + nents), 4) * sizeof(*glist); |
|---|
| 205 | | - glist = kzalloc(sz, sr->gfp); |
|---|
| 206 | | - if (!glist) |
|---|
| 207 | | - return -ENOMEM; |
|---|
| 168 | + for_each_sg(req->src, sg, nents, i) |
|---|
| 169 | + sr->in.total_bytes += sg_dma_len(sg); |
|---|
| 208 | 170 | |
|---|
| 209 | | - sr->in.sglist = glist; |
|---|
| 210 | | - /* map IV */ |
|---|
| 211 | | - dma = dma_map_single(dev, &req->iv, req->ivsize, DMA_BIDIRECTIONAL); |
|---|
| 212 | | - if (dma_mapping_error(dev, dma)) { |
|---|
| 213 | | - ret = -EINVAL; |
|---|
| 214 | | - goto iv_map_err; |
|---|
| 215 | | - } |
|---|
| 216 | | - |
|---|
| 217 | | - sr->in.dir = (req->src == req->dst) ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE; |
|---|
| 218 | | - /* map src entries */ |
|---|
| 219 | | - nents = dma_map_sg(dev, req->src, nents, sr->in.dir); |
|---|
| 220 | | - if (!nents) { |
|---|
| 221 | | - ret = -EINVAL; |
|---|
| 222 | | - goto src_map_err; |
|---|
| 223 | | - } |
|---|
| 224 | | - sr->in.buf = req->src; |
|---|
| 225 | | - |
|---|
| 226 | | - /* store the mappings */ |
|---|
| 227 | | - glist->len = req->ivsize; |
|---|
| 228 | | - glist->dma = dma; |
|---|
| 229 | | - glist++; |
|---|
| 230 | | - sr->in.total_bytes += req->ivsize; |
|---|
| 231 | | - |
|---|
| 232 | | - for_each_sg(req->src, sg, nents, i) { |
|---|
| 233 | | - glist->len = sg_dma_len(sg); |
|---|
| 234 | | - glist->dma = sg_dma_address(sg); |
|---|
| 235 | | - sr->in.total_bytes += glist->len; |
|---|
| 236 | | - glist++; |
|---|
| 237 | | - } |
|---|
| 238 | | - /* roundup map count to align with entires in sg component */ |
|---|
| 239 | | - sr->in.map_bufs_cnt = (1 + nents); |
|---|
| 240 | | - |
|---|
| 241 | | - /* create NITROX gather component */ |
|---|
| 242 | | - ret = create_sg_component(sr, &sr->in, sr->in.map_bufs_cnt); |
|---|
| 171 | + sr->in.sg = req->src; |
|---|
| 172 | + sr->in.sgmap_cnt = nents; |
|---|
| 173 | + ret = create_sg_component(sr, &sr->in, sr->in.sgmap_cnt); |
|---|
| 243 | 174 | if (ret) |
|---|
| 244 | 175 | goto incomp_err; |
|---|
| 245 | 176 | |
|---|
| 246 | 177 | return 0; |
|---|
| 247 | 178 | |
|---|
| 248 | 179 | incomp_err: |
|---|
| 249 | | - dma_unmap_sg(dev, req->src, nents, sr->in.dir); |
|---|
| 250 | | - sr->in.map_bufs_cnt = 0; |
|---|
| 251 | | -src_map_err: |
|---|
| 252 | | - dma_unmap_single(dev, dma, req->ivsize, DMA_BIDIRECTIONAL); |
|---|
| 253 | | -iv_map_err: |
|---|
| 254 | | - kfree(sr->in.sglist); |
|---|
| 255 | | - sr->in.sglist = NULL; |
|---|
| 180 | + dma_unmap_sg(dev, req->src, nents, DMA_BIDIRECTIONAL); |
|---|
| 181 | + sr->in.sgmap_cnt = 0; |
|---|
| 256 | 182 | return ret; |
|---|
| 257 | 183 | } |
|---|
| 258 | 184 | |
|---|
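`dma_map_inbufs()` no longer maps the IV on its own or mirrors every entry into a shadow `nitrox_sglist`; it maps `req->src` bidirectionally in one call and trusts the caller to have prepended the IV to the source list. A hypothetical caller-side helper showing that layout (the helper and its names are assumptions, not part of this patch):

```c
/* hypothetical: build a src sglist with the IV in slot 0 so that
 * dma_map_inbufs() can map everything with a single dma_map_sg() */
static int example_build_src(struct se_crypto_request *creq,
			     struct scatterlist *data, int data_nents,
			     u8 *iv, int ivsize, gfp_t gfp)
{
	struct scatterlist *sg, *from;
	int i, nents = data_nents + 1;	/* +1 for the IV */

	sg = kcalloc(nents, sizeof(*sg), gfp);
	if (!sg)
		return -ENOMEM;

	sg_init_table(sg, nents);
	sg_set_buf(sg, iv, ivsize);
	for_each_sg(data, from, data_nents, i)
		sg_set_page(&sg[i + 1], sg_page(from), from->length,
			    from->offset);

	creq->src = sg;
	return 0;
}
```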
| .. | .. |
|---|
| 260 | 186 | struct se_crypto_request *req) |
|---|
| 261 | 187 | { |
|---|
| 262 | 188 | struct device *dev = DEV(sr->ndev); |
|---|
| 263 | | - struct nitrox_sglist *glist = sr->in.sglist; |
|---|
| 264 | | - struct nitrox_sglist *slist; |
|---|
| 265 | | - struct scatterlist *sg; |
|---|
| 266 | | - int i, nents, map_bufs_cnt, ret = 0; |
|---|
| 267 | | - size_t sz; |
|---|
| 189 | + int nents, ret = 0; |
|---|
| 268 | 190 | |
|---|
| 269 | | - nents = sg_nents(req->dst); |
|---|
| 191 | + nents = dma_map_sg(dev, req->dst, sg_nents(req->dst), |
|---|
| 192 | + DMA_BIDIRECTIONAL); |
|---|
| 193 | + if (!nents) |
|---|
| 194 | + return -EINVAL; |
|---|
| 270 | 195 | |
|---|
| 271 | | - /* create scatter list ORH, IV, dst entries and Completion header */ |
|---|
| 272 | | - sz = roundup((3 + nents), 4) * sizeof(*slist); |
|---|
| 273 | | - slist = kzalloc(sz, sr->gfp); |
|---|
| 274 | | - if (!slist) |
|---|
| 275 | | - return -ENOMEM; |
|---|
| 276 | | - |
|---|
| 277 | | - sr->out.sglist = slist; |
|---|
| 278 | | - sr->out.dir = DMA_BIDIRECTIONAL; |
|---|
| 279 | | - /* map ORH */ |
|---|
| 280 | | - sr->resp.orh_dma = dma_map_single(dev, &sr->resp.orh, ORH_HLEN, |
|---|
| 281 | | - sr->out.dir); |
|---|
| 282 | | - if (dma_mapping_error(dev, sr->resp.orh_dma)) { |
|---|
| 283 | | - ret = -EINVAL; |
|---|
| 284 | | - goto orh_map_err; |
|---|
| 285 | | - } |
|---|
| 286 | | - |
|---|
| 287 | | - /* map completion */ |
|---|
| 288 | | - sr->resp.completion_dma = dma_map_single(dev, &sr->resp.completion, |
|---|
| 289 | | - COMP_HLEN, sr->out.dir); |
|---|
| 290 | | - if (dma_mapping_error(dev, sr->resp.completion_dma)) { |
|---|
| 291 | | - ret = -EINVAL; |
|---|
| 292 | | - goto compl_map_err; |
|---|
| 293 | | - } |
|---|
| 294 | | - |
|---|
| 295 | | - sr->inplace = (req->src == req->dst) ? true : false; |
|---|
| 296 | | - /* out place */ |
|---|
| 297 | | - if (!sr->inplace) { |
|---|
| 298 | | - nents = dma_map_sg(dev, req->dst, nents, sr->out.dir); |
|---|
| 299 | | - if (!nents) { |
|---|
| 300 | | - ret = -EINVAL; |
|---|
| 301 | | - goto dst_map_err; |
|---|
| 302 | | - } |
|---|
| 303 | | - } |
|---|
| 304 | | - sr->out.buf = req->dst; |
|---|
| 305 | | - |
|---|
| 306 | | - /* store the mappings */ |
|---|
| 307 | | - /* orh */ |
|---|
| 308 | | - slist->len = ORH_HLEN; |
|---|
| 309 | | - slist->dma = sr->resp.orh_dma; |
|---|
| 310 | | - slist++; |
|---|
| 311 | | - |
|---|
| 312 | | - /* copy the glist mappings */ |
|---|
| 313 | | - if (sr->inplace) { |
|---|
| 314 | | - nents = sr->in.map_bufs_cnt - 1; |
|---|
| 315 | | - map_bufs_cnt = sr->in.map_bufs_cnt; |
|---|
| 316 | | - while (map_bufs_cnt--) { |
|---|
| 317 | | - slist->len = glist->len; |
|---|
| 318 | | - slist->dma = glist->dma; |
|---|
| 319 | | - slist++; |
|---|
| 320 | | - glist++; |
|---|
| 321 | | - } |
|---|
| 322 | | - } else { |
|---|
| 323 | | - /* copy iv mapping */ |
|---|
| 324 | | - slist->len = glist->len; |
|---|
| 325 | | - slist->dma = glist->dma; |
|---|
| 326 | | - slist++; |
|---|
| 327 | | - /* copy remaining maps */ |
|---|
| 328 | | - for_each_sg(req->dst, sg, nents, i) { |
|---|
| 329 | | - slist->len = sg_dma_len(sg); |
|---|
| 330 | | - slist->dma = sg_dma_address(sg); |
|---|
| 331 | | - slist++; |
|---|
| 332 | | - } |
|---|
| 333 | | - } |
|---|
| 334 | | - |
|---|
| 335 | | - /* completion */ |
|---|
| 336 | | - slist->len = COMP_HLEN; |
|---|
| 337 | | - slist->dma = sr->resp.completion_dma; |
|---|
| 338 | | - |
|---|
| 339 | | - sr->out.map_bufs_cnt = (3 + nents); |
|---|
| 340 | | - |
|---|
| 341 | | - ret = create_sg_component(sr, &sr->out, sr->out.map_bufs_cnt); |
|---|
| 196 | + sr->out.sg = req->dst; |
|---|
| 197 | + sr->out.sgmap_cnt = nents; |
|---|
| 198 | + ret = create_sg_component(sr, &sr->out, sr->out.sgmap_cnt); |
|---|
| 342 | 199 | if (ret) |
|---|
| 343 | 200 | goto outcomp_map_err; |
|---|
| 344 | 201 | |
|---|
| 345 | 202 | return 0; |
|---|
| 346 | 203 | |
|---|
| 347 | 204 | outcomp_map_err: |
|---|
| 348 | | - if (!sr->inplace) |
|---|
| 349 | | - dma_unmap_sg(dev, req->dst, nents, sr->out.dir); |
|---|
| 350 | | - sr->out.map_bufs_cnt = 0; |
|---|
| 351 | | - sr->out.buf = NULL; |
|---|
| 352 | | -dst_map_err: |
|---|
| 353 | | - dma_unmap_single(dev, sr->resp.completion_dma, COMP_HLEN, sr->out.dir); |
|---|
| 354 | | - sr->resp.completion_dma = 0; |
|---|
| 355 | | -compl_map_err: |
|---|
| 356 | | - dma_unmap_single(dev, sr->resp.orh_dma, ORH_HLEN, sr->out.dir); |
|---|
| 357 | | - sr->resp.orh_dma = 0; |
|---|
| 358 | | -orh_map_err: |
|---|
| 359 | | - kfree(sr->out.sglist); |
|---|
| 360 | | - sr->out.sglist = NULL; |
|---|
| 205 | + dma_unmap_sg(dev, req->dst, nents, DMA_BIDIRECTIONAL); |
|---|
| 206 | + sr->out.sgmap_cnt = 0; |
|---|
| 207 | + sr->out.sg = NULL; |
|---|
| 361 | 208 | return ret; |
|---|
| 362 | 209 | } |
|---|
| 363 | 210 | |
|---|
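`dma_map_outbufs()` likewise sheds the ORH and completion-header mappings and the in-place special case: the destination list handed in by the caller is expected to already carry those slots, mirroring what the deleted copy-from-glist code assembled by hand. A hypothetical destination layout (`nkreq` and the slot order are assumptions):

```c
/* hypothetical dst layout for the rewritten dma_map_outbufs() */
static void example_build_dst(struct scatterlist *dst, int nents,
			      struct nitrox_kcrypt_request *nkreq)
{
	sg_init_table(dst, nents);
	/* slot 0: ORH word, pre-set to PENDING_SIG */
	sg_set_buf(&dst[0], &nkreq->orh, ORH_HLEN);
	/* slots 1..nents-2: IV and the output data buffers */
	/* last slot: completion word, also pre-set to PENDING_SIG */
	sg_set_buf(&dst[nents - 1], &nkreq->comp, COMP_HLEN);
}
```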
| .. | .. |
|---|
| 382 | 229 | { |
|---|
| 383 | 230 | INIT_LIST_HEAD(&sr->backlog); |
|---|
| 384 | 231 | |
|---|
| 385 | | - spin_lock_bh(&cmdq->backlog_lock); |
|---|
| 232 | + spin_lock_bh(&cmdq->backlog_qlock); |
|---|
| 386 | 233 | list_add_tail(&sr->backlog, &cmdq->backlog_head); |
|---|
| 387 | 234 | atomic_inc(&cmdq->backlog_count); |
|---|
| 388 | 235 | atomic_set(&sr->status, REQ_BACKLOG); |
|---|
| 389 | | - spin_unlock_bh(&cmdq->backlog_lock); |
|---|
| 236 | + spin_unlock_bh(&cmdq->backlog_qlock); |
|---|
| 390 | 237 | } |
|---|
| 391 | 238 | |
|---|
| 392 | 239 | static inline void response_list_add(struct nitrox_softreq *sr, |
|---|
| .. | .. |
|---|
| 394 | 241 | { |
|---|
| 395 | 242 | INIT_LIST_HEAD(&sr->response); |
|---|
| 396 | 243 | |
|---|
| 397 | | - spin_lock_bh(&cmdq->response_lock); |
|---|
| 244 | + spin_lock_bh(&cmdq->resp_qlock); |
|---|
| 398 | 245 | list_add_tail(&sr->response, &cmdq->response_head); |
|---|
| 399 | | - spin_unlock_bh(&cmdq->response_lock); |
|---|
| 246 | + spin_unlock_bh(&cmdq->resp_qlock); |
|---|
| 400 | 247 | } |
|---|
| 401 | 248 | |
|---|
| 402 | 249 | static inline void response_list_del(struct nitrox_softreq *sr, |
|---|
| 403 | 250 | struct nitrox_cmdq *cmdq) |
|---|
| 404 | 251 | { |
|---|
| 405 | | - spin_lock_bh(&cmdq->response_lock); |
|---|
| 252 | + spin_lock_bh(&cmdq->resp_qlock); |
|---|
| 406 | 253 | list_del(&sr->response); |
|---|
| 407 | | - spin_unlock_bh(&cmdq->response_lock); |
|---|
| 254 | + spin_unlock_bh(&cmdq->resp_qlock); |
|---|
| 408 | 255 | } |
|---|
| 409 | 256 | |
|---|
| 410 | 257 | static struct nitrox_softreq * |
|---|
| .. | .. |
|---|
| 422 | 269 | smp_mb__after_atomic(); |
|---|
| 423 | 270 | return true; |
|---|
| 424 | 271 | } |
|---|
| 272 | + /* sync with other cpus */ |
|---|
| 273 | + smp_mb__after_atomic(); |
|---|
| 425 | 274 | return false; |
|---|
| 426 | 275 | } |
|---|
| 427 | 276 | |
|---|
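With the two added lines, `cmdq_full()` issues its barrier on both exits, so the `pending_count` update is published whether or not the queue turned out to be full. The full helper, reconstructed from the surviving context (the elided opening lines are assumed):

```c
static bool cmdq_full(struct nitrox_cmdq *cmdq, int qlen)
{
	if (atomic_inc_return(&cmdq->pending_count) > qlen) {
		atomic_dec(&cmdq->pending_count);
		/* sync with other cpus */
		smp_mb__after_atomic();
		return true;
	}
	/* sync with other cpus */
	smp_mb__after_atomic();
	return false;
}
```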
| .. | .. |
|---|
| 439 | 288 | int idx; |
|---|
| 440 | 289 | u8 *ent; |
|---|
| 441 | 290 | |
|---|
| 442 | | - spin_lock_bh(&cmdq->cmdq_lock); |
|---|
| 291 | + spin_lock_bh(&cmdq->cmd_qlock); |
|---|
| 443 | 292 | |
|---|
| 444 | 293 | idx = cmdq->write_idx; |
|---|
| 445 | 294 | /* copy the instruction */ |
|---|
| 446 | | - ent = cmdq->head + (idx * cmdq->instr_size); |
|---|
| 295 | + ent = cmdq->base + (idx * cmdq->instr_size); |
|---|
| 447 | 296 | memcpy(ent, &sr->instr, cmdq->instr_size); |
|---|
| 448 | 297 | |
|---|
| 449 | 298 | atomic_set(&sr->status, REQ_POSTED); |
|---|
| .. | .. |
|---|
| 454 | 303 | |
|---|
| 455 | 304 | /* Ring doorbell with count 1 */ |
|---|
| 456 | 305 | writeq(1, cmdq->dbell_csr_addr); |
|---|
| 457 | | - /* orders the doorbell rings */ |
|---|
| 458 | | - mmiowb(); |
|---|
| 459 | 306 | |
|---|
| 460 | 307 | cmdq->write_idx = incr_index(idx, 1, ndev->qlen); |
|---|
| 461 | 308 | |
|---|
| 462 | | - spin_unlock_bh(&cmdq->cmdq_lock); |
|---|
| 309 | + spin_unlock_bh(&cmdq->cmd_qlock); |
|---|
| 310 | + |
|---|
| 311 | + /* increment the posted command count */ |
|---|
| 312 | + atomic64_inc(&ndev->stats.posted); |
|---|
| 463 | 313 | } |
|---|
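`post_se_instr()` drops the explicit `mmiowb()` after the doorbell write (in line with the tree-wide retirement of that barrier) and counts every posted command. Together with the `dropped` and `completed` counters used later in this patch, that implies a per-device stats block along these lines (an inference, not shown in this diff):

```c
/* inferred from the three counters this patch increments */
struct nitrox_stats {
	atomic64_t posted;	/* instructions written to a command queue */
	atomic64_t dropped;	/* rejected: queue full, may not backlog */
	atomic64_t completed;	/* responses reaped from a response list */
};
```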
| 464 | 314 | |
|---|
| 465 | 315 | static int post_backlog_cmds(struct nitrox_cmdq *cmdq) |
|---|
| .. | .. |
|---|
| 471 | 321 | if (!atomic_read(&cmdq->backlog_count)) |
|---|
| 472 | 322 | return 0; |
|---|
| 473 | 323 | |
|---|
| 474 | | - spin_lock_bh(&cmdq->backlog_lock); |
|---|
| 324 | + spin_lock_bh(&cmdq->backlog_qlock); |
|---|
| 475 | 325 | |
|---|
| 476 | 326 | list_for_each_entry_safe(sr, tmp, &cmdq->backlog_head, backlog) { |
|---|
| 477 | | - struct skcipher_request *skreq; |
|---|
| 478 | | - |
|---|
| 479 | 327 | /* submit until space available */ |
|---|
| 480 | 328 | if (unlikely(cmdq_full(cmdq, ndev->qlen))) { |
|---|
| 481 | 329 | ret = -ENOSPC; |
|---|
| .. | .. |
|---|
| 487 | 335 | /* sync with other cpus */ |
|---|
| 488 | 336 | smp_mb__after_atomic(); |
|---|
| 489 | 337 | |
|---|
| 490 | | - skreq = sr->skreq; |
|---|
| 491 | 338 | /* post the command */ |
|---|
| 492 | 339 | post_se_instr(sr, cmdq); |
|---|
| 493 | | - |
|---|
| 494 | | - /* backlog requests are posted, wakeup with -EINPROGRESS */ |
|---|
| 495 | | - skcipher_request_complete(skreq, -EINPROGRESS); |
|---|
| 496 | 340 | } |
|---|
| 497 | | - spin_unlock_bh(&cmdq->backlog_lock); |
|---|
| 341 | + spin_unlock_bh(&cmdq->backlog_qlock); |
|---|
| 498 | 342 | |
|---|
| 499 | 343 | return ret; |
|---|
| 500 | 344 | } |
|---|
| .. | .. |
|---|
| 508 | 352 | post_backlog_cmds(cmdq); |
|---|
| 509 | 353 | |
|---|
| 510 | 354 | if (unlikely(cmdq_full(cmdq, ndev->qlen))) { |
|---|
| 511 | | - if (!(sr->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) |
|---|
| 355 | + if (!(sr->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) { |
|---|
| 356 | + /* increment drop count */ |
|---|
| 357 | + atomic64_inc(&ndev->stats.dropped); |
|---|
| 512 | 358 | return -ENOSPC; |
|---|
| 359 | + } |
|---|
| 513 | 360 | /* add to backlog list */ |
|---|
| 514 | 361 | backlog_list_add(sr, cmdq); |
|---|
| 515 | | - return -EBUSY; |
|---|
| 362 | + return -EINPROGRESS; |
|---|
| 516 | 363 | } |
|---|
| 517 | 364 | post_se_instr(sr, cmdq); |
|---|
| 518 | 365 | |
|---|
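On a full queue, a request without `CRYPTO_TFM_REQ_MAY_BACKLOG` is now counted as dropped and rejected with `-ENOSPC`, while a backlogged one reports `-EINPROGRESS` rather than the previous `-EBUSY`. A hypothetical caller-side wrapper showing how the new codes read (the wrapper itself is an assumption):

```c
static int example_submit(struct nitrox_device *ndev,
			  struct se_crypto_request *creq,
			  completion_t done, void *cb_arg)
{
	int ret = nitrox_process_se_request(ndev, creq, done, cb_arg);

	/* -EINPROGRESS: posted now, or parked on the backlog list;
	 * -ENOSPC: queue full with CRYPTO_TFM_REQ_MAY_BACKLOG unset
	 * (already accounted in ndev->stats.dropped) */
	return ret;
}
```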
| .. | .. |
|---|
| 529 | 376 | int nitrox_process_se_request(struct nitrox_device *ndev, |
|---|
| 530 | 377 | struct se_crypto_request *req, |
|---|
| 531 | 378 | completion_t callback, |
|---|
| 532 | | - struct skcipher_request *skreq) |
|---|
| 379 | + void *cb_arg) |
|---|
| 533 | 380 | { |
|---|
| 534 | 381 | struct nitrox_softreq *sr; |
|---|
| 535 | 382 | dma_addr_t ctx_handle = 0; |
|---|
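The final parameter becomes an opaque `void *cb_arg`, decoupling the request manager from the skcipher layer so other users (e.g. AEAD) can pass their own context. From the `callback(cb_arg, err)` call site further down, the callback type presumably becomes:

```c
/* presumed typedef after this patch; its header location is an assumption */
typedef void (*completion_t)(void *cb_arg, int err);
```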
| .. | .. |
|---|
| 546 | 393 | sr->flags = req->flags; |
|---|
| 547 | 394 | sr->gfp = req->gfp; |
|---|
| 548 | 395 | sr->callback = callback; |
|---|
| 549 | | - sr->skreq = skreq; |
|---|
| 396 | + sr->cb_arg = cb_arg; |
|---|
| 550 | 397 | |
|---|
| 551 | 398 | atomic_set(&sr->status, REQ_NOT_POSTED); |
|---|
| 552 | 399 | |
|---|
| 553 | | - WRITE_ONCE(sr->resp.orh, PENDING_SIG); |
|---|
| 554 | | - WRITE_ONCE(sr->resp.completion, PENDING_SIG); |
|---|
| 400 | + sr->resp.orh = req->orh; |
|---|
| 401 | + sr->resp.completion = req->comp; |
|---|
| 555 | 402 | |
|---|
| 556 | 403 | ret = softreq_map_iobuf(sr, req); |
|---|
| 557 | 404 | if (ret) { |
|---|
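`sr->resp.orh` and `sr->resp.completion` now point at caller-owned words (`req->orh`, `req->comp`) instead of softreq fields pre-set to `PENDING_SIG`, so resetting the sentinels becomes the caller's job. A hypothetical wiring, assuming a `nitrox_kcrypt_request` container:

```c
/* hypothetical: the caller owns both response words and must reset
 * them to PENDING_SIG before each submission */
static void example_init_resp(struct nitrox_kcrypt_request *nkreq)
{
	struct se_crypto_request *creq = &nkreq->creq;

	creq->orh = &nkreq->orh;	/* u64, written by the device */
	creq->comp = &nkreq->comp;	/* u64, written by the device */
	*creq->orh = PENDING_SIG;
	*creq->comp = PENDING_SIG;
}
```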
| .. | .. |
|---|
| 572 | 419 | /* select the queue */ |
|---|
| 573 | 420 | qno = smp_processor_id() % ndev->nr_queues; |
|---|
| 574 | 421 | |
|---|
| 575 | | - sr->cmdq = &ndev->pkt_cmdqs[qno]; |
|---|
| 422 | + sr->cmdq = &ndev->pkt_inq[qno]; |
|---|
| 576 | 423 | |
|---|
| 577 | 424 | /* |
|---|
| 578 | 425 | * 64-Byte Instruction Format |
|---|
| .. | .. |
|---|
| 592 | 439 | |
|---|
| 593 | 440 | /* fill the packet instruction */ |
|---|
| 594 | 441 | /* word 0 */ |
|---|
| 595 | | - sr->instr.dptr0 = cpu_to_be64(sr->in.dma); |
|---|
| 442 | + sr->instr.dptr0 = cpu_to_be64(sr->in.sgcomp_dma); |
|---|
| 596 | 443 | |
|---|
| 597 | 444 | /* word 1 */ |
|---|
| 598 | 445 | sr->instr.ih.value = 0; |
|---|
| 599 | 446 | sr->instr.ih.s.g = 1; |
|---|
| 600 | | - sr->instr.ih.s.gsz = sr->in.map_bufs_cnt; |
|---|
| 601 | | - sr->instr.ih.s.ssz = sr->out.map_bufs_cnt; |
|---|
| 447 | + sr->instr.ih.s.gsz = sr->in.sgmap_cnt; |
|---|
| 448 | + sr->instr.ih.s.ssz = sr->out.sgmap_cnt; |
|---|
| 602 | 449 | sr->instr.ih.s.fsz = FDATA_SIZE + sizeof(struct gphdr); |
|---|
| 603 | 450 | sr->instr.ih.s.tlen = sr->instr.ih.s.fsz + sr->in.total_bytes; |
|---|
| 604 | 451 | sr->instr.ih.value = cpu_to_be64(sr->instr.ih.value); |
|---|
| .. | .. |
|---|
| 620 | 467 | |
|---|
| 621 | 468 | /* word 4 */ |
|---|
| 622 | 469 | sr->instr.slc.value[0] = 0; |
|---|
| 623 | | - sr->instr.slc.s.ssz = sr->out.map_bufs_cnt; |
|---|
| 470 | + sr->instr.slc.s.ssz = sr->out.sgmap_cnt; |
|---|
| 624 | 471 | sr->instr.slc.value[0] = cpu_to_be64(sr->instr.slc.value[0]); |
|---|
| 625 | 472 | |
|---|
| 626 | 473 | /* word 5 */ |
|---|
| 627 | | - sr->instr.slc.s.rptr = cpu_to_be64(sr->out.dma); |
|---|
| 474 | + sr->instr.slc.s.rptr = cpu_to_be64(sr->out.sgcomp_dma); |
|---|
| 628 | 475 | |
|---|
| 629 | 476 | /* |
|---|
| 630 | 477 | * No conversion for front data, |
|---|
| .. | .. |
|---|
| 658 | 505 | post_backlog_cmds(cmdq); |
|---|
| 659 | 506 | } |
|---|
| 660 | 507 | |
|---|
| 508 | +static bool sr_completed(struct nitrox_softreq *sr) |
|---|
| 509 | +{ |
|---|
| 510 | + u64 orh = READ_ONCE(*sr->resp.orh); |
|---|
| 511 | + unsigned long timeout = jiffies + msecs_to_jiffies(1); |
|---|
| 512 | + |
|---|
| 513 | + if ((orh != PENDING_SIG) && (orh & 0xff)) |
|---|
| 514 | + return true; |
|---|
| 515 | + |
|---|
| 516 | + while (READ_ONCE(*sr->resp.completion) == PENDING_SIG) { |
|---|
| 517 | + if (time_after(jiffies, timeout)) { |
|---|
| 518 | + pr_err("comp not done\n"); |
|---|
| 519 | + return false; |
|---|
| 520 | + } |
|---|
| 521 | + } |
|---|
| 522 | + |
|---|
| 523 | + return true; |
|---|
| 524 | +} |
|---|
| 525 | + |
|---|
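`sr_completed()` declares a request finished as soon as the ORH leaves its `PENDING_SIG` reset value with a non-zero status byte, and otherwise polls up to roughly one millisecond for the completion word to flip. Once it returns true, the status is read out of the ORH's low byte, as in this small sketch of the consuming side (the helper name is illustrative):

```c
/* sketch: how process_response_list() interprets the ORH afterwards;
 * the low byte is the device status, 0 on success */
static inline int nitrox_orh_error(const struct nitrox_softreq *sr)
{
	return READ_ONCE(*sr->resp.orh) & 0xff;
}
```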
| 661 | 526 | /** |
|---|
| 662 | 527 | * process_request_list - process completed requests |
|---|
| 663 | 528 | * @ndev: N5 device |
|---|
| .. | .. |
|---|
| 669 | 534 | { |
|---|
| 670 | 535 | struct nitrox_device *ndev = cmdq->ndev; |
|---|
| 671 | 536 | struct nitrox_softreq *sr; |
|---|
| 672 | | - struct skcipher_request *skreq; |
|---|
| 673 | | - completion_t callback; |
|---|
| 674 | 537 | int req_completed = 0, err = 0, budget; |
|---|
| 538 | + completion_t callback; |
|---|
| 539 | + void *cb_arg; |
|---|
| 675 | 540 | |
|---|
| 676 | 541 | /* check all pending requests */ |
|---|
| 677 | 542 | budget = atomic_read(&cmdq->pending_count); |
|---|
| .. | .. |
|---|
| 685 | 550 | break; |
|---|
| 686 | 551 | |
|---|
| 687 | 552 | /* check orh and completion bytes updates */ |
|---|
| 688 | | - if (READ_ONCE(sr->resp.orh) == READ_ONCE(sr->resp.completion)) { |
|---|
| 553 | + if (!sr_completed(sr)) { |
|---|
| 689 | 554 | /* request not completed, check for timeout */ |
|---|
| 690 | 555 | if (!cmd_timeout(sr->tstamp, ndev->timeout)) |
|---|
| 691 | 556 | break; |
|---|
| 692 | 557 | dev_err_ratelimited(DEV(ndev), |
|---|
| 693 | 558 | "Request timeout, orh 0x%016llx\n", |
|---|
| 694 | | - READ_ONCE(sr->resp.orh)); |
|---|
| 559 | + READ_ONCE(*sr->resp.orh)); |
|---|
| 695 | 560 | } |
|---|
| 696 | 561 | atomic_dec(&cmdq->pending_count); |
|---|
| 562 | + atomic64_inc(&ndev->stats.completed); |
|---|
| 697 | 563 | /* sync with other cpus */ |
|---|
| 698 | 564 | smp_mb__after_atomic(); |
|---|
| 699 | 565 | /* remove from response list */ |
|---|
| 700 | 566 | response_list_del(sr, cmdq); |
|---|
| 701 | | - |
|---|
| 702 | | - callback = sr->callback; |
|---|
| 703 | | - skreq = sr->skreq; |
|---|
| 704 | | - |
|---|
| 705 | 567 | /* ORH error code */ |
|---|
| 706 | | - err = READ_ONCE(sr->resp.orh) & 0xff; |
|---|
| 568 | + err = READ_ONCE(*sr->resp.orh) & 0xff; |
|---|
| 569 | + callback = sr->callback; |
|---|
| 570 | + cb_arg = sr->cb_arg; |
|---|
| 707 | 571 | softreq_destroy(sr); |
|---|
| 708 | | - |
|---|
| 709 | 572 | if (callback) |
|---|
| 710 | | - callback(skreq, err); |
|---|
| 573 | + callback(cb_arg, err); |
|---|
| 711 | 574 | |
|---|
| 712 | 575 | req_completed++; |
|---|
| 713 | 576 | } |
|---|
| 714 | 577 | } |
|---|
| 715 | 578 | |
|---|
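`process_response_list()` now snapshots `callback` and `cb_arg` before `softreq_destroy()` frees the softreq, then invokes the callback with the opaque argument. A hypothetical handler matching the new shape (the skcipher glue here is an assumption, not from this patch):

```c
static void example_skcipher_callback(void *cb_arg, int err)
{
	struct skcipher_request *skreq = cb_arg;

	/* release any caller-owned buffers here, then complete upward */
	skcipher_request_complete(skreq, err);
}
```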
| 716 | 579 | /** |
|---|
| 717 | | - * pkt_slc_resp_handler - post processing of SE responses |
|---|
| 580 | + * pkt_slc_resp_tasklet - post processing of SE responses |
|---|
| 718 | 581 | */ |
|---|
| 719 | | -void pkt_slc_resp_handler(unsigned long data) |
|---|
| 582 | +void pkt_slc_resp_tasklet(unsigned long data) |
|---|
| 720 | 583 | { |
|---|
| 721 | | - struct bh_data *bh = (void *)(uintptr_t)(data); |
|---|
| 722 | | - struct nitrox_cmdq *cmdq = bh->cmdq; |
|---|
| 723 | | - union nps_pkt_slc_cnts pkt_slc_cnts; |
|---|
| 584 | + struct nitrox_q_vector *qvec = (void *)(uintptr_t)(data); |
|---|
| 585 | + struct nitrox_cmdq *cmdq = qvec->cmdq; |
|---|
| 586 | + union nps_pkt_slc_cnts slc_cnts; |
|---|
| 724 | 587 | |
|---|
| 725 | 588 | /* read completion count */ |
|---|
| 726 | | - pkt_slc_cnts.value = readq(bh->completion_cnt_csr_addr); |
|---|
| 589 | + slc_cnts.value = readq(cmdq->compl_cnt_csr_addr); |
|---|
| 727 | 590 | /* resend the interrupt if more work to do */ |
|---|
| 728 | | - pkt_slc_cnts.s.resend = 1; |
|---|
| 591 | + slc_cnts.s.resend = 1; |
|---|
| 729 | 592 | |
|---|
| 730 | 593 | process_response_list(cmdq); |
|---|
| 731 | 594 | |
|---|
| .. | .. |
|---|
| 733 | 596 | * clear the interrupt with resend bit enabled, |
|---|
| 734 | 597 | * MSI-X interrupt generates if Completion count > Threshold |
|---|
| 735 | 598 | */ |
|---|
| 736 | | - writeq(pkt_slc_cnts.value, bh->completion_cnt_csr_addr); |
|---|
| 737 | | - /* order the writes */ |
|---|
| 738 | | - mmiowb(); |
|---|
| 599 | + writeq(slc_cnts.value, cmdq->compl_cnt_csr_addr); |
|---|
| 739 | 600 | |
|---|
| 740 | 601 | if (atomic_read(&cmdq->backlog_count)) |
|---|
| 741 | 602 | schedule_work(&cmdq->backlog_qflush); |
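The renamed tasklet takes a `nitrox_q_vector` instead of the old `bh_data`, and the completion-count CSR address now lives on the command queue itself (`cmdq->compl_cnt_csr_addr`). The per-vector context, as far as this hunk pins it down (everything beyond `cmdq` is an assumption):

```c
/* inferred from this hunk; only the cmdq member is certain */
struct nitrox_q_vector {
	struct nitrox_cmdq *cmdq;   /* queue pair this vector services */
	/* presumably also: vector number, IRQ name, ndev back-pointer */
};
```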
|---|