forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-11 04dd17822334871b23ea2862f7798fb0e0007777
kernel/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
@@ -13,7 +13,6 @@
 #define FDATA_SIZE 32
 /* Base destination port for the solicited requests */
 #define SOLICIT_BASE_DPORT 256
-#define PENDING_SIG 0xFFFFFFFFFFFFFFFFUL
 
 #define REQ_NOT_POSTED 1
 #define REQ_BACKLOG 2
@@ -52,58 +51,26 @@
 	return index;
 }
 
-/**
- * dma_free_sglist - unmap and free the sg lists.
- * @ndev: N5 device
- * @sgtbl: SG table
- */
 static void softreq_unmap_sgbufs(struct nitrox_softreq *sr)
 {
 	struct nitrox_device *ndev = sr->ndev;
 	struct device *dev = DEV(ndev);
-	struct nitrox_sglist *sglist;
 
-	/* unmap in sgbuf */
-	sglist = sr->in.sglist;
-	if (!sglist)
-		goto out_unmap;
 
-	/* unmap iv */
-	dma_unmap_single(dev, sglist->dma, sglist->len, DMA_BIDIRECTIONAL);
-	/* unmpa src sglist */
-	dma_unmap_sg(dev, sr->in.buf, (sr->in.map_bufs_cnt - 1), sr->in.dir);
-	/* unamp gather component */
-	dma_unmap_single(dev, sr->in.dma, sr->in.len, DMA_TO_DEVICE);
-	kfree(sr->in.sglist);
+	dma_unmap_sg(dev, sr->in.sg, sr->in.sgmap_cnt, DMA_BIDIRECTIONAL);
+	dma_unmap_single(dev, sr->in.sgcomp_dma, sr->in.sgcomp_len,
+			 DMA_TO_DEVICE);
 	kfree(sr->in.sgcomp);
-	sr->in.sglist = NULL;
-	sr->in.buf = NULL;
-	sr->in.map_bufs_cnt = 0;
+	sr->in.sg = NULL;
+	sr->in.sgmap_cnt = 0;
 
-out_unmap:
-	/* unmap out sgbuf */
-	sglist = sr->out.sglist;
-	if (!sglist)
-		return;
-
-	/* unmap orh */
-	dma_unmap_single(dev, sr->resp.orh_dma, ORH_HLEN, sr->out.dir);
-
-	/* unmap dst sglist */
-	if (!sr->inplace) {
-		dma_unmap_sg(dev, sr->out.buf, (sr->out.map_bufs_cnt - 3),
-			     sr->out.dir);
-	}
-	/* unmap completion */
-	dma_unmap_single(dev, sr->resp.completion_dma, COMP_HLEN, sr->out.dir);
-
-	/* unmap scatter component */
-	dma_unmap_single(dev, sr->out.dma, sr->out.len, DMA_TO_DEVICE);
-	kfree(sr->out.sglist);
+	dma_unmap_sg(dev, sr->out.sg, sr->out.sgmap_cnt,
+		     DMA_BIDIRECTIONAL);
+	dma_unmap_single(dev, sr->out.sgcomp_dma, sr->out.sgcomp_len,
+			 DMA_TO_DEVICE);
 	kfree(sr->out.sgcomp);
-	sr->out.sglist = NULL;
-	sr->out.buf = NULL;
-	sr->out.map_bufs_cnt = 0;
+	sr->out.sg = NULL;
+	sr->out.sgmap_cnt = 0;
 }
 
 static void softreq_destroy(struct nitrox_softreq *sr)
@@ -116,7 +83,7 @@
  * create_sg_component - create SG componets for N5 device.
  * @sr: Request structure
  * @sgtbl: SG table
- * @nr_comp: total number of components required
+ * @map_nents: number of dma mapped entries
  *
  * Component structure
  *
@@ -140,7 +107,7 @@
 {
 	struct nitrox_device *ndev = sr->ndev;
 	struct nitrox_sgcomp *sgcomp;
-	struct nitrox_sglist *sglist;
+	struct scatterlist *sg;
 	dma_addr_t dma;
 	size_t sz_comp;
 	int i, j, nr_sgcomp;
@@ -154,17 +121,15 @@
 		return -ENOMEM;
 
 	sgtbl->sgcomp = sgcomp;
-	sgtbl->nr_sgcomp = nr_sgcomp;
 
-	sglist = sgtbl->sglist;
+	sg = sgtbl->sg;
 	/* populate device sg component */
 	for (i = 0; i < nr_sgcomp; i++) {
-		for (j = 0; j < 4; j++) {
-			sgcomp->len[j] = cpu_to_be16(sglist->len);
-			sgcomp->dma[j] = cpu_to_be64(sglist->dma);
-			sglist++;
+		for (j = 0; j < 4 && sg; j++) {
+			sgcomp[i].len[j] = cpu_to_be16(sg_dma_len(sg));
+			sgcomp[i].dma[j] = cpu_to_be64(sg_dma_address(sg));
+			sg = sg_next(sg);
 		}
-		sgcomp++;
 	}
 	/* map the device sg component */
 	dma = dma_map_single(DEV(ndev), sgtbl->sgcomp, sz_comp, DMA_TO_DEVICE);
@@ -174,8 +139,8 @@
 		return -ENOMEM;
 	}
 
-	sgtbl->dma = dma;
-	sgtbl->len = sz_comp;
+	sgtbl->sgcomp_dma = dma;
+	sgtbl->sgcomp_len = sz_comp;
 
 	return 0;
 }
@@ -193,66 +158,27 @@
 {
 	struct device *dev = DEV(sr->ndev);
 	struct scatterlist *sg = req->src;
-	struct nitrox_sglist *glist;
 	int i, nents, ret = 0;
-	dma_addr_t dma;
-	size_t sz;
 
-	nents = sg_nents(req->src);
+	nents = dma_map_sg(dev, req->src, sg_nents(req->src),
+			   DMA_BIDIRECTIONAL);
+	if (!nents)
+		return -EINVAL;
 
-	/* creater gather list IV and src entries */
-	sz = roundup((1 + nents), 4) * sizeof(*glist);
-	glist = kzalloc(sz, sr->gfp);
-	if (!glist)
-		return -ENOMEM;
+	for_each_sg(req->src, sg, nents, i)
+		sr->in.total_bytes += sg_dma_len(sg);
 
-	sr->in.sglist = glist;
-	/* map IV */
-	dma = dma_map_single(dev, &req->iv, req->ivsize, DMA_BIDIRECTIONAL);
-	if (dma_mapping_error(dev, dma)) {
-		ret = -EINVAL;
-		goto iv_map_err;
-	}
-
-	sr->in.dir = (req->src == req->dst) ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
-	/* map src entries */
-	nents = dma_map_sg(dev, req->src, nents, sr->in.dir);
-	if (!nents) {
-		ret = -EINVAL;
-		goto src_map_err;
-	}
-	sr->in.buf = req->src;
-
-	/* store the mappings */
-	glist->len = req->ivsize;
-	glist->dma = dma;
-	glist++;
-	sr->in.total_bytes += req->ivsize;
-
-	for_each_sg(req->src, sg, nents, i) {
-		glist->len = sg_dma_len(sg);
-		glist->dma = sg_dma_address(sg);
-		sr->in.total_bytes += glist->len;
-		glist++;
-	}
-	/* roundup map count to align with entires in sg component */
-	sr->in.map_bufs_cnt = (1 + nents);
-
-	/* create NITROX gather component */
-	ret = create_sg_component(sr, &sr->in, sr->in.map_bufs_cnt);
+	sr->in.sg = req->src;
+	sr->in.sgmap_cnt = nents;
+	ret = create_sg_component(sr, &sr->in, sr->in.sgmap_cnt);
 	if (ret)
 		goto incomp_err;
 
 	return 0;
 
 incomp_err:
-	dma_unmap_sg(dev, req->src, nents, sr->in.dir);
-	sr->in.map_bufs_cnt = 0;
-src_map_err:
-	dma_unmap_single(dev, dma, req->ivsize, DMA_BIDIRECTIONAL);
-iv_map_err:
-	kfree(sr->in.sglist);
-	sr->in.sglist = NULL;
+	dma_unmap_sg(dev, req->src, nents, DMA_BIDIRECTIONAL);
+	sr->in.sgmap_cnt = 0;
 	return ret;
 }
 
@@ -260,104 +186,25 @@
 				  struct se_crypto_request *req)
 {
 	struct device *dev = DEV(sr->ndev);
-	struct nitrox_sglist *glist = sr->in.sglist;
-	struct nitrox_sglist *slist;
-	struct scatterlist *sg;
-	int i, nents, map_bufs_cnt, ret = 0;
-	size_t sz;
+	int nents, ret = 0;
 
-	nents = sg_nents(req->dst);
+	nents = dma_map_sg(dev, req->dst, sg_nents(req->dst),
+			   DMA_BIDIRECTIONAL);
+	if (!nents)
+		return -EINVAL;
 
-	/* create scatter list ORH, IV, dst entries and Completion header */
-	sz = roundup((3 + nents), 4) * sizeof(*slist);
-	slist = kzalloc(sz, sr->gfp);
-	if (!slist)
-		return -ENOMEM;
-
-	sr->out.sglist = slist;
-	sr->out.dir = DMA_BIDIRECTIONAL;
-	/* map ORH */
-	sr->resp.orh_dma = dma_map_single(dev, &sr->resp.orh, ORH_HLEN,
-					  sr->out.dir);
-	if (dma_mapping_error(dev, sr->resp.orh_dma)) {
-		ret = -EINVAL;
-		goto orh_map_err;
-	}
-
-	/* map completion */
-	sr->resp.completion_dma = dma_map_single(dev, &sr->resp.completion,
-						 COMP_HLEN, sr->out.dir);
-	if (dma_mapping_error(dev, sr->resp.completion_dma)) {
-		ret = -EINVAL;
-		goto compl_map_err;
-	}
-
-	sr->inplace = (req->src == req->dst) ? true : false;
-	/* out place */
-	if (!sr->inplace) {
-		nents = dma_map_sg(dev, req->dst, nents, sr->out.dir);
-		if (!nents) {
-			ret = -EINVAL;
-			goto dst_map_err;
-		}
-	}
-	sr->out.buf = req->dst;
-
-	/* store the mappings */
-	/* orh */
-	slist->len = ORH_HLEN;
-	slist->dma = sr->resp.orh_dma;
-	slist++;
-
-	/* copy the glist mappings */
-	if (sr->inplace) {
-		nents = sr->in.map_bufs_cnt - 1;
-		map_bufs_cnt = sr->in.map_bufs_cnt;
-		while (map_bufs_cnt--) {
-			slist->len = glist->len;
-			slist->dma = glist->dma;
-			slist++;
-			glist++;
-		}
-	} else {
-		/* copy iv mapping */
-		slist->len = glist->len;
-		slist->dma = glist->dma;
-		slist++;
-		/* copy remaining maps */
-		for_each_sg(req->dst, sg, nents, i) {
-			slist->len = sg_dma_len(sg);
-			slist->dma = sg_dma_address(sg);
-			slist++;
-		}
-	}
-
-	/* completion */
-	slist->len = COMP_HLEN;
-	slist->dma = sr->resp.completion_dma;
-
-	sr->out.map_bufs_cnt = (3 + nents);
-
-	ret = create_sg_component(sr, &sr->out, sr->out.map_bufs_cnt);
+	sr->out.sg = req->dst;
+	sr->out.sgmap_cnt = nents;
+	ret = create_sg_component(sr, &sr->out, sr->out.sgmap_cnt);
 	if (ret)
 		goto outcomp_map_err;
 
 	return 0;
 
 outcomp_map_err:
-	if (!sr->inplace)
-		dma_unmap_sg(dev, req->dst, nents, sr->out.dir);
-	sr->out.map_bufs_cnt = 0;
-	sr->out.buf = NULL;
-dst_map_err:
-	dma_unmap_single(dev, sr->resp.completion_dma, COMP_HLEN, sr->out.dir);
-	sr->resp.completion_dma = 0;
-compl_map_err:
-	dma_unmap_single(dev, sr->resp.orh_dma, ORH_HLEN, sr->out.dir);
-	sr->resp.orh_dma = 0;
-orh_map_err:
-	kfree(sr->out.sglist);
-	sr->out.sglist = NULL;
+	dma_unmap_sg(dev, req->dst, nents, DMA_BIDIRECTIONAL);
+	sr->out.sgmap_cnt = 0;
+	sr->out.sg = NULL;
 	return ret;
 }
 
@@ -382,11 +229,11 @@
 {
 	INIT_LIST_HEAD(&sr->backlog);
 
-	spin_lock_bh(&cmdq->backlog_lock);
+	spin_lock_bh(&cmdq->backlog_qlock);
 	list_add_tail(&sr->backlog, &cmdq->backlog_head);
 	atomic_inc(&cmdq->backlog_count);
 	atomic_set(&sr->status, REQ_BACKLOG);
-	spin_unlock_bh(&cmdq->backlog_lock);
+	spin_unlock_bh(&cmdq->backlog_qlock);
 }
 
 static inline void response_list_add(struct nitrox_softreq *sr,
@@ -394,17 +241,17 @@
 {
 	INIT_LIST_HEAD(&sr->response);
 
-	spin_lock_bh(&cmdq->response_lock);
+	spin_lock_bh(&cmdq->resp_qlock);
 	list_add_tail(&sr->response, &cmdq->response_head);
-	spin_unlock_bh(&cmdq->response_lock);
+	spin_unlock_bh(&cmdq->resp_qlock);
 }
 
 static inline void response_list_del(struct nitrox_softreq *sr,
 				     struct nitrox_cmdq *cmdq)
 {
-	spin_lock_bh(&cmdq->response_lock);
+	spin_lock_bh(&cmdq->resp_qlock);
 	list_del(&sr->response);
-	spin_unlock_bh(&cmdq->response_lock);
+	spin_unlock_bh(&cmdq->resp_qlock);
 }
 
 static struct nitrox_softreq *
@@ -422,6 +269,8 @@
 		smp_mb__after_atomic();
 		return true;
 	}
+	/* sync with other cpus */
+	smp_mb__after_atomic();
 	return false;
 }
 
@@ -439,11 +288,11 @@
 	int idx;
 	u8 *ent;
 
-	spin_lock_bh(&cmdq->cmdq_lock);
+	spin_lock_bh(&cmdq->cmd_qlock);
 
 	idx = cmdq->write_idx;
 	/* copy the instruction */
-	ent = cmdq->head + (idx * cmdq->instr_size);
+	ent = cmdq->base + (idx * cmdq->instr_size);
 	memcpy(ent, &sr->instr, cmdq->instr_size);
 
 	atomic_set(&sr->status, REQ_POSTED);
@@ -454,12 +303,13 @@
 
 	/* Ring doorbell with count 1 */
 	writeq(1, cmdq->dbell_csr_addr);
-	/* orders the doorbell rings */
-	mmiowb();
 
 	cmdq->write_idx = incr_index(idx, 1, ndev->qlen);
 
-	spin_unlock_bh(&cmdq->cmdq_lock);
+	spin_unlock_bh(&cmdq->cmd_qlock);
+
+	/* increment the posted command count */
+	atomic64_inc(&ndev->stats.posted);
 }
 
 static int post_backlog_cmds(struct nitrox_cmdq *cmdq)
@@ -471,11 +321,9 @@
 	if (!atomic_read(&cmdq->backlog_count))
 		return 0;
 
-	spin_lock_bh(&cmdq->backlog_lock);
+	spin_lock_bh(&cmdq->backlog_qlock);
 
 	list_for_each_entry_safe(sr, tmp, &cmdq->backlog_head, backlog) {
-		struct skcipher_request *skreq;
-
 		/* submit until space available */
 		if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
 			ret = -ENOSPC;
@@ -487,14 +335,10 @@
 		/* sync with other cpus */
 		smp_mb__after_atomic();
 
-		skreq = sr->skreq;
 		/* post the command */
 		post_se_instr(sr, cmdq);
-
-		/* backlog requests are posted, wakeup with -EINPROGRESS */
-		skcipher_request_complete(skreq, -EINPROGRESS);
 	}
-	spin_unlock_bh(&cmdq->backlog_lock);
+	spin_unlock_bh(&cmdq->backlog_qlock);
 
 	return ret;
 }
@@ -508,11 +352,14 @@
 	post_backlog_cmds(cmdq);
 
 	if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
-		if (!(sr->flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+		if (!(sr->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+			/* increment drop count */
+			atomic64_inc(&ndev->stats.dropped);
 			return -ENOSPC;
+		}
 		/* add to backlog list */
 		backlog_list_add(sr, cmdq);
-		return -EBUSY;
+		return -EINPROGRESS;
 	}
 	post_se_instr(sr, cmdq);
 
@@ -529,7 +376,7 @@
 int nitrox_process_se_request(struct nitrox_device *ndev,
 			      struct se_crypto_request *req,
 			      completion_t callback,
-			      struct skcipher_request *skreq)
+			      void *cb_arg)
 {
 	struct nitrox_softreq *sr;
 	dma_addr_t ctx_handle = 0;
@@ -546,12 +393,12 @@
 	sr->flags = req->flags;
 	sr->gfp = req->gfp;
 	sr->callback = callback;
-	sr->skreq = skreq;
+	sr->cb_arg = cb_arg;
 
 	atomic_set(&sr->status, REQ_NOT_POSTED);
 
-	WRITE_ONCE(sr->resp.orh, PENDING_SIG);
-	WRITE_ONCE(sr->resp.completion, PENDING_SIG);
+	sr->resp.orh = req->orh;
+	sr->resp.completion = req->comp;
 
 	ret = softreq_map_iobuf(sr, req);
 	if (ret) {
@@ -572,7 +419,7 @@
 	/* select the queue */
 	qno = smp_processor_id() % ndev->nr_queues;
 
-	sr->cmdq = &ndev->pkt_cmdqs[qno];
+	sr->cmdq = &ndev->pkt_inq[qno];
 
 	/*
 	 * 64-Byte Instruction Format
@@ -592,13 +439,13 @@
 
 	/* fill the packet instruction */
 	/* word 0 */
-	sr->instr.dptr0 = cpu_to_be64(sr->in.dma);
+	sr->instr.dptr0 = cpu_to_be64(sr->in.sgcomp_dma);
 
 	/* word 1 */
 	sr->instr.ih.value = 0;
 	sr->instr.ih.s.g = 1;
-	sr->instr.ih.s.gsz = sr->in.map_bufs_cnt;
-	sr->instr.ih.s.ssz = sr->out.map_bufs_cnt;
+	sr->instr.ih.s.gsz = sr->in.sgmap_cnt;
+	sr->instr.ih.s.ssz = sr->out.sgmap_cnt;
 	sr->instr.ih.s.fsz = FDATA_SIZE + sizeof(struct gphdr);
 	sr->instr.ih.s.tlen = sr->instr.ih.s.fsz + sr->in.total_bytes;
 	sr->instr.ih.value = cpu_to_be64(sr->instr.ih.value);
@@ -620,11 +467,11 @@
 
 	/* word 4 */
 	sr->instr.slc.value[0] = 0;
-	sr->instr.slc.s.ssz = sr->out.map_bufs_cnt;
+	sr->instr.slc.s.ssz = sr->out.sgmap_cnt;
 	sr->instr.slc.value[0] = cpu_to_be64(sr->instr.slc.value[0]);
 
 	/* word 5 */
-	sr->instr.slc.s.rptr = cpu_to_be64(sr->out.dma);
+	sr->instr.slc.s.rptr = cpu_to_be64(sr->out.sgcomp_dma);
 
 	/*
 	 * No conversion for front data,
@@ -658,6 +505,24 @@
 	post_backlog_cmds(cmdq);
 }
 
+static bool sr_completed(struct nitrox_softreq *sr)
+{
+	u64 orh = READ_ONCE(*sr->resp.orh);
+	unsigned long timeout = jiffies + msecs_to_jiffies(1);
+
+	if ((orh != PENDING_SIG) && (orh & 0xff))
+		return true;
+
+	while (READ_ONCE(*sr->resp.completion) == PENDING_SIG) {
+		if (time_after(jiffies, timeout)) {
+			pr_err("comp not done\n");
+			return false;
+		}
+	}
+
+	return true;
+}
+
 /**
  * process_request_list - process completed requests
  * @ndev: N5 device
@@ -669,9 +534,9 @@
 {
 	struct nitrox_device *ndev = cmdq->ndev;
 	struct nitrox_softreq *sr;
-	struct skcipher_request *skreq;
-	completion_t callback;
 	int req_completed = 0, err = 0, budget;
+	completion_t callback;
+	void *cb_arg;
 
 	/* check all pending requests */
 	budget = atomic_read(&cmdq->pending_count);
@@ -685,47 +550,45 @@
 			break;
 
 		/* check orh and completion bytes updates */
-		if (READ_ONCE(sr->resp.orh) == READ_ONCE(sr->resp.completion)) {
+		if (!sr_completed(sr)) {
 			/* request not completed, check for timeout */
 			if (!cmd_timeout(sr->tstamp, ndev->timeout))
 				break;
 			dev_err_ratelimited(DEV(ndev),
 					    "Request timeout, orh 0x%016llx\n",
-					    READ_ONCE(sr->resp.orh));
+					    READ_ONCE(*sr->resp.orh));
 		}
 		atomic_dec(&cmdq->pending_count);
+		atomic64_inc(&ndev->stats.completed);
 		/* sync with other cpus */
 		smp_mb__after_atomic();
 		/* remove from response list */
 		response_list_del(sr, cmdq);
-
-		callback = sr->callback;
-		skreq = sr->skreq;
-
 		/* ORH error code */
-		err = READ_ONCE(sr->resp.orh) & 0xff;
+		err = READ_ONCE(*sr->resp.orh) & 0xff;
+		callback = sr->callback;
+		cb_arg = sr->cb_arg;
 		softreq_destroy(sr);
-
 		if (callback)
-			callback(skreq, err);
+			callback(cb_arg, err);
 
 		req_completed++;
 	}
 }
 
 /**
- * pkt_slc_resp_handler - post processing of SE responses
+ * pkt_slc_resp_tasklet - post processing of SE responses
  */
-void pkt_slc_resp_handler(unsigned long data)
+void pkt_slc_resp_tasklet(unsigned long data)
 {
-	struct bh_data *bh = (void *)(uintptr_t)(data);
-	struct nitrox_cmdq *cmdq = bh->cmdq;
-	union nps_pkt_slc_cnts pkt_slc_cnts;
+	struct nitrox_q_vector *qvec = (void *)(uintptr_t)(data);
+	struct nitrox_cmdq *cmdq = qvec->cmdq;
+	union nps_pkt_slc_cnts slc_cnts;
 
 	/* read completion count */
-	pkt_slc_cnts.value = readq(bh->completion_cnt_csr_addr);
+	slc_cnts.value = readq(cmdq->compl_cnt_csr_addr);
 	/* resend the interrupt if more work to do */
-	pkt_slc_cnts.s.resend = 1;
+	slc_cnts.s.resend = 1;
 
 	process_response_list(cmdq);
 
@@ -733,9 +596,7 @@
 	 * clear the interrupt with resend bit enabled,
 	 * MSI-X interrupt generates if Completion count > Threshold
 	 */
-	writeq(pkt_slc_cnts.value, bh->completion_cnt_csr_addr);
-	/* order the writes */
-	mmiowb();
+	writeq(slc_cnts.value, cmdq->compl_cnt_csr_addr);
 
 	if (atomic_read(&cmdq->backlog_count))
 		schedule_work(&cmdq->backlog_qflush);