2024-05-10 37f49e37ab4cb5d0bc4c60eb5c6d4dd57db767bb
--- a/kernel/drivers/crypto/cavium/nitrox/nitrox_lib.c
+++ b/kernel/drivers/crypto/cavium/nitrox/nitrox_lib.c
@@ -17,30 +17,29 @@
 
 #define CRYPTO_CTX_SIZE 256
 
-/* command queue alignments */
-#define PKT_IN_ALIGN 16
+/* packet input ring alignments */
+#define PKTIN_Q_ALIGN_BYTES 16
+/* AQM queue input alignments */
+#define AQM_Q_ALIGN_BYTES 32
 
-static int cmdq_common_init(struct nitrox_cmdq *cmdq)
+static int nitrox_cmdq_init(struct nitrox_cmdq *cmdq, int align_bytes)
 {
         struct nitrox_device *ndev = cmdq->ndev;
-        u32 qsize;
 
-        qsize = (ndev->qlen) * cmdq->instr_size;
-        cmdq->head_unaligned = dma_zalloc_coherent(DEV(ndev),
-                                                   (qsize + PKT_IN_ALIGN),
-                                                   &cmdq->dma_unaligned,
-                                                   GFP_KERNEL);
-        if (!cmdq->head_unaligned)
+        cmdq->qsize = (ndev->qlen * cmdq->instr_size) + align_bytes;
+        cmdq->unalign_base = dma_alloc_coherent(DEV(ndev), cmdq->qsize,
+                                                &cmdq->unalign_dma,
+                                                GFP_KERNEL);
+        if (!cmdq->unalign_base)
                 return -ENOMEM;
 
-        cmdq->head = PTR_ALIGN(cmdq->head_unaligned, PKT_IN_ALIGN);
-        cmdq->dma = PTR_ALIGN(cmdq->dma_unaligned, PKT_IN_ALIGN);
-        cmdq->qsize = (qsize + PKT_IN_ALIGN);
+        cmdq->dma = PTR_ALIGN(cmdq->unalign_dma, align_bytes);
+        cmdq->base = cmdq->unalign_base + (cmdq->dma - cmdq->unalign_dma);
         cmdq->write_idx = 0;
 
-        spin_lock_init(&cmdq->response_lock);
-        spin_lock_init(&cmdq->cmdq_lock);
-        spin_lock_init(&cmdq->backlog_lock);
+        spin_lock_init(&cmdq->cmd_qlock);
+        spin_lock_init(&cmdq->resp_qlock);
+        spin_lock_init(&cmdq->backlog_qlock);
 
         INIT_LIST_HEAD(&cmdq->response_head);
         INIT_LIST_HEAD(&cmdq->backlog_head);
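
Note on the hunk above: the rewritten nitrox_cmdq_init() keeps the standard over-allocate-then-align idiom for DMA rings. The queue is sized at qlen * instr_size plus align_bytes of slack, the bus address is rounded up with PTR_ALIGN(), and the CPU pointer is shifted by the same delta so both views stay in step. A minimal sketch of the idiom, assuming a generic Linux driver context (the helper name and parameters are illustrative, not part of this driver):

#include <linux/dma-mapping.h>
#include <linux/kernel.h>

/* qsize must already include 'align' bytes of slack, as above.
 * The caller keeps unalign_base/unalign_dma so the buffer can be
 * handed back to dma_free_coherent() at teardown.
 */
static void *ring_alloc_aligned(struct device *dev, size_t qsize, int align,
                                void **unalign_base, dma_addr_t *unalign_dma,
                                dma_addr_t *aligned_dma)
{
        *unalign_base = dma_alloc_coherent(dev, qsize, unalign_dma,
                                           GFP_KERNEL);
        if (!*unalign_base)
                return NULL;

        /* round the bus address up to the next 'align' boundary ... */
        *aligned_dma = PTR_ALIGN(*unalign_dma, align);
        /* ... and apply the identical offset to the CPU mapping */
        return *unalign_base + (*aligned_dma - *unalign_dma);
}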
@@ -51,68 +50,138 @@
         return 0;
 }
 
-static void cmdq_common_cleanup(struct nitrox_cmdq *cmdq)
+static void nitrox_cmdq_reset(struct nitrox_cmdq *cmdq)
 {
-        struct nitrox_device *ndev = cmdq->ndev;
+        cmdq->write_idx = 0;
+        atomic_set(&cmdq->pending_count, 0);
+        atomic_set(&cmdq->backlog_count, 0);
+}
 
+static void nitrox_cmdq_cleanup(struct nitrox_cmdq *cmdq)
+{
+        struct nitrox_device *ndev;
+
+        if (!cmdq)
+                return;
+
+        if (!cmdq->unalign_base)
+                return;
+
+        ndev = cmdq->ndev;
         cancel_work_sync(&cmdq->backlog_qflush);
 
         dma_free_coherent(DEV(ndev), cmdq->qsize,
-                          cmdq->head_unaligned, cmdq->dma_unaligned);
-
-        atomic_set(&cmdq->pending_count, 0);
-        atomic_set(&cmdq->backlog_count, 0);
+                          cmdq->unalign_base, cmdq->unalign_dma);
+        nitrox_cmdq_reset(cmdq);
 
         cmdq->dbell_csr_addr = NULL;
-        cmdq->head = NULL;
+        cmdq->compl_cnt_csr_addr = NULL;
+        cmdq->unalign_base = NULL;
+        cmdq->base = NULL;
+        cmdq->unalign_dma = 0;
         cmdq->dma = 0;
         cmdq->qsize = 0;
         cmdq->instr_size = 0;
 }
 
-static void nitrox_cleanup_pkt_cmdqs(struct nitrox_device *ndev)
+static void nitrox_free_aqm_queues(struct nitrox_device *ndev)
 {
         int i;
 
         for (i = 0; i < ndev->nr_queues; i++) {
-                struct nitrox_cmdq *cmdq = &ndev->pkt_cmdqs[i];
-
-                cmdq_common_cleanup(cmdq);
+                nitrox_cmdq_cleanup(ndev->aqmq[i]);
+                kfree_sensitive(ndev->aqmq[i]);
+                ndev->aqmq[i] = NULL;
         }
-        kfree(ndev->pkt_cmdqs);
-        ndev->pkt_cmdqs = NULL;
 }
 
-static int nitrox_init_pkt_cmdqs(struct nitrox_device *ndev)
+static int nitrox_alloc_aqm_queues(struct nitrox_device *ndev)
 {
-        int i, err, size;
+        int i, err;
 
-        size = ndev->nr_queues * sizeof(struct nitrox_cmdq);
-        ndev->pkt_cmdqs = kzalloc(size, GFP_KERNEL);
-        if (!ndev->pkt_cmdqs)
+        for (i = 0; i < ndev->nr_queues; i++) {
+                struct nitrox_cmdq *cmdq;
+                u64 offset;
+
+                cmdq = kzalloc_node(sizeof(*cmdq), GFP_KERNEL, ndev->node);
+                if (!cmdq) {
+                        err = -ENOMEM;
+                        goto aqmq_fail;
+                }
+
+                cmdq->ndev = ndev;
+                cmdq->qno = i;
+                cmdq->instr_size = sizeof(struct aqmq_command_s);
+
+                /* AQM Queue Doorbell Counter Register Address */
+                offset = AQMQ_DRBLX(i);
+                cmdq->dbell_csr_addr = NITROX_CSR_ADDR(ndev, offset);
+                /* AQM Queue Commands Completed Count Register Address */
+                offset = AQMQ_CMD_CNTX(i);
+                cmdq->compl_cnt_csr_addr = NITROX_CSR_ADDR(ndev, offset);
+
+                err = nitrox_cmdq_init(cmdq, AQM_Q_ALIGN_BYTES);
+                if (err) {
+                        kfree_sensitive(cmdq);
+                        goto aqmq_fail;
+                }
+                ndev->aqmq[i] = cmdq;
+        }
+
+        return 0;
+
+aqmq_fail:
+        nitrox_free_aqm_queues(ndev);
+        return err;
+}
+
+static void nitrox_free_pktin_queues(struct nitrox_device *ndev)
+{
+        int i;
+
+        for (i = 0; i < ndev->nr_queues; i++) {
+                struct nitrox_cmdq *cmdq = &ndev->pkt_inq[i];
+
+                nitrox_cmdq_cleanup(cmdq);
+        }
+        kfree(ndev->pkt_inq);
+        ndev->pkt_inq = NULL;
+}
+
+static int nitrox_alloc_pktin_queues(struct nitrox_device *ndev)
+{
+        int i, err;
+
+        ndev->pkt_inq = kcalloc_node(ndev->nr_queues,
+                                     sizeof(struct nitrox_cmdq),
+                                     GFP_KERNEL, ndev->node);
+        if (!ndev->pkt_inq)
                 return -ENOMEM;
 
         for (i = 0; i < ndev->nr_queues; i++) {
                 struct nitrox_cmdq *cmdq;
                 u64 offset;
 
-                cmdq = &ndev->pkt_cmdqs[i];
+                cmdq = &ndev->pkt_inq[i];
                 cmdq->ndev = ndev;
                 cmdq->qno = i;
                 cmdq->instr_size = sizeof(struct nps_pkt_instr);
 
+                /* packet input ring doorbell address */
                 offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(i);
-                /* SE ring doorbell address for this queue */
                 cmdq->dbell_csr_addr = NITROX_CSR_ADDR(ndev, offset);
+                /* packet solicit port completion count address */
+                offset = NPS_PKT_SLC_CNTSX(i);
+                cmdq->compl_cnt_csr_addr = NITROX_CSR_ADDR(ndev, offset);
 
-                err = cmdq_common_init(cmdq);
+                err = nitrox_cmdq_init(cmdq, PKTIN_Q_ALIGN_BYTES);
                 if (err)
-                        goto pkt_cmdq_fail;
+                        goto pktq_fail;
         }
         return 0;
 
-pkt_cmdq_fail:
-        nitrox_cleanup_pkt_cmdqs(ndev);
+pktq_fail:
+        nitrox_free_pktin_queues(ndev);
         return err;
 }
 
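Two details in this hunk carry the error handling. nitrox_cmdq_cleanup() bails out early on a NULL or never-initialised queue, which is what allows the aqmq_fail path to run nitrox_free_aqm_queues() safely over a half-built array. And the AQM queue structures are freed with kfree_sensitive() instead of kfree(): the object is wiped with memzero_explicit() before going back to the slab, so anything secret left in recycled queue memory cannot leak into later allocations. A small sketch of those semantics (the struct here is hypothetical):

#include <linux/slab.h>

struct aqm_secret_ctx {
        u8 iv[16];
        u8 key[32];             /* material worth scrubbing */
};

static void example_release(struct aqm_secret_ctx *ctx)
{
        /* NULL-safe like kfree(), but the whole object is zeroed with
         * memzero_explicit() first, so the compiler cannot optimise
         * the wipe away.
         */
        kfree_sensitive(ctx);
}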
@@ -122,7 +191,7 @@
 
         /* Crypto context pool, 16 byte aligned */
         size = CRYPTO_CTX_SIZE + sizeof(struct ctx_hdr);
-        ndev->ctx_pool = dma_pool_create("crypto-context",
+        ndev->ctx_pool = dma_pool_create("nitrox-context",
                                          DEV(ndev), size, 16, 0);
         if (!ndev->ctx_pool)
                 return -ENOMEM;
@@ -154,7 +223,7 @@
         if (!chdr)
                 return NULL;
 
-        vaddr = dma_pool_alloc(ndev->ctx_pool, (GFP_KERNEL | __GFP_ZERO), &dma);
+        vaddr = dma_pool_zalloc(ndev->ctx_pool, GFP_KERNEL, &dma);
         if (!vaddr) {
                 kfree(chdr);
                 return NULL;
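
dma_pool_zalloc(pool, GFP_KERNEL, &dma) is simply the canonical spelling of dma_pool_alloc(pool, GFP_KERNEL | __GFP_ZERO, &dma); the block comes back zeroed either way. For context, a sketch of the pool lifecycle as this file uses it, with the size and alignment taken from the pool-creation hunk above (device pointer assumed valid, NULL checks elided for brevity):

#include <linux/dmapool.h>

        struct dma_pool *pool;
        dma_addr_t dma;
        void *vaddr;

        /* 16-byte aligned blocks, one crypto context plus header each */
        pool = dma_pool_create("nitrox-context", dev,
                               CRYPTO_CTX_SIZE + sizeof(struct ctx_hdr),
                               16, 0);

        vaddr = dma_pool_zalloc(pool, GFP_KERNEL, &dma);  /* zeroed block */

        /* ... use vaddr (CPU view) and dma (device view) ... */

        dma_pool_free(pool, vaddr, dma);
        dma_pool_destroy(pool);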
@@ -206,9 +275,15 @@
         if (err)
                 return err;
 
-        err = nitrox_init_pkt_cmdqs(ndev);
+        err = nitrox_alloc_pktin_queues(ndev);
         if (err)
                 destroy_crypto_dma_pool(ndev);
+
+        err = nitrox_alloc_aqm_queues(ndev);
+        if (err) {
+                nitrox_free_pktin_queues(ndev);
+                destroy_crypto_dma_pool(ndev);
+        }
 
         return err;
 }
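
A quirk worth noting in this hunk: if nitrox_alloc_pktin_queues() fails, the context pool is destroyed but control still falls through into nitrox_alloc_aqm_queues(); if that call then succeeds, the function returns 0 with the pool already gone. A stricter variant would return at the first failure; a sketch of that shape (a suggestion only, not what this commit applies):

        err = nitrox_alloc_pktin_queues(ndev);
        if (err)
                goto destroy_pool;

        err = nitrox_alloc_aqm_queues(ndev);
        if (err)
                goto free_pktin;

        return 0;

free_pktin:
        nitrox_free_pktin_queues(ndev);
destroy_pool:
        destroy_crypto_dma_pool(ndev);
        return err;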
@@ -219,6 +294,7 @@
  */
 void nitrox_common_sw_cleanup(struct nitrox_device *ndev)
 {
-        nitrox_cleanup_pkt_cmdqs(ndev);
+        nitrox_free_aqm_queues(ndev);
+        nitrox_free_pktin_queues(ndev);
         destroy_crypto_dma_pool(ndev);
 }