hc
2024-05-10 37f49e37ab4cb5d0bc4c60eb5c6d4dd57db767bb
kernel/drivers/crypto/inside-secure/safexcel.c
....@@ -14,6 +14,7 @@
1414 #include <linux/module.h>
1515 #include <linux/of_platform.h>
1616 #include <linux/of_irq.h>
17
+#include <linux/pci.h>
1718 #include <linux/platform_device.h>
1819 #include <linux/workqueue.h>
1920
....@@ -27,62 +28,217 @@
2728 module_param(max_rings, uint, 0644);
2829 MODULE_PARM_DESC(max_rings, "Maximum number of rings to use.");
2930
30
-static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
31
+static void eip197_trc_cache_setupvirt(struct safexcel_crypto_priv *priv)
3132 {
32
- u32 val, htable_offset;
33
- int i, cs_rc_max, cs_ht_wc, cs_trc_rec_wc, cs_trc_lg_rec_wc;
34
-
35
- if (priv->version == EIP197B) {
36
- cs_rc_max = EIP197B_CS_RC_MAX;
37
- cs_ht_wc = EIP197B_CS_HT_WC;
38
- cs_trc_rec_wc = EIP197B_CS_TRC_REC_WC;
39
- cs_trc_lg_rec_wc = EIP197B_CS_TRC_LG_REC_WC;
40
- } else {
41
- cs_rc_max = EIP197D_CS_RC_MAX;
42
- cs_ht_wc = EIP197D_CS_HT_WC;
43
- cs_trc_rec_wc = EIP197D_CS_TRC_REC_WC;
44
- cs_trc_lg_rec_wc = EIP197D_CS_TRC_LG_REC_WC;
45
- }
46
-
47
- /* Enable the record cache memory access */
48
- val = readl(priv->base + EIP197_CS_RAM_CTRL);
49
- val &= ~EIP197_TRC_ENABLE_MASK;
50
- val |= EIP197_TRC_ENABLE_0;
51
- writel(val, priv->base + EIP197_CS_RAM_CTRL);
52
-
53
- /* Clear all ECC errors */
54
- writel(0, priv->base + EIP197_TRC_ECCCTRL);
33
+ int i;
5534
5635 /*
57
- * Make sure the cache memory is accessible by taking record cache into
58
- * reset.
36
+ * Map all interfaces/rings to register index 0
37
+ * so they can share contexts. Without this, the EIP197 will
38
+ * assume each interface/ring to be in its own memory domain
39
+ * i.e. have its own subset of UNIQUE memory addresses.
40
+ * Which would cause records with the SAME memory address to
41
+ * use DIFFERENT cache buffers, causing both poor cache utilization
42
+ * AND serious coherence/invalidation issues.
5943 */
60
- val = readl(priv->base + EIP197_TRC_PARAMS);
61
- val |= EIP197_TRC_PARAMS_SW_RESET;
62
- val &= ~EIP197_TRC_PARAMS_DATA_ACCESS;
63
- writel(val, priv->base + EIP197_TRC_PARAMS);
44
+ for (i = 0; i < 4; i++)
45
+ writel(0, priv->base + EIP197_FLUE_IFC_LUT(i));
6446
65
- /* Clear all records */
47
+ /*
48
+ * Initialize other virtualization regs for cache
49
+ * These may not be in their reset state ...
50
+ */
51
+ for (i = 0; i < priv->config.rings; i++) {
52
+ writel(0, priv->base + EIP197_FLUE_CACHEBASE_LO(i));
53
+ writel(0, priv->base + EIP197_FLUE_CACHEBASE_HI(i));
54
+ writel(EIP197_FLUE_CONFIG_MAGIC,
55
+ priv->base + EIP197_FLUE_CONFIG(i));
56
+ }
57
+ writel(0, priv->base + EIP197_FLUE_OFFSETS);
58
+ writel(0, priv->base + EIP197_FLUE_ARC4_OFFSET);
59
+}
60
+
61
+static void eip197_trc_cache_banksel(struct safexcel_crypto_priv *priv,
62
+ u32 addrmid, int *actbank)
63
+{
64
+ u32 val;
65
+ int curbank;
66
+
67
+ curbank = addrmid >> 16;
68
+ if (curbank != *actbank) {
69
+ val = readl(priv->base + EIP197_CS_RAM_CTRL);
70
+ val = (val & ~EIP197_CS_BANKSEL_MASK) |
71
+ (curbank << EIP197_CS_BANKSEL_OFS);
72
+ writel(val, priv->base + EIP197_CS_RAM_CTRL);
73
+ *actbank = curbank;
74
+ }
75
+}
76
+
77
+static u32 eip197_trc_cache_probe(struct safexcel_crypto_priv *priv,
78
+ int maxbanks, u32 probemask, u32 stride)
79
+{
80
+ u32 val, addrhi, addrlo, addrmid, addralias, delta, marker;
81
+ int actbank;
82
+
83
+ /*
84
+ * And probe the actual size of the physically attached cache data RAM
85
+ * Using a binary subdivision algorithm downto 32 byte cache lines.
86
+ */
87
+ addrhi = 1 << (16 + maxbanks);
88
+ addrlo = 0;
89
+ actbank = min(maxbanks - 1, 0);
90
+ while ((addrhi - addrlo) > stride) {
91
+ /* write marker to lowest address in top half */
92
+ addrmid = (addrhi + addrlo) >> 1;
93
+ marker = (addrmid ^ 0xabadbabe) & probemask; /* Unique */
94
+ eip197_trc_cache_banksel(priv, addrmid, &actbank);
95
+ writel(marker,
96
+ priv->base + EIP197_CLASSIFICATION_RAMS +
97
+ (addrmid & 0xffff));
98
+
99
+ /* write invalid markers to possible aliases */
100
+ delta = 1 << __fls(addrmid);
101
+ while (delta >= stride) {
102
+ addralias = addrmid - delta;
103
+ eip197_trc_cache_banksel(priv, addralias, &actbank);
104
+ writel(~marker,
105
+ priv->base + EIP197_CLASSIFICATION_RAMS +
106
+ (addralias & 0xffff));
107
+ delta >>= 1;
108
+ }
109
+
110
+ /* read back marker from top half */
111
+ eip197_trc_cache_banksel(priv, addrmid, &actbank);
112
+ val = readl(priv->base + EIP197_CLASSIFICATION_RAMS +
113
+ (addrmid & 0xffff));
114
+
115
+ if ((val & probemask) == marker)
116
+ /* read back correct, continue with top half */
117
+ addrlo = addrmid;
118
+ else
119
+ /* not read back correct, continue with bottom half */
120
+ addrhi = addrmid;
121
+ }
122
+ return addrhi;
123
+}
124
+
125
+static void eip197_trc_cache_clear(struct safexcel_crypto_priv *priv,
126
+ int cs_rc_max, int cs_ht_wc)
127
+{
128
+ int i;
129
+ u32 htable_offset, val, offset;
130
+
131
+ /* Clear all records in administration RAM */
66132 for (i = 0; i < cs_rc_max; i++) {
67
- u32 val, offset = EIP197_CLASSIFICATION_RAMS + i * EIP197_CS_RC_SIZE;
133
+ offset = EIP197_CLASSIFICATION_RAMS + i * EIP197_CS_RC_SIZE;
68134
69135 writel(EIP197_CS_RC_NEXT(EIP197_RC_NULL) |
70136 EIP197_CS_RC_PREV(EIP197_RC_NULL),
71137 priv->base + offset);
72138
73
- val = EIP197_CS_RC_NEXT(i+1) | EIP197_CS_RC_PREV(i-1);
139
+ val = EIP197_CS_RC_NEXT(i + 1) | EIP197_CS_RC_PREV(i - 1);
74140 if (i == 0)
75141 val |= EIP197_CS_RC_PREV(EIP197_RC_NULL);
76142 else if (i == cs_rc_max - 1)
77143 val |= EIP197_CS_RC_NEXT(EIP197_RC_NULL);
78
- writel(val, priv->base + offset + sizeof(u32));
144
+ writel(val, priv->base + offset + 4);
145
+ /* must also initialize the address key due to ECC! */
146
+ writel(0, priv->base + offset + 8);
147
+ writel(0, priv->base + offset + 12);
79148 }
80149
81150 /* Clear the hash table entries */
82151 htable_offset = cs_rc_max * EIP197_CS_RC_SIZE;
83152 for (i = 0; i < cs_ht_wc; i++)
84153 writel(GENMASK(29, 0),
85
- priv->base + EIP197_CLASSIFICATION_RAMS + htable_offset + i * sizeof(u32));
154
+ priv->base + EIP197_CLASSIFICATION_RAMS +
155
+ htable_offset + i * sizeof(u32));
156
+}
157
+
158
+static int eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
159
+{
160
+ u32 val, dsize, asize;
161
+ int cs_rc_max, cs_ht_wc, cs_trc_rec_wc, cs_trc_lg_rec_wc;
162
+ int cs_rc_abs_max, cs_ht_sz;
163
+ int maxbanks;
164
+
165
+ /* Setup (dummy) virtualization for cache */
166
+ eip197_trc_cache_setupvirt(priv);
167
+
168
+ /*
169
+ * Enable the record cache memory access and
170
+ * probe the bank select width
171
+ */
172
+ val = readl(priv->base + EIP197_CS_RAM_CTRL);
173
+ val &= ~EIP197_TRC_ENABLE_MASK;
174
+ val |= EIP197_TRC_ENABLE_0 | EIP197_CS_BANKSEL_MASK;
175
+ writel(val, priv->base + EIP197_CS_RAM_CTRL);
176
+ val = readl(priv->base + EIP197_CS_RAM_CTRL);
177
+ maxbanks = ((val&EIP197_CS_BANKSEL_MASK)>>EIP197_CS_BANKSEL_OFS) + 1;
178
+
179
+ /* Clear all ECC errors */
180
+ writel(0, priv->base + EIP197_TRC_ECCCTRL);
181
+
182
+ /*
183
+ * Make sure the cache memory is accessible by taking record cache into
184
+ * reset. Need data memory access here, not admin access.
185
+ */
186
+ val = readl(priv->base + EIP197_TRC_PARAMS);
187
+ val |= EIP197_TRC_PARAMS_SW_RESET | EIP197_TRC_PARAMS_DATA_ACCESS;
188
+ writel(val, priv->base + EIP197_TRC_PARAMS);
189
+
190
+ /* Probed data RAM size in bytes */
191
+ dsize = eip197_trc_cache_probe(priv, maxbanks, 0xffffffff, 32);
192
+
193
+ /*
194
+ * Now probe the administration RAM size pretty much the same way
195
+ * Except that only the lower 30 bits are writable and we don't need
196
+ * bank selects
197
+ */
198
+ val = readl(priv->base + EIP197_TRC_PARAMS);
199
+ /* admin access now */
200
+ val &= ~(EIP197_TRC_PARAMS_DATA_ACCESS | EIP197_CS_BANKSEL_MASK);
201
+ writel(val, priv->base + EIP197_TRC_PARAMS);
202
+
203
+ /* Probed admin RAM size in admin words */
204
+ asize = eip197_trc_cache_probe(priv, 0, 0x3fffffff, 16) >> 4;
205
+
206
+ /* Clear any ECC errors detected while probing! */
207
+ writel(0, priv->base + EIP197_TRC_ECCCTRL);
208
+
209
+ /* Sanity check probing results */
210
+ if (dsize < EIP197_MIN_DSIZE || asize < EIP197_MIN_ASIZE) {
211
+ dev_err(priv->dev, "Record cache probing failed (%d,%d).",
212
+ dsize, asize);
213
+ return -ENODEV;
214
+ }
215
+
216
+ /*
217
+ * Determine optimal configuration from RAM sizes
218
+ * Note that we assume that the physical RAM configuration is sane
219
+ * Therefore, we don't do any parameter error checking here ...
220
+ */
221
+
222
+ /* For now, just use a single record format covering everything */
223
+ cs_trc_rec_wc = EIP197_CS_TRC_REC_WC;
224
+ cs_trc_lg_rec_wc = EIP197_CS_TRC_REC_WC;
225
+
226
+ /*
227
+ * Step #1: How many records will physically fit?
228
+ * Hard upper limit is 1023!
229
+ */
230
+ cs_rc_abs_max = min_t(uint, ((dsize >> 2) / cs_trc_lg_rec_wc), 1023);
231
+ /* Step #2: Need at least 2 words in the admin RAM per record */
232
+ cs_rc_max = min_t(uint, cs_rc_abs_max, (asize >> 1));
233
+ /* Step #3: Determine log2 of hash table size */
234
+ cs_ht_sz = __fls(asize - cs_rc_max) - 2;
235
+ /* Step #4: determine current size of hash table in dwords */
236
+ cs_ht_wc = 16 << cs_ht_sz; /* dwords, not admin words */
237
+ /* Step #5: add back excess words and see if we can fit more records */
238
+ cs_rc_max = min_t(uint, cs_rc_abs_max, asize - (cs_ht_wc >> 2));
239
+
240
+ /* Clear the cache RAMs */
241
+ eip197_trc_cache_clear(priv, cs_rc_max, cs_ht_wc);
86242
87243 /* Disable the record cache memory access */
88244 val = readl(priv->base + EIP197_CS_RAM_CTRL);
....@@ -102,83 +258,25 @@
102258 /* Configure the record cache #2 */
103259 val = EIP197_TRC_PARAMS_RC_SZ_LARGE(cs_trc_lg_rec_wc) |
104260 EIP197_TRC_PARAMS_BLK_TIMER_SPEED(1) |
105
- EIP197_TRC_PARAMS_HTABLE_SZ(2);
261
+ EIP197_TRC_PARAMS_HTABLE_SZ(cs_ht_sz);
106262 writel(val, priv->base + EIP197_TRC_PARAMS);
263
+
264
+ dev_info(priv->dev, "TRC init: %dd,%da (%dr,%dh)\n",
265
+ dsize, asize, cs_rc_max, cs_ht_wc + cs_ht_wc);
266
+ return 0;
107267 }
108268
109
-static void eip197_write_firmware(struct safexcel_crypto_priv *priv,
110
- const struct firmware *fw, int pe, u32 ctrl,
111
- u32 prog_en)
269
+static void eip197_init_firmware(struct safexcel_crypto_priv *priv)
112270 {
113
- const u32 *data = (const u32 *)fw->data;
271
+ int pe, i;
114272 u32 val;
115
- int i;
116
-
117
- /* Reset the engine to make its program memory accessible */
118
- writel(EIP197_PE_ICE_x_CTRL_SW_RESET |
119
- EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR |
120
- EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR,
121
- EIP197_PE(priv) + ctrl);
122
-
123
- /* Enable access to the program memory */
124
- writel(prog_en, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));
125
-
126
- /* Write the firmware */
127
- for (i = 0; i < fw->size / sizeof(u32); i++)
128
- writel(be32_to_cpu(data[i]),
129
- priv->base + EIP197_CLASSIFICATION_RAMS + i * sizeof(u32));
130
-
131
- /* Disable access to the program memory */
132
- writel(0, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));
133
-
134
- /* Release engine from reset */
135
- val = readl(EIP197_PE(priv) + ctrl);
136
- val &= ~EIP197_PE_ICE_x_CTRL_SW_RESET;
137
- writel(val, EIP197_PE(priv) + ctrl);
138
-}
139
-
140
-static int eip197_load_firmwares(struct safexcel_crypto_priv *priv)
141
-{
142
- const char *fw_name[] = {"ifpp.bin", "ipue.bin"};
143
- const struct firmware *fw[FW_NB];
144
- char fw_path[31], *dir = NULL;
145
- int i, j, ret = 0, pe;
146
- u32 val;
147
-
148
- switch (priv->version) {
149
- case EIP197B:
150
- dir = "eip197b";
151
- break;
152
- case EIP197D:
153
- dir = "eip197d";
154
- break;
155
- default:
156
- /* No firmware is required */
157
- return 0;
158
- }
159
-
160
- for (i = 0; i < FW_NB; i++) {
161
- snprintf(fw_path, 31, "inside-secure/%s/%s", dir, fw_name[i]);
162
- ret = request_firmware(&fw[i], fw_path, priv->dev);
163
- if (ret) {
164
- if (priv->version != EIP197B)
165
- goto release_fw;
166
-
167
- /* Fallback to the old firmware location for the
168
- * EIP197b.
169
- */
170
- ret = request_firmware(&fw[i], fw_name[i], priv->dev);
171
- if (ret) {
172
- dev_err(priv->dev,
173
- "Failed to request firmware %s (%d)\n",
174
- fw_name[i], ret);
175
- goto release_fw;
176
- }
177
- }
178
- }
179273
180274 for (pe = 0; pe < priv->config.pes; pe++) {
181
- /* Clear the scratchpad memory */
275
+ /* Configure the token FIFO's */
276
+ writel(3, EIP197_PE(priv) + EIP197_PE_ICE_PUTF_CTRL(pe));
277
+ writel(0, EIP197_PE(priv) + EIP197_PE_ICE_PPTF_CTRL(pe));
278
+
279
+ /* Clear the ICE scratchpad memory */
182280 val = readl(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe));
183281 val |= EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER |
184282 EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN |
....@@ -186,35 +284,220 @@
186284 EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS;
187285 writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe));
188286
189
- memset_io(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_RAM(pe), 0,
190
- EIP197_NUM_OF_SCRATCH_BLOCKS * sizeof(u32));
287
+ /* clear the scratchpad RAM using 32 bit writes only */
288
+ for (i = 0; i < EIP197_NUM_OF_SCRATCH_BLOCKS; i++)
289
+ writel(0, EIP197_PE(priv) +
290
+ EIP197_PE_ICE_SCRATCH_RAM(pe) + (i << 2));
191291
192
- eip197_write_firmware(priv, fw[FW_IFPP], pe,
193
- EIP197_PE_ICE_FPP_CTRL(pe),
194
- EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN);
292
+ /* Reset the IFPP engine to make its program mem accessible */
293
+ writel(EIP197_PE_ICE_x_CTRL_SW_RESET |
294
+ EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR |
295
+ EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR,
296
+ EIP197_PE(priv) + EIP197_PE_ICE_FPP_CTRL(pe));
195297
196
- eip197_write_firmware(priv, fw[FW_IPUE], pe,
197
- EIP197_PE_ICE_PUE_CTRL(pe),
198
- EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN);
298
+ /* Reset the IPUE engine to make its program mem accessible */
299
+ writel(EIP197_PE_ICE_x_CTRL_SW_RESET |
300
+ EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR |
301
+ EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR,
302
+ EIP197_PE(priv) + EIP197_PE_ICE_PUE_CTRL(pe));
303
+
304
+ /* Enable access to all IFPP program memories */
305
+ writel(EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN,
306
+ EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));
307
+
308
+ /* bypass the OCE, if present */
309
+ if (priv->flags & EIP197_OCE)
310
+ writel(EIP197_DEBUG_OCE_BYPASS, EIP197_PE(priv) +
311
+ EIP197_PE_DEBUG(pe));
199312 }
313
+
314
+}
315
+
316
+static int eip197_write_firmware(struct safexcel_crypto_priv *priv,
317
+ const struct firmware *fw)
318
+{
319
+ const __be32 *data = (const __be32 *)fw->data;
320
+ int i;
321
+
322
+ /* Write the firmware */
323
+ for (i = 0; i < fw->size / sizeof(u32); i++)
324
+ writel(be32_to_cpu(data[i]),
325
+ priv->base + EIP197_CLASSIFICATION_RAMS +
326
+ i * sizeof(__be32));
327
+
328
+ /* Exclude final 2 NOPs from size */
329
+ return i - EIP197_FW_TERMINAL_NOPS;
330
+}
331
+
332
+/*
333
+ * If FW is actual production firmware, then poll for its initialization
334
+ * to complete and check if it is good for the HW, otherwise just return OK.
335
+ */
336
+static bool poll_fw_ready(struct safexcel_crypto_priv *priv, int fpp)
337
+{
338
+ int pe, pollcnt;
339
+ u32 base, pollofs;
340
+
341
+ if (fpp)
342
+ pollofs = EIP197_FW_FPP_READY;
343
+ else
344
+ pollofs = EIP197_FW_PUE_READY;
345
+
346
+ for (pe = 0; pe < priv->config.pes; pe++) {
347
+ base = EIP197_PE_ICE_SCRATCH_RAM(pe);
348
+ pollcnt = EIP197_FW_START_POLLCNT;
349
+ while (pollcnt &&
350
+ (readl_relaxed(EIP197_PE(priv) + base +
351
+ pollofs) != 1)) {
352
+ pollcnt--;
353
+ }
354
+ if (!pollcnt) {
355
+ dev_err(priv->dev, "FW(%d) for PE %d failed to start\n",
356
+ fpp, pe);
357
+ return false;
358
+ }
359
+ }
360
+ return true;
361
+}
362
+
363
+static bool eip197_start_firmware(struct safexcel_crypto_priv *priv,
364
+ int ipuesz, int ifppsz, int minifw)
365
+{
366
+ int pe;
367
+ u32 val;
368
+
369
+ for (pe = 0; pe < priv->config.pes; pe++) {
370
+ /* Disable access to all program memory */
371
+ writel(0, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));
372
+
373
+ /* Start IFPP microengines */
374
+ if (minifw)
375
+ val = 0;
376
+ else
377
+ val = EIP197_PE_ICE_UENG_START_OFFSET((ifppsz - 1) &
378
+ EIP197_PE_ICE_UENG_INIT_ALIGN_MASK) |
379
+ EIP197_PE_ICE_UENG_DEBUG_RESET;
380
+ writel(val, EIP197_PE(priv) + EIP197_PE_ICE_FPP_CTRL(pe));
381
+
382
+ /* Start IPUE microengines */
383
+ if (minifw)
384
+ val = 0;
385
+ else
386
+ val = EIP197_PE_ICE_UENG_START_OFFSET((ipuesz - 1) &
387
+ EIP197_PE_ICE_UENG_INIT_ALIGN_MASK) |
388
+ EIP197_PE_ICE_UENG_DEBUG_RESET;
389
+ writel(val, EIP197_PE(priv) + EIP197_PE_ICE_PUE_CTRL(pe));
390
+ }
391
+
392
+ /* For miniFW startup, there is no initialization, so always succeed */
393
+ if (minifw)
394
+ return true;
395
+
396
+ /* Wait until all the firmwares have properly started up */
397
+ if (!poll_fw_ready(priv, 1))
398
+ return false;
399
+ if (!poll_fw_ready(priv, 0))
400
+ return false;
401
+
402
+ return true;
403
+}
404
+
405
+static int eip197_load_firmwares(struct safexcel_crypto_priv *priv)
406
+{
407
+ const char *fw_name[] = {"ifpp.bin", "ipue.bin"};
408
+ const struct firmware *fw[FW_NB];
409
+ char fw_path[37], *dir = NULL;
410
+ int i, j, ret = 0, pe;
411
+ int ipuesz, ifppsz, minifw = 0;
412
+
413
+ if (priv->version == EIP197D_MRVL)
414
+ dir = "eip197d";
415
+ else if (priv->version == EIP197B_MRVL ||
416
+ priv->version == EIP197_DEVBRD)
417
+ dir = "eip197b";
418
+ else
419
+ return -ENODEV;
420
+
421
+retry_fw:
422
+ for (i = 0; i < FW_NB; i++) {
423
+ snprintf(fw_path, 37, "inside-secure/%s/%s", dir, fw_name[i]);
424
+ ret = firmware_request_nowarn(&fw[i], fw_path, priv->dev);
425
+ if (ret) {
426
+ if (minifw || priv->version != EIP197B_MRVL)
427
+ goto release_fw;
428
+
429
+ /* Fallback to the old firmware location for the
430
+ * EIP197b.
431
+ */
432
+ ret = firmware_request_nowarn(&fw[i], fw_name[i],
433
+ priv->dev);
434
+ if (ret)
435
+ goto release_fw;
436
+ }
437
+ }
438
+
439
+ eip197_init_firmware(priv);
440
+
441
+ ifppsz = eip197_write_firmware(priv, fw[FW_IFPP]);
442
+
443
+ /* Enable access to IPUE program memories */
444
+ for (pe = 0; pe < priv->config.pes; pe++)
445
+ writel(EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN,
446
+ EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));
447
+
448
+ ipuesz = eip197_write_firmware(priv, fw[FW_IPUE]);
449
+
450
+ if (eip197_start_firmware(priv, ipuesz, ifppsz, minifw)) {
451
+ dev_dbg(priv->dev, "Firmware loaded successfully\n");
452
+ return 0;
453
+ }
454
+
455
+ ret = -ENODEV;
200456
201457 release_fw:
202458 for (j = 0; j < i; j++)
203459 release_firmware(fw[j]);
460
+
461
+ if (!minifw) {
462
+ /* Retry with minifw path */
463
+ dev_dbg(priv->dev, "Firmware set not (fully) present or init failed, falling back to BCLA mode\n");
464
+ dir = "eip197_minifw";
465
+ minifw = 1;
466
+ goto retry_fw;
467
+ }
468
+
469
+ dev_dbg(priv->dev, "Firmware load failed.\n");
204470
205471 return ret;
206472 }
207473
208474 static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv)
209475 {
210
- u32 hdw, cd_size_rnd, val;
211
- int i;
476
+ u32 cd_size_rnd, val;
477
+ int i, cd_fetch_cnt;
212478
213
- hdw = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
214
- hdw &= GENMASK(27, 25);
215
- hdw >>= 25;
216
-
217
- cd_size_rnd = (priv->config.cd_size + (BIT(hdw) - 1)) >> hdw;
479
+ cd_size_rnd = (priv->config.cd_size +
480
+ (BIT(priv->hwconfig.hwdataw) - 1)) >>
481
+ priv->hwconfig.hwdataw;
482
+ /* determine number of CD's we can fetch into the CD FIFO as 1 block */
483
+ if (priv->flags & SAFEXCEL_HW_EIP197) {
484
+ /* EIP197: try to fetch enough in 1 go to keep all pipes busy */
485
+ cd_fetch_cnt = (1 << priv->hwconfig.hwcfsize) / cd_size_rnd;
486
+ cd_fetch_cnt = min_t(uint, cd_fetch_cnt,
487
+ (priv->config.pes * EIP197_FETCH_DEPTH));
488
+ } else {
489
+ /* for the EIP97, just fetch all that fits minus 1 */
490
+ cd_fetch_cnt = ((1 << priv->hwconfig.hwcfsize) /
491
+ cd_size_rnd) - 1;
492
+ }
493
+ /*
494
+ * Since we're using command desc's way larger than formally specified,
495
+ * we need to check whether we can fit even 1 for low-end EIP196's!
496
+ */
497
+ if (!cd_fetch_cnt) {
498
+ dev_err(priv->dev, "Unable to fit even 1 command desc!\n");
499
+ return -ENODEV;
500
+ }
218501
219502 for (i = 0; i < priv->config.rings; i++) {
220503 /* ring base address */
....@@ -223,11 +506,12 @@
223506 writel(upper_32_bits(priv->ring[i].cdr.base_dma),
224507 EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
225508
226
- writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.cd_offset << 16) |
227
- priv->config.cd_size,
509
+ writel(EIP197_xDR_DESC_MODE_64BIT | EIP197_CDR_DESC_MODE_ADCP |
510
+ (priv->config.cd_offset << 14) | priv->config.cd_size,
228511 EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
229
- writel(((EIP197_FETCH_COUNT * (cd_size_rnd << hdw)) << 16) |
230
- (EIP197_FETCH_COUNT * priv->config.cd_offset),
512
+ writel(((cd_fetch_cnt *
513
+ (cd_size_rnd << priv->hwconfig.hwdataw)) << 16) |
514
+ (cd_fetch_cnt * (priv->config.cd_offset / sizeof(u32))),
231515 EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);
232516
233517 /* Configure DMA tx control */
....@@ -245,14 +529,23 @@
245529
246530 static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv)
247531 {
248
- u32 hdw, rd_size_rnd, val;
249
- int i;
532
+ u32 rd_size_rnd, val;
533
+ int i, rd_fetch_cnt;
250534
251
- hdw = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
252
- hdw &= GENMASK(27, 25);
253
- hdw >>= 25;
254
-
255
- rd_size_rnd = (priv->config.rd_size + (BIT(hdw) - 1)) >> hdw;
535
+ /* determine number of RD's we can fetch into the FIFO as one block */
536
+ rd_size_rnd = (EIP197_RD64_FETCH_SIZE +
537
+ (BIT(priv->hwconfig.hwdataw) - 1)) >>
538
+ priv->hwconfig.hwdataw;
539
+ if (priv->flags & SAFEXCEL_HW_EIP197) {
540
+ /* EIP197: try to fetch enough in 1 go to keep all pipes busy */
541
+ rd_fetch_cnt = (1 << priv->hwconfig.hwrfsize) / rd_size_rnd;
542
+ rd_fetch_cnt = min_t(uint, rd_fetch_cnt,
543
+ (priv->config.pes * EIP197_FETCH_DEPTH));
544
+ } else {
545
+ /* for the EIP97, just fetch all that fits minus 1 */
546
+ rd_fetch_cnt = ((1 << priv->hwconfig.hwrfsize) /
547
+ rd_size_rnd) - 1;
548
+ }
256549
257550 for (i = 0; i < priv->config.rings; i++) {
258551 /* ring base address */
....@@ -261,12 +554,13 @@
261554 writel(upper_32_bits(priv->ring[i].rdr.base_dma),
262555 EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
263556
264
- writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.rd_offset << 16) |
557
+ writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.rd_offset << 14) |
265558 priv->config.rd_size,
266559 EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
267560
268
- writel(((EIP197_FETCH_COUNT * (rd_size_rnd << hdw)) << 16) |
269
- (EIP197_FETCH_COUNT * priv->config.rd_offset),
561
+ writel(((rd_fetch_cnt *
562
+ (rd_size_rnd << priv->hwconfig.hwdataw)) << 16) |
563
+ (rd_fetch_cnt * (priv->config.rd_offset / sizeof(u32))),
270564 EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);
271565
272566 /* Configure DMA tx control */
....@@ -291,23 +585,21 @@
291585
292586 static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
293587 {
294
- u32 version, val;
295
- int i, ret, pe;
588
+ u32 val;
589
+ int i, ret, pe, opbuflo, opbufhi;
296590
297
- /* Determine endianess and configure byte swap */
298
- version = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_VERSION);
299
- val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
591
+ dev_dbg(priv->dev, "HW init: using %d pipe(s) and %d ring(s)\n",
592
+ priv->config.pes, priv->config.rings);
300593
301
- if ((version & 0xffff) == EIP197_HIA_VERSION_BE)
302
- val |= EIP197_MST_CTRL_BYTE_SWAP;
303
- else if (((version >> 16) & 0xffff) == EIP197_HIA_VERSION_LE)
304
- val |= (EIP197_MST_CTRL_NO_BYTE_SWAP >> 24);
305
-
306
- /* For EIP197 set maximum number of TX commands to 2^5 = 32 */
307
- if (priv->version == EIP197B || priv->version == EIP197D)
594
+ /*
595
+ * For EIP197's only set maximum number of TX commands to 2^5 = 32
596
+ * Skip for the EIP97 as it does not have this field.
597
+ */
598
+ if (priv->flags & SAFEXCEL_HW_EIP197) {
599
+ val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
308600 val |= EIP197_MST_CTRL_TX_MAX_CMD(5);
309
-
310
- writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
601
+ writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
602
+ }
311603
312604 /* Configure wr/rd cache values */
313605 writel(EIP197_MST_CTRL_RD_CACHE(RD_CACHE_4BITS) |
....@@ -330,11 +622,10 @@
330622 writel(EIP197_DxE_THR_CTRL_RESET_PE,
331623 EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));
332624
333
- if (priv->version == EIP197B || priv->version == EIP197D) {
334
- /* Reset HIA input interface arbiter */
625
+ if (priv->flags & EIP197_PE_ARB)
626
+ /* Reset HIA input interface arbiter (if present) */
335627 writel(EIP197_HIA_RA_PE_CTRL_RESET,
336628 EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));
337
- }
338629
339630 /* DMA transfer size to use */
340631 val = EIP197_HIA_DFE_CFG_DIS_DEBUG;
....@@ -357,12 +648,11 @@
357648 EIP197_PE_IN_xBUF_THRES_MAX(7),
358649 EIP197_PE(priv) + EIP197_PE_IN_TBUF_THRES(pe));
359650
360
- if (priv->version == EIP197B || priv->version == EIP197D) {
651
+ if (priv->flags & SAFEXCEL_HW_EIP197)
361652 /* enable HIA input interface arbiter and rings */
362653 writel(EIP197_HIA_RA_PE_CTRL_EN |
363654 GENMASK(priv->config.rings - 1, 0),
364655 EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));
365
- }
366656
367657 /* Data Store Engine configuration */
368658
....@@ -376,15 +666,22 @@
376666 ;
377667
378668 /* DMA transfer size to use */
669
+ if (priv->hwconfig.hwnumpes > 4) {
670
+ opbuflo = 9;
671
+ opbufhi = 10;
672
+ } else {
673
+ opbuflo = 7;
674
+ opbufhi = 8;
675
+ }
379676 val = EIP197_HIA_DSE_CFG_DIS_DEBUG;
380
- val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(7) |
381
- EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(8);
677
+ val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(opbuflo) |
678
+ EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(opbufhi);
382679 val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS);
383680 val |= EIP197_HIA_DSE_CFG_ALWAYS_BUFFERABLE;
384
- /* FIXME: instability issues can occur for EIP97 but disabling it impact
385
- * performances.
681
+ /* FIXME: instability issues can occur for EIP97 but disabling
682
+ * it impacts performance.
386683 */
387
- if (priv->version == EIP197B || priv->version == EIP197D)
684
+ if (priv->flags & SAFEXCEL_HW_EIP197)
388685 val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR;
389686 writel(val, EIP197_HIA_DSE(priv) + EIP197_HIA_DSE_CFG(pe));
390687
....@@ -392,23 +689,23 @@
392689 writel(0, EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));
393690
394691 /* Configure the procesing engine thresholds */
395
- writel(EIP197_PE_OUT_DBUF_THRES_MIN(7) |
396
- EIP197_PE_OUT_DBUF_THRES_MAX(8),
692
+ writel(EIP197_PE_OUT_DBUF_THRES_MIN(opbuflo) |
693
+ EIP197_PE_OUT_DBUF_THRES_MAX(opbufhi),
397694 EIP197_PE(priv) + EIP197_PE_OUT_DBUF_THRES(pe));
398695
399696 /* Processing Engine configuration */
400697
401
- /* H/W capabilities selection */
402
- val = EIP197_FUNCTION_RSVD;
403
- val |= EIP197_PROTOCOL_ENCRYPT_ONLY | EIP197_PROTOCOL_HASH_ONLY;
404
- val |= EIP197_PROTOCOL_ENCRYPT_HASH | EIP197_PROTOCOL_HASH_DECRYPT;
405
- val |= EIP197_ALG_DES_ECB | EIP197_ALG_DES_CBC;
406
- val |= EIP197_ALG_3DES_ECB | EIP197_ALG_3DES_CBC;
407
- val |= EIP197_ALG_AES_ECB | EIP197_ALG_AES_CBC;
408
- val |= EIP197_ALG_MD5 | EIP197_ALG_HMAC_MD5;
409
- val |= EIP197_ALG_SHA1 | EIP197_ALG_HMAC_SHA1;
410
- val |= EIP197_ALG_SHA2 | EIP197_ALG_HMAC_SHA2;
411
- writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION_EN(pe));
698
+ /* Token & context configuration */
699
+ val = EIP197_PE_EIP96_TOKEN_CTRL_CTX_UPDATES |
700
+ EIP197_PE_EIP96_TOKEN_CTRL_NO_TOKEN_WAIT |
701
+ EIP197_PE_EIP96_TOKEN_CTRL_ENABLE_TIMEOUT;
702
+ writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_TOKEN_CTRL(pe));
703
+
704
+ /* H/W capabilities selection: just enable everything */
705
+ writel(EIP197_FUNCTION_ALL,
706
+ EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION_EN(pe));
707
+ writel(EIP197_FUNCTION_ALL,
708
+ EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION2_EN(pe));
412709 }
413710
414711 /* Command Descriptor Rings prepare */
....@@ -433,7 +730,7 @@
433730 writel(0,
434731 EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);
435732
436
- writel((EIP197_DEFAULT_RING_SIZE * priv->config.cd_offset) << 2,
733
+ writel((EIP197_DEFAULT_RING_SIZE * priv->config.cd_offset),
437734 EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
438735 }
439736
....@@ -456,7 +753,7 @@
456753 EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);
457754
458755 /* Ring size */
459
- writel((EIP197_DEFAULT_RING_SIZE * priv->config.rd_offset) << 2,
756
+ writel((EIP197_DEFAULT_RING_SIZE * priv->config.rd_offset),
460757 EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
461758 }
462759
....@@ -473,18 +770,28 @@
473770 /* Clear any HIA interrupt */
474771 writel(GENMASK(30, 20), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);
475772
476
- if (priv->version == EIP197B || priv->version == EIP197D) {
477
- eip197_trc_cache_init(priv);
773
+ if (priv->flags & EIP197_SIMPLE_TRC) {
774
+ writel(EIP197_STRC_CONFIG_INIT |
775
+ EIP197_STRC_CONFIG_LARGE_REC(EIP197_CS_TRC_REC_WC) |
776
+ EIP197_STRC_CONFIG_SMALL_REC(EIP197_CS_TRC_REC_WC),
777
+ priv->base + EIP197_STRC_CONFIG);
778
+ writel(EIP197_PE_EIP96_TOKEN_CTRL2_CTX_DONE,
779
+ EIP197_PE(priv) + EIP197_PE_EIP96_TOKEN_CTRL2(0));
780
+ } else if (priv->flags & SAFEXCEL_HW_EIP197) {
781
+ ret = eip197_trc_cache_init(priv);
782
+ if (ret)
783
+ return ret;
784
+ }
478785
786
+ if (priv->flags & EIP197_ICE) {
479787 ret = eip197_load_firmwares(priv);
480788 if (ret)
481789 return ret;
482790 }
483791
484
- safexcel_hw_setup_cdesc_rings(priv);
485
- safexcel_hw_setup_rdesc_rings(priv);
486
-
487
- return 0;
792
+ return safexcel_hw_setup_cdesc_rings(priv) ?:
793
+ safexcel_hw_setup_rdesc_rings(priv) ?:
794
+ 0;
488795 }
489796
490797 /* Called with ring's lock taken */
....@@ -572,27 +879,48 @@
572879 spin_unlock_bh(&priv->ring[ring].lock);
573880
574881 /* let the RDR know we have pending descriptors */
575
- writel((rdesc * priv->config.rd_offset) << 2,
882
+ writel((rdesc * priv->config.rd_offset),
576883 EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
577884
578885 /* let the CDR know we have pending descriptors */
579
- writel((cdesc * priv->config.cd_offset) << 2,
886
+ writel((cdesc * priv->config.cd_offset),
580887 EIP197_HIA_CDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
581888 }
582889
583890 inline int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
584
- struct safexcel_result_desc *rdesc)
891
+ void *rdp)
585892 {
586
- if (likely(!rdesc->result_data.error_code))
893
+ struct safexcel_result_desc *rdesc = rdp;
894
+ struct result_data_desc *result_data = rdp + priv->config.res_offset;
895
+
896
+ if (likely((!rdesc->last_seg) || /* Rest only valid if last seg! */
897
+ ((!rdesc->descriptor_overflow) &&
898
+ (!rdesc->buffer_overflow) &&
899
+ (!result_data->error_code))))
587900 return 0;
588901
589
- if (rdesc->result_data.error_code & 0x407f) {
590
- /* Fatal error (bits 0-7, 14) */
902
+ if (rdesc->descriptor_overflow)
903
+ dev_err(priv->dev, "Descriptor overflow detected");
904
+
905
+ if (rdesc->buffer_overflow)
906
+ dev_err(priv->dev, "Buffer overflow detected");
907
+
908
+ if (result_data->error_code & 0x4066) {
909
+ /* Fatal error (bits 1,2,5,6 & 14) */
591910 dev_err(priv->dev,
592
- "cipher: result: result descriptor error (%d)\n",
593
- rdesc->result_data.error_code);
911
+ "result descriptor error (%x)",
912
+ result_data->error_code);
913
+
594914 return -EIO;
595
- } else if (rdesc->result_data.error_code == BIT(9)) {
915
+ } else if (result_data->error_code &
916
+ (BIT(7) | BIT(4) | BIT(3) | BIT(0))) {
917
+ /*
918
+ * Give priority over authentication fails:
919
+ * Blocksize, length & overflow errors,
920
+ * something wrong with the input!
921
+ */
922
+ return -EINVAL;
923
+ } else if (result_data->error_code & BIT(9)) {
596924 /* Authentication failed */
597925 return -EBADMSG;
598926 }
....@@ -651,16 +979,18 @@
651979 {
652980 struct safexcel_command_desc *cdesc;
653981 struct safexcel_result_desc *rdesc;
982
+ struct safexcel_token *dmmy;
654983 int ret = 0;
655984
656985 /* Prepare command descriptor */
657
- cdesc = safexcel_add_cdesc(priv, ring, true, true, 0, 0, 0, ctxr_dma);
986
+ cdesc = safexcel_add_cdesc(priv, ring, true, true, 0, 0, 0, ctxr_dma,
987
+ &dmmy);
658988 if (IS_ERR(cdesc))
659989 return PTR_ERR(cdesc);
660990
661991 cdesc->control_data.type = EIP197_TYPE_EXTENDED;
662992 cdesc->control_data.options = 0;
663
- cdesc->control_data.refresh = 0;
993
+ cdesc->control_data.context_lo &= ~EIP197_CONTEXT_SIZE_MASK;
664994 cdesc->control_data.control0 = CONTEXT_CONTROL_INV_TR;
665995
666996 /* Prepare result descriptor */
....@@ -705,7 +1035,8 @@
7051035 ndesc = ctx->handle_result(priv, ring, req,
7061036 &should_complete, &ret);
7071037 if (ndesc < 0) {
708
- dev_err(priv->dev, "failed to handle result (%d)", ndesc);
1038
+ dev_err(priv->dev, "failed to handle result (%d)\n",
1039
+ ndesc);
7091040 goto acknowledge;
7101041 }
7111042
....@@ -720,11 +1051,10 @@
7201051 }
7211052
7221053 acknowledge:
723
- if (i) {
1054
+ if (i)
7241055 writel(EIP197_xDR_PROC_xD_PKT(i) |
725
- EIP197_xDR_PROC_xD_COUNT(tot_descs * priv->config.rd_offset),
1056
+ (tot_descs * priv->config.rd_offset),
7261057 EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
727
- }
7281058
7291059 /* If the number of requests overflowed the counter, try to proceed more
7301060 * requests.
....@@ -778,7 +1108,7 @@
7781108 * reinitialized. This should not happen under
7791109 * normal circumstances.
7801110 */
781
- dev_err(priv->dev, "RDR: fatal error.");
1111
+ dev_err(priv->dev, "RDR: fatal error.\n");
7821112 } else if (likely(stat & EIP197_xDR_THRESH)) {
7831113 rc = IRQ_WAKE_THREAD;
7841114 }
....@@ -808,25 +1138,54 @@
8081138 return IRQ_HANDLED;
8091139 }
8101140
811
-static int safexcel_request_ring_irq(struct platform_device *pdev, const char *name,
1141
+static int safexcel_request_ring_irq(void *pdev, int irqid,
1142
+ int is_pci_dev,
1143
+ int ring_id,
8121144 irq_handler_t handler,
8131145 irq_handler_t threaded_handler,
8141146 struct safexcel_ring_irq_data *ring_irq_priv)
8151147 {
816
- int ret, irq = platform_get_irq_byname(pdev, name);
1148
+ int ret, irq, cpu;
1149
+ struct device *dev;
8171150
818
- if (irq < 0) {
819
- dev_err(&pdev->dev, "unable to get IRQ '%s'\n", name);
820
- return irq;
1151
+ if (IS_ENABLED(CONFIG_PCI) && is_pci_dev) {
1152
+ struct pci_dev *pci_pdev = pdev;
1153
+
1154
+ dev = &pci_pdev->dev;
1155
+ irq = pci_irq_vector(pci_pdev, irqid);
1156
+ if (irq < 0) {
1157
+ dev_err(dev, "unable to get device MSI IRQ %d (err %d)\n",
1158
+ irqid, irq);
1159
+ return irq;
1160
+ }
1161
+ } else if (IS_ENABLED(CONFIG_OF)) {
1162
+ struct platform_device *plf_pdev = pdev;
1163
+ char irq_name[6] = {0}; /* "ringX\0" */
1164
+
1165
+ snprintf(irq_name, 6, "ring%d", irqid);
1166
+ dev = &plf_pdev->dev;
1167
+ irq = platform_get_irq_byname(plf_pdev, irq_name);
1168
+
1169
+ if (irq < 0) {
1170
+ dev_err(dev, "unable to get IRQ '%s' (err %d)\n",
1171
+ irq_name, irq);
1172
+ return irq;
1173
+ }
1174
+ } else {
1175
+ return -ENXIO;
8211176 }
8221177
823
- ret = devm_request_threaded_irq(&pdev->dev, irq, handler,
1178
+ ret = devm_request_threaded_irq(dev, irq, handler,
8241179 threaded_handler, IRQF_ONESHOT,
825
- dev_name(&pdev->dev), ring_irq_priv);
1180
+ dev_name(dev), ring_irq_priv);
8261181 if (ret) {
827
- dev_err(&pdev->dev, "unable to request IRQ %d\n", irq);
1182
+ dev_err(dev, "unable to request IRQ %d\n", irq);
8281183 return ret;
8291184 }
1185
+
1186
+ /* Set affinity */
1187
+ cpu = cpumask_local_spread(ring_id, NUMA_NO_NODE);
1188
+ irq_set_affinity_hint(irq, get_cpu_mask(cpu));
8301189
8311190 return irq;
8321191 }
....@@ -838,6 +1197,9 @@
8381197 &safexcel_alg_cbc_des3_ede,
8391198 &safexcel_alg_ecb_aes,
8401199 &safexcel_alg_cbc_aes,
1200
+ &safexcel_alg_cfb_aes,
1201
+ &safexcel_alg_ofb_aes,
1202
+ &safexcel_alg_ctr_aes,
8411203 &safexcel_alg_md5,
8421204 &safexcel_alg_sha1,
8431205 &safexcel_alg_sha224,
....@@ -855,6 +1217,53 @@
8551217 &safexcel_alg_authenc_hmac_sha256_cbc_aes,
8561218 &safexcel_alg_authenc_hmac_sha384_cbc_aes,
8571219 &safexcel_alg_authenc_hmac_sha512_cbc_aes,
1220
+ &safexcel_alg_authenc_hmac_sha1_cbc_des3_ede,
1221
+ &safexcel_alg_authenc_hmac_sha1_ctr_aes,
1222
+ &safexcel_alg_authenc_hmac_sha224_ctr_aes,
1223
+ &safexcel_alg_authenc_hmac_sha256_ctr_aes,
1224
+ &safexcel_alg_authenc_hmac_sha384_ctr_aes,
1225
+ &safexcel_alg_authenc_hmac_sha512_ctr_aes,
1226
+ &safexcel_alg_xts_aes,
1227
+ &safexcel_alg_gcm,
1228
+ &safexcel_alg_ccm,
1229
+ &safexcel_alg_crc32,
1230
+ &safexcel_alg_cbcmac,
1231
+ &safexcel_alg_xcbcmac,
1232
+ &safexcel_alg_cmac,
1233
+ &safexcel_alg_chacha20,
1234
+ &safexcel_alg_chachapoly,
1235
+ &safexcel_alg_chachapoly_esp,
1236
+ &safexcel_alg_sm3,
1237
+ &safexcel_alg_hmac_sm3,
1238
+ &safexcel_alg_ecb_sm4,
1239
+ &safexcel_alg_cbc_sm4,
1240
+ &safexcel_alg_ofb_sm4,
1241
+ &safexcel_alg_cfb_sm4,
1242
+ &safexcel_alg_ctr_sm4,
1243
+ &safexcel_alg_authenc_hmac_sha1_cbc_sm4,
1244
+ &safexcel_alg_authenc_hmac_sm3_cbc_sm4,
1245
+ &safexcel_alg_authenc_hmac_sha1_ctr_sm4,
1246
+ &safexcel_alg_authenc_hmac_sm3_ctr_sm4,
1247
+ &safexcel_alg_sha3_224,
1248
+ &safexcel_alg_sha3_256,
1249
+ &safexcel_alg_sha3_384,
1250
+ &safexcel_alg_sha3_512,
1251
+ &safexcel_alg_hmac_sha3_224,
1252
+ &safexcel_alg_hmac_sha3_256,
1253
+ &safexcel_alg_hmac_sha3_384,
1254
+ &safexcel_alg_hmac_sha3_512,
1255
+ &safexcel_alg_authenc_hmac_sha1_cbc_des,
1256
+ &safexcel_alg_authenc_hmac_sha256_cbc_des3_ede,
1257
+ &safexcel_alg_authenc_hmac_sha224_cbc_des3_ede,
1258
+ &safexcel_alg_authenc_hmac_sha512_cbc_des3_ede,
1259
+ &safexcel_alg_authenc_hmac_sha384_cbc_des3_ede,
1260
+ &safexcel_alg_authenc_hmac_sha256_cbc_des,
1261
+ &safexcel_alg_authenc_hmac_sha224_cbc_des,
1262
+ &safexcel_alg_authenc_hmac_sha512_cbc_des,
1263
+ &safexcel_alg_authenc_hmac_sha384_cbc_des,
1264
+ &safexcel_alg_rfc4106_gcm,
1265
+ &safexcel_alg_rfc4543_gcm,
1266
+ &safexcel_alg_rfc4309_ccm,
8581267 };
8591268
8601269 static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv)
....@@ -864,7 +1273,10 @@
8641273 for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
8651274 safexcel_algs[i]->priv = priv;
8661275
867
- if (!(safexcel_algs[i]->engines & priv->version))
1276
+ /* Do we have all required base algorithms available? */
1277
+ if ((safexcel_algs[i]->algo_mask & priv->hwconfig.algo_flags) !=
1278
+ safexcel_algs[i]->algo_mask)
1279
+ /* No, so don't register this ciphersuite */
8681280 continue;
8691281
8701282 if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
....@@ -882,7 +1294,10 @@
8821294
8831295 fail:
8841296 for (j = 0; j < i; j++) {
885
- if (!(safexcel_algs[j]->engines & priv->version))
1297
+ /* Do we have all required base algorithms available? */
1298
+ if ((safexcel_algs[j]->algo_mask & priv->hwconfig.algo_flags) !=
1299
+ safexcel_algs[j]->algo_mask)
1300
+ /* No, so don't unregister this ciphersuite */
8861301 continue;
8871302
8881303 if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
....@@ -901,7 +1316,10 @@
9011316 int i;
9021317
9031318 for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
904
- if (!(safexcel_algs[i]->engines & priv->version))
1319
+ /* Do we have all required base algorithms available? */
1320
+ if ((safexcel_algs[i]->algo_mask & priv->hwconfig.algo_flags) !=
1321
+ safexcel_algs[i]->algo_mask)
1322
+ /* No, so don't unregister this ciphersuite */
9051323 continue;
9061324
9071325 if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
....@@ -915,41 +1333,37 @@
9151333
9161334 static void safexcel_configure(struct safexcel_crypto_priv *priv)
9171335 {
918
- u32 val, mask = 0;
1336
+ u32 mask = BIT(priv->hwconfig.hwdataw) - 1;
9191337
920
- val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
1338
+ priv->config.pes = priv->hwconfig.hwnumpes;
1339
+ priv->config.rings = min_t(u32, priv->hwconfig.hwnumrings, max_rings);
1340
+ /* Cannot currently support more rings than we have ring AICs! */
1341
+ priv->config.rings = min_t(u32, priv->config.rings,
1342
+ priv->hwconfig.hwnumraic);
9211343
922
- /* Read number of PEs from the engine */
923
- switch (priv->version) {
924
- case EIP197B:
925
- case EIP197D:
926
- mask = EIP197_N_PES_MASK;
927
- break;
928
- default:
929
- mask = EIP97_N_PES_MASK;
930
- }
931
- priv->config.pes = (val >> EIP197_N_PES_OFFSET) & mask;
932
-
933
- val = (val & GENMASK(27, 25)) >> 25;
934
- mask = BIT(val) - 1;
935
-
936
- val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
937
- priv->config.rings = min_t(u32, val & GENMASK(3, 0), max_rings);
938
-
939
- priv->config.cd_size = (sizeof(struct safexcel_command_desc) / sizeof(u32));
1344
+ priv->config.cd_size = EIP197_CD64_FETCH_SIZE;
9401345 priv->config.cd_offset = (priv->config.cd_size + mask) & ~mask;
1346
+ priv->config.cdsh_offset = (EIP197_MAX_TOKENS + mask) & ~mask;
9411347
942
- priv->config.rd_size = (sizeof(struct safexcel_result_desc) / sizeof(u32));
1348
+ /* res token is behind the descr, but ofs must be rounded to buswdth */
1349
+ priv->config.res_offset = (EIP197_RD64_FETCH_SIZE + mask) & ~mask;
1350
+ /* now the size of the descr is this 1st part plus the result struct */
1351
+ priv->config.rd_size = priv->config.res_offset +
1352
+ EIP197_RD64_RESULT_SIZE;
9431353 priv->config.rd_offset = (priv->config.rd_size + mask) & ~mask;
1354
+
1355
+ /* convert dwords to bytes */
1356
+ priv->config.cd_offset *= sizeof(u32);
1357
+ priv->config.cdsh_offset *= sizeof(u32);
1358
+ priv->config.rd_offset *= sizeof(u32);
1359
+ priv->config.res_offset *= sizeof(u32);
9441360 }
9451361
9461362 static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv)
9471363 {
9481364 struct safexcel_register_offsets *offsets = &priv->offsets;
9491365
950
- switch (priv->version) {
951
- case EIP197B:
952
- case EIP197D:
1366
+ if (priv->flags & SAFEXCEL_HW_EIP197) {
9531367 offsets->hia_aic = EIP197_HIA_AIC_BASE;
9541368 offsets->hia_aic_g = EIP197_HIA_AIC_G_BASE;
9551369 offsets->hia_aic_r = EIP197_HIA_AIC_R_BASE;
....@@ -960,8 +1374,8 @@
9601374 offsets->hia_dse_thr = EIP197_HIA_DSE_THR_BASE;
9611375 offsets->hia_gen_cfg = EIP197_HIA_GEN_CFG_BASE;
9621376 offsets->pe = EIP197_PE_BASE;
963
- break;
964
- case EIP97IES:
1377
+ offsets->global = EIP197_GLOBAL_BASE;
1378
+ } else {
9651379 offsets->hia_aic = EIP97_HIA_AIC_BASE;
9661380 offsets->hia_aic_g = EIP97_HIA_AIC_G_BASE;
9671381 offsets->hia_aic_r = EIP97_HIA_AIC_R_BASE;
....@@ -972,16 +1386,365 @@
9721386 offsets->hia_dse_thr = EIP97_HIA_DSE_THR_BASE;
9731387 offsets->hia_gen_cfg = EIP97_HIA_GEN_CFG_BASE;
9741388 offsets->pe = EIP97_PE_BASE;
975
- break;
1389
+ offsets->global = EIP97_GLOBAL_BASE;
9761390 }
9771391 }
1392
+
1393
+/*
1394
+ * Generic part of probe routine, shared by platform and PCI driver
1395
+ *
1396
+ * Assumes IO resources have been mapped, private data mem has been allocated,
1397
+ * clocks have been enabled, device pointer has been assigned etc.
1398
+ *
1399
+ */
1400
+static int safexcel_probe_generic(void *pdev,
1401
+ struct safexcel_crypto_priv *priv,
1402
+ int is_pci_dev)
1403
+{
1404
+ struct device *dev = priv->dev;
1405
+ u32 peid, version, mask, val, hiaopt, hwopt, peopt;
1406
+ int i, ret, hwctg;
1407
+
1408
+ priv->context_pool = dmam_pool_create("safexcel-context", dev,
1409
+ sizeof(struct safexcel_context_record),
1410
+ 1, 0);
1411
+ if (!priv->context_pool)
1412
+ return -ENOMEM;
1413
+
1414
+ /*
1415
+ * First try the EIP97 HIA version regs
1416
+ * For the EIP197, this is guaranteed to NOT return any of the test
1417
+ * values
1418
+ */
1419
+ version = readl(priv->base + EIP97_HIA_AIC_BASE + EIP197_HIA_VERSION);
1420
+
1421
+ mask = 0; /* do not swap */
1422
+ if (EIP197_REG_LO16(version) == EIP197_HIA_VERSION_LE) {
1423
+ priv->hwconfig.hiaver = EIP197_VERSION_MASK(version);
1424
+ } else if (EIP197_REG_HI16(version) == EIP197_HIA_VERSION_BE) {
1425
+ /* read back byte-swapped, so complement byte swap bits */
1426
+ mask = EIP197_MST_CTRL_BYTE_SWAP_BITS;
1427
+ priv->hwconfig.hiaver = EIP197_VERSION_SWAP(version);
1428
+ } else {
1429
+ /* So it wasn't an EIP97 ... maybe it's an EIP197? */
1430
+ version = readl(priv->base + EIP197_HIA_AIC_BASE +
1431
+ EIP197_HIA_VERSION);
1432
+ if (EIP197_REG_LO16(version) == EIP197_HIA_VERSION_LE) {
1433
+ priv->hwconfig.hiaver = EIP197_VERSION_MASK(version);
1434
+ priv->flags |= SAFEXCEL_HW_EIP197;
1435
+ } else if (EIP197_REG_HI16(version) ==
1436
+ EIP197_HIA_VERSION_BE) {
1437
+ /* read back byte-swapped, so complement swap bits */
1438
+ mask = EIP197_MST_CTRL_BYTE_SWAP_BITS;
1439
+ priv->hwconfig.hiaver = EIP197_VERSION_SWAP(version);
1440
+ priv->flags |= SAFEXCEL_HW_EIP197;
1441
+ } else {
1442
+ return -ENODEV;
1443
+ }
1444
+ }
1445
+
1446
+ /* Now initialize the reg offsets based on the probing info so far */
1447
+ safexcel_init_register_offsets(priv);
1448
+
1449
+ /*
1450
+ * If the version was read byte-swapped, we need to flip the device
1451
+ * swapping Keep in mind here, though, that what we write will also be
1452
+ * byte-swapped ...
1453
+ */
1454
+ if (mask) {
1455
+ val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
1456
+ val = val ^ (mask >> 24); /* toggle byte swap bits */
1457
+ writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
1458
+ }
1459
+
1460
+ /*
1461
+ * We're not done probing yet! We may fall through to here if no HIA
1462
+ * was found at all. So, with the endianness presumably correct now and
1463
+ * the offsets setup, *really* probe for the EIP97/EIP197.
1464
+ */
1465
+ version = readl(EIP197_GLOBAL(priv) + EIP197_VERSION);
1466
+ if (((priv->flags & SAFEXCEL_HW_EIP197) &&
1467
+ (EIP197_REG_LO16(version) != EIP197_VERSION_LE) &&
1468
+ (EIP197_REG_LO16(version) != EIP196_VERSION_LE)) ||
1469
+ ((!(priv->flags & SAFEXCEL_HW_EIP197) &&
1470
+ (EIP197_REG_LO16(version) != EIP97_VERSION_LE)))) {
1471
+ /*
1472
+ * We did not find the device that matched our initial probing
1473
+ * (or our initial probing failed) Report appropriate error.
1474
+ */
1475
+ dev_err(priv->dev, "Probing for EIP97/EIP19x failed - no such device (read %08x)\n",
1476
+ version);
1477
+ return -ENODEV;
1478
+ }
1479
+
1480
+ priv->hwconfig.hwver = EIP197_VERSION_MASK(version);
1481
+ hwctg = version >> 28;
1482
+ peid = version & 255;
1483
+
1484
+ /* Detect EIP206 processing pipe */
1485
+ version = readl(EIP197_PE(priv) + + EIP197_PE_VERSION(0));
1486
+ if (EIP197_REG_LO16(version) != EIP206_VERSION_LE) {
1487
+ dev_err(priv->dev, "EIP%d: EIP206 not detected\n", peid);
1488
+ return -ENODEV;
1489
+ }
1490
+ priv->hwconfig.ppver = EIP197_VERSION_MASK(version);
1491
+
1492
+ /* Detect EIP96 packet engine and version */
1493
+ version = readl(EIP197_PE(priv) + EIP197_PE_EIP96_VERSION(0));
1494
+ if (EIP197_REG_LO16(version) != EIP96_VERSION_LE) {
1495
+ dev_err(dev, "EIP%d: EIP96 not detected.\n", peid);
1496
+ return -ENODEV;
1497
+ }
1498
+ priv->hwconfig.pever = EIP197_VERSION_MASK(version);
1499
+
1500
+ hwopt = readl(EIP197_GLOBAL(priv) + EIP197_OPTIONS);
1501
+ hiaopt = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_OPTIONS);
1502
+
1503
+ priv->hwconfig.icever = 0;
1504
+ priv->hwconfig.ocever = 0;
1505
+ priv->hwconfig.psever = 0;
1506
+ if (priv->flags & SAFEXCEL_HW_EIP197) {
1507
+ /* EIP197 */
1508
+ peopt = readl(EIP197_PE(priv) + EIP197_PE_OPTIONS(0));
1509
+
1510
+ priv->hwconfig.hwdataw = (hiaopt >> EIP197_HWDATAW_OFFSET) &
1511
+ EIP197_HWDATAW_MASK;
1512
+ priv->hwconfig.hwcfsize = ((hiaopt >> EIP197_CFSIZE_OFFSET) &
1513
+ EIP197_CFSIZE_MASK) +
1514
+ EIP197_CFSIZE_ADJUST;
1515
+ priv->hwconfig.hwrfsize = ((hiaopt >> EIP197_RFSIZE_OFFSET) &
1516
+ EIP197_RFSIZE_MASK) +
1517
+ EIP197_RFSIZE_ADJUST;
1518
+ priv->hwconfig.hwnumpes = (hiaopt >> EIP197_N_PES_OFFSET) &
1519
+ EIP197_N_PES_MASK;
1520
+ priv->hwconfig.hwnumrings = (hiaopt >> EIP197_N_RINGS_OFFSET) &
1521
+ EIP197_N_RINGS_MASK;
1522
+ if (hiaopt & EIP197_HIA_OPT_HAS_PE_ARB)
1523
+ priv->flags |= EIP197_PE_ARB;
1524
+ if (EIP206_OPT_ICE_TYPE(peopt) == 1) {
1525
+ priv->flags |= EIP197_ICE;
1526
+ /* Detect ICE EIP207 class. engine and version */
1527
+ version = readl(EIP197_PE(priv) +
1528
+ EIP197_PE_ICE_VERSION(0));
1529
+ if (EIP197_REG_LO16(version) != EIP207_VERSION_LE) {
1530
+ dev_err(dev, "EIP%d: ICE EIP207 not detected.\n",
1531
+ peid);
1532
+ return -ENODEV;
1533
+ }
1534
+ priv->hwconfig.icever = EIP197_VERSION_MASK(version);
1535
+ }
1536
+ if (EIP206_OPT_OCE_TYPE(peopt) == 1) {
1537
+ priv->flags |= EIP197_OCE;
1538
+ /* Detect EIP96PP packet stream editor and version */
1539
+ version = readl(EIP197_PE(priv) + EIP197_PE_PSE_VERSION(0));
1540
+ if (EIP197_REG_LO16(version) != EIP96_VERSION_LE) {
1541
+ dev_err(dev, "EIP%d: EIP96PP not detected.\n", peid);
1542
+ return -ENODEV;
1543
+ }
1544
+ priv->hwconfig.psever = EIP197_VERSION_MASK(version);
1545
+ /* Detect OCE EIP207 class. engine and version */
1546
+ version = readl(EIP197_PE(priv) +
1547
+ EIP197_PE_ICE_VERSION(0));
1548
+ if (EIP197_REG_LO16(version) != EIP207_VERSION_LE) {
1549
+ dev_err(dev, "EIP%d: OCE EIP207 not detected.\n",
1550
+ peid);
1551
+ return -ENODEV;
1552
+ }
1553
+ priv->hwconfig.ocever = EIP197_VERSION_MASK(version);
1554
+ }
1555
+ /* If not a full TRC, then assume simple TRC */
1556
+ if (!(hwopt & EIP197_OPT_HAS_TRC))
1557
+ priv->flags |= EIP197_SIMPLE_TRC;
1558
+ /* EIP197 always has SOME form of TRC */
1559
+ priv->flags |= EIP197_TRC_CACHE;
1560
+ } else {
1561
+ /* EIP97 */
1562
+ priv->hwconfig.hwdataw = (hiaopt >> EIP197_HWDATAW_OFFSET) &
1563
+ EIP97_HWDATAW_MASK;
1564
+ priv->hwconfig.hwcfsize = (hiaopt >> EIP97_CFSIZE_OFFSET) &
1565
+ EIP97_CFSIZE_MASK;
1566
+ priv->hwconfig.hwrfsize = (hiaopt >> EIP97_RFSIZE_OFFSET) &
1567
+ EIP97_RFSIZE_MASK;
1568
+ priv->hwconfig.hwnumpes = 1; /* by definition */
1569
+ priv->hwconfig.hwnumrings = (hiaopt >> EIP197_N_RINGS_OFFSET) &
1570
+ EIP197_N_RINGS_MASK;
1571
+ }
1572
+
1573
+ /* Scan for ring AIC's */
1574
+ for (i = 0; i < EIP197_MAX_RING_AIC; i++) {
1575
+ version = readl(EIP197_HIA_AIC_R(priv) +
1576
+ EIP197_HIA_AIC_R_VERSION(i));
1577
+ if (EIP197_REG_LO16(version) != EIP201_VERSION_LE)
1578
+ break;
1579
+ }
1580
+ priv->hwconfig.hwnumraic = i;
1581
+ /* Low-end EIP196 may not have any ring AIC's ... */
1582
+ if (!priv->hwconfig.hwnumraic) {
1583
+ dev_err(priv->dev, "No ring interrupt controller present!\n");
1584
+ return -ENODEV;
1585
+ }
1586
+
1587
+ /* Get supported algorithms from EIP96 transform engine */
1588
+ priv->hwconfig.algo_flags = readl(EIP197_PE(priv) +
1589
+ EIP197_PE_EIP96_OPTIONS(0));
1590
+
1591
+ /* Print single info line describing what we just detected */
1592
+ dev_info(priv->dev, "EIP%d:%x(%d,%d,%d,%d)-HIA:%x(%d,%d,%d),PE:%x/%x(alg:%08x)/%x/%x/%x\n",
1593
+ peid, priv->hwconfig.hwver, hwctg, priv->hwconfig.hwnumpes,
1594
+ priv->hwconfig.hwnumrings, priv->hwconfig.hwnumraic,
1595
+ priv->hwconfig.hiaver, priv->hwconfig.hwdataw,
1596
+ priv->hwconfig.hwcfsize, priv->hwconfig.hwrfsize,
1597
+ priv->hwconfig.ppver, priv->hwconfig.pever,
1598
+ priv->hwconfig.algo_flags, priv->hwconfig.icever,
1599
+ priv->hwconfig.ocever, priv->hwconfig.psever);
1600
+
1601
+ safexcel_configure(priv);
1602
+
1603
+ if (IS_ENABLED(CONFIG_PCI) && priv->version == EIP197_DEVBRD) {
1604
+ /*
1605
+ * Request MSI vectors for global + 1 per ring -
1606
+ * or just 1 for older dev images
1607
+ */
1608
+ struct pci_dev *pci_pdev = pdev;
1609
+
1610
+ ret = pci_alloc_irq_vectors(pci_pdev,
1611
+ priv->config.rings + 1,
1612
+ priv->config.rings + 1,
1613
+ PCI_IRQ_MSI | PCI_IRQ_MSIX);
1614
+ if (ret < 0) {
1615
+ dev_err(dev, "Failed to allocate PCI MSI interrupts\n");
1616
+ return ret;
1617
+ }
1618
+ }
1619
+
1620
+ /* Register the ring IRQ handlers and configure the rings */
1621
+ priv->ring = devm_kcalloc(dev, priv->config.rings,
1622
+ sizeof(*priv->ring),
1623
+ GFP_KERNEL);
1624
+ if (!priv->ring)
1625
+ return -ENOMEM;
1626
+
1627
+ for (i = 0; i < priv->config.rings; i++) {
1628
+ char wq_name[9] = {0};
1629
+ int irq;
1630
+ struct safexcel_ring_irq_data *ring_irq;
1631
+
1632
+ ret = safexcel_init_ring_descriptors(priv,
1633
+ &priv->ring[i].cdr,
1634
+ &priv->ring[i].rdr);
1635
+ if (ret) {
1636
+ dev_err(dev, "Failed to initialize rings\n");
1637
+ goto err_cleanup_rings;
1638
+ }
1639
+
1640
+ priv->ring[i].rdr_req = devm_kcalloc(dev,
1641
+ EIP197_DEFAULT_RING_SIZE,
1642
+ sizeof(*priv->ring[i].rdr_req),
1643
+ GFP_KERNEL);
1644
+ if (!priv->ring[i].rdr_req) {
1645
+ ret = -ENOMEM;
1646
+ goto err_cleanup_rings;
1647
+ }
1648
+
1649
+ ring_irq = devm_kzalloc(dev, sizeof(*ring_irq), GFP_KERNEL);
1650
+ if (!ring_irq) {
1651
+ ret = -ENOMEM;
1652
+ goto err_cleanup_rings;
1653
+ }
1654
+
1655
+ ring_irq->priv = priv;
1656
+ ring_irq->ring = i;
1657
+
1658
+ irq = safexcel_request_ring_irq(pdev,
1659
+ EIP197_IRQ_NUMBER(i, is_pci_dev),
1660
+ is_pci_dev,
1661
+ i,
1662
+ safexcel_irq_ring,
1663
+ safexcel_irq_ring_thread,
1664
+ ring_irq);
1665
+ if (irq < 0) {
1666
+ dev_err(dev, "Failed to get IRQ ID for ring %d\n", i);
1667
+ ret = irq;
1668
+ goto err_cleanup_rings;
1669
+ }
1670
+
1671
+ priv->ring[i].irq = irq;
1672
+ priv->ring[i].work_data.priv = priv;
1673
+ priv->ring[i].work_data.ring = i;
1674
+ INIT_WORK(&priv->ring[i].work_data.work,
1675
+ safexcel_dequeue_work);
1676
+
1677
+ snprintf(wq_name, 9, "wq_ring%d", i);
1678
+ priv->ring[i].workqueue =
1679
+ create_singlethread_workqueue(wq_name);
1680
+ if (!priv->ring[i].workqueue) {
1681
+ ret = -ENOMEM;
1682
+ goto err_cleanup_rings;
1683
+ }
1684
+
1685
+ priv->ring[i].requests = 0;
1686
+ priv->ring[i].busy = false;
1687
+
1688
+ crypto_init_queue(&priv->ring[i].queue,
1689
+ EIP197_DEFAULT_RING_SIZE);
1690
+
1691
+ spin_lock_init(&priv->ring[i].lock);
1692
+ spin_lock_init(&priv->ring[i].queue_lock);
1693
+ }
1694
+
1695
+ atomic_set(&priv->ring_used, 0);
1696
+
1697
+ ret = safexcel_hw_init(priv);
1698
+ if (ret) {
1699
+ dev_err(dev, "HW init failed (%d)\n", ret);
1700
+ goto err_cleanup_rings;
1701
+ }
1702
+
1703
+ ret = safexcel_register_algorithms(priv);
1704
+ if (ret) {
1705
+ dev_err(dev, "Failed to register algorithms (%d)\n", ret);
1706
+ goto err_cleanup_rings;
1707
+ }
1708
+
1709
+ return 0;
1710
+
1711
+err_cleanup_rings:
1712
+ for (i = 0; i < priv->config.rings; i++) {
1713
+ if (priv->ring[i].irq)
1714
+ irq_set_affinity_hint(priv->ring[i].irq, NULL);
1715
+ if (priv->ring[i].workqueue)
1716
+ destroy_workqueue(priv->ring[i].workqueue);
1717
+ }
1718
+
1719
+ return ret;
1720
+}
1721
+
1722
+static void safexcel_hw_reset_rings(struct safexcel_crypto_priv *priv)
1723
+{
1724
+ int i;
1725
+
1726
+ for (i = 0; i < priv->config.rings; i++) {
1727
+ /* clear any pending interrupt */
1728
+ writel(GENMASK(5, 0), EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
1729
+ writel(GENMASK(7, 0), EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);
1730
+
1731
+ /* Reset the CDR base address */
1732
+ writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
1733
+ writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
1734
+
1735
+ /* Reset the RDR base address */
1736
+ writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
1737
+ writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
1738
+ }
1739
+}
1740
+
1741
+/* for Device Tree platform driver */
9781742
9791743 static int safexcel_probe(struct platform_device *pdev)
9801744 {
9811745 struct device *dev = &pdev->dev;
982
- struct resource *res;
9831746 struct safexcel_crypto_priv *priv;
984
- int i, ret;
1747
+ int ret;
9851748
9861749 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
9871750 if (!priv)
....@@ -990,13 +1753,9 @@
9901753 priv->dev = dev;
9911754 priv->version = (enum safexcel_eip_version)of_device_get_match_data(dev);
9921755
993
- if (priv->version == EIP197B || priv->version == EIP197D)
994
- priv->flags |= EIP197_TRC_CACHE;
1756
+ platform_set_drvdata(pdev, priv);
9951757
996
- safexcel_init_register_offsets(priv);
997
-
998
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
999
- priv->base = devm_ioremap_resource(dev, res);
1758
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
10001759 if (IS_ERR(priv->base)) {
10011760 dev_err(dev, "failed to get resource\n");
10021761 return PTR_ERR(priv->base);
....@@ -1034,98 +1793,10 @@
10341793 if (ret)
10351794 goto err_reg_clk;
10361795
1037
- priv->context_pool = dmam_pool_create("safexcel-context", dev,
1038
- sizeof(struct safexcel_context_record),
1039
- 1, 0);
1040
- if (!priv->context_pool) {
1041
- ret = -ENOMEM;
1796
+ /* Generic EIP97/EIP197 device probing */
1797
+ ret = safexcel_probe_generic(pdev, priv, 0);
1798
+ if (ret)
10421799 goto err_reg_clk;
1043
- }
1044
-
1045
- safexcel_configure(priv);
1046
-
1047
- priv->ring = devm_kcalloc(dev, priv->config.rings,
1048
- sizeof(*priv->ring),
1049
- GFP_KERNEL);
1050
- if (!priv->ring) {
1051
- ret = -ENOMEM;
1052
- goto err_reg_clk;
1053
- }
1054
-
1055
- for (i = 0; i < priv->config.rings; i++) {
1056
- char irq_name[6] = {0}; /* "ringX\0" */
1057
- char wq_name[9] = {0}; /* "wq_ringX\0" */
1058
- int irq;
1059
- struct safexcel_ring_irq_data *ring_irq;
1060
-
1061
- ret = safexcel_init_ring_descriptors(priv,
1062
- &priv->ring[i].cdr,
1063
- &priv->ring[i].rdr);
1064
- if (ret)
1065
- goto err_reg_clk;
1066
-
1067
- priv->ring[i].rdr_req = devm_kcalloc(dev,
1068
- EIP197_DEFAULT_RING_SIZE,
1069
- sizeof(*priv->ring[i].rdr_req),
1070
- GFP_KERNEL);
1071
- if (!priv->ring[i].rdr_req) {
1072
- ret = -ENOMEM;
1073
- goto err_reg_clk;
1074
- }
1075
-
1076
- ring_irq = devm_kzalloc(dev, sizeof(*ring_irq), GFP_KERNEL);
1077
- if (!ring_irq) {
1078
- ret = -ENOMEM;
1079
- goto err_reg_clk;
1080
- }
1081
-
1082
- ring_irq->priv = priv;
1083
- ring_irq->ring = i;
1084
-
1085
- snprintf(irq_name, 6, "ring%d", i);
1086
- irq = safexcel_request_ring_irq(pdev, irq_name, safexcel_irq_ring,
1087
- safexcel_irq_ring_thread,
1088
- ring_irq);
1089
- if (irq < 0) {
1090
- ret = irq;
1091
- goto err_reg_clk;
1092
- }
1093
-
1094
- priv->ring[i].work_data.priv = priv;
1095
- priv->ring[i].work_data.ring = i;
1096
- INIT_WORK(&priv->ring[i].work_data.work, safexcel_dequeue_work);
1097
-
1098
- snprintf(wq_name, 9, "wq_ring%d", i);
1099
- priv->ring[i].workqueue = create_singlethread_workqueue(wq_name);
1100
- if (!priv->ring[i].workqueue) {
1101
- ret = -ENOMEM;
1102
- goto err_reg_clk;
1103
- }
1104
-
1105
- priv->ring[i].requests = 0;
1106
- priv->ring[i].busy = false;
1107
-
1108
- crypto_init_queue(&priv->ring[i].queue,
1109
- EIP197_DEFAULT_RING_SIZE);
1110
-
1111
- spin_lock_init(&priv->ring[i].lock);
1112
- spin_lock_init(&priv->ring[i].queue_lock);
1113
- }
1114
-
1115
- platform_set_drvdata(pdev, priv);
1116
- atomic_set(&priv->ring_used, 0);
1117
-
1118
- ret = safexcel_hw_init(priv);
1119
- if (ret) {
1120
- dev_err(dev, "EIP h/w init failed (%d)\n", ret);
1121
- goto err_reg_clk;
1122
- }
1123
-
1124
- ret = safexcel_register_algorithms(priv);
1125
- if (ret) {
1126
- dev_err(dev, "Failed to register algorithms (%d)\n", ret);
1127
- goto err_reg_clk;
1128
- }
11291800
11301801 return 0;
11311802
....@@ -1136,25 +1807,6 @@
11361807 return ret;
11371808 }
11381809
1139
-static void safexcel_hw_reset_rings(struct safexcel_crypto_priv *priv)
1140
-{
1141
- int i;
1142
-
1143
- for (i = 0; i < priv->config.rings; i++) {
1144
- /* clear any pending interrupt */
1145
- writel(GENMASK(5, 0), EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
1146
- writel(GENMASK(7, 0), EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);
1147
-
1148
- /* Reset the CDR base address */
1149
- writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
1150
- writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
1151
-
1152
- /* Reset the RDR base address */
1153
- writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
1154
- writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
1155
- }
1156
-}
1157
-
11581810 static int safexcel_remove(struct platform_device *pdev)
11591811 {
11601812 struct safexcel_crypto_priv *priv = platform_get_drvdata(pdev);
....@@ -1163,10 +1815,13 @@
11631815 safexcel_unregister_algorithms(priv);
11641816 safexcel_hw_reset_rings(priv);
11651817
1818
+ clk_disable_unprepare(priv->reg_clk);
11661819 clk_disable_unprepare(priv->clk);
11671820
1168
- for (i = 0; i < priv->config.rings; i++)
1821
+ for (i = 0; i < priv->config.rings; i++) {
1822
+ irq_set_affinity_hint(priv->ring[i].irq, NULL);
11691823 destroy_workqueue(priv->ring[i].workqueue);
1824
+ }
11701825
11711826 return 0;
11721827 }
....@@ -1174,29 +1829,29 @@
11741829 static const struct of_device_id safexcel_of_match_table[] = {
11751830 {
11761831 .compatible = "inside-secure,safexcel-eip97ies",
1177
- .data = (void *)EIP97IES,
1832
+ .data = (void *)EIP97IES_MRVL,
11781833 },
11791834 {
11801835 .compatible = "inside-secure,safexcel-eip197b",
1181
- .data = (void *)EIP197B,
1836
+ .data = (void *)EIP197B_MRVL,
11821837 },
11831838 {
11841839 .compatible = "inside-secure,safexcel-eip197d",
1185
- .data = (void *)EIP197D,
1840
+ .data = (void *)EIP197D_MRVL,
11861841 },
1842
+ /* For backward compatibility and intended for generic use */
11871843 {
1188
- /* Deprecated. Kept for backward compatibility. */
11891844 .compatible = "inside-secure,safexcel-eip97",
1190
- .data = (void *)EIP97IES,
1845
+ .data = (void *)EIP97IES_MRVL,
11911846 },
11921847 {
1193
- /* Deprecated. Kept for backward compatibility. */
11941848 .compatible = "inside-secure,safexcel-eip197",
1195
- .data = (void *)EIP197B,
1849
+ .data = (void *)EIP197B_MRVL,
11961850 },
11971851 {},
11981852 };
11991853
1854
+MODULE_DEVICE_TABLE(of, safexcel_of_match_table);
12001855
12011856 static struct platform_driver crypto_safexcel = {
12021857 .probe = safexcel_probe,
....@@ -1206,10 +1861,161 @@
12061861 .of_match_table = safexcel_of_match_table,
12071862 },
12081863 };
1209
-module_platform_driver(crypto_safexcel);
1864
+
1865
+/* PCIE devices - i.e. Inside Secure development boards */
1866
+
1867
+static int safexcel_pci_probe(struct pci_dev *pdev,
1868
+ const struct pci_device_id *ent)
1869
+{
1870
+ struct device *dev = &pdev->dev;
1871
+ struct safexcel_crypto_priv *priv;
1872
+ void __iomem *pciebase;
1873
+ int rc;
1874
+ u32 val;
1875
+
1876
+ dev_dbg(dev, "Probing PCIE device: vendor %04x, device %04x, subv %04x, subdev %04x, ctxt %lx\n",
1877
+ ent->vendor, ent->device, ent->subvendor,
1878
+ ent->subdevice, ent->driver_data);
1879
+
1880
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
1881
+ if (!priv)
1882
+ return -ENOMEM;
1883
+
1884
+ priv->dev = dev;
1885
+ priv->version = (enum safexcel_eip_version)ent->driver_data;
1886
+
1887
+ pci_set_drvdata(pdev, priv);
1888
+
1889
+ /* enable the device */
1890
+ rc = pcim_enable_device(pdev);
1891
+ if (rc) {
1892
+ dev_err(dev, "Failed to enable PCI device\n");
1893
+ return rc;
1894
+ }
1895
+
1896
+ /* take ownership of PCI BAR0 */
1897
+ rc = pcim_iomap_regions(pdev, 1, "crypto_safexcel");
1898
+ if (rc) {
1899
+ dev_err(dev, "Failed to map IO region for BAR0\n");
1900
+ return rc;
1901
+ }
1902
+ priv->base = pcim_iomap_table(pdev)[0];
1903
+
1904
+ if (priv->version == EIP197_DEVBRD) {
1905
+ dev_dbg(dev, "Device identified as FPGA based development board - applying HW reset\n");
1906
+
1907
+ rc = pcim_iomap_regions(pdev, 4, "crypto_safexcel");
1908
+ if (rc) {
1909
+ dev_err(dev, "Failed to map IO region for BAR4\n");
1910
+ return rc;
1911
+ }
1912
+
1913
+ pciebase = pcim_iomap_table(pdev)[2];
1914
+ val = readl(pciebase + EIP197_XLX_IRQ_BLOCK_ID_ADDR);
1915
+ if ((val >> 16) == EIP197_XLX_IRQ_BLOCK_ID_VALUE) {
1916
+ dev_dbg(dev, "Detected Xilinx PCIE IRQ block version %d, multiple MSI support enabled\n",
1917
+ (val & 0xff));
1918
+
1919
+ /* Setup MSI identity map mapping */
1920
+ writel(EIP197_XLX_USER_VECT_LUT0_IDENT,
1921
+ pciebase + EIP197_XLX_USER_VECT_LUT0_ADDR);
1922
+ writel(EIP197_XLX_USER_VECT_LUT1_IDENT,
1923
+ pciebase + EIP197_XLX_USER_VECT_LUT1_ADDR);
1924
+ writel(EIP197_XLX_USER_VECT_LUT2_IDENT,
1925
+ pciebase + EIP197_XLX_USER_VECT_LUT2_ADDR);
1926
+ writel(EIP197_XLX_USER_VECT_LUT3_IDENT,
1927
+ pciebase + EIP197_XLX_USER_VECT_LUT3_ADDR);
1928
+
1929
+ /* Enable all device interrupts */
1930
+ writel(GENMASK(31, 0),
1931
+ pciebase + EIP197_XLX_USER_INT_ENB_MSK);
1932
+ } else {
1933
+ dev_err(dev, "Unrecognised IRQ block identifier %x\n",
1934
+ val);
1935
+ return -ENODEV;
1936
+ }
1937
+
1938
+ /* HW reset FPGA dev board */
1939
+ /* assert reset */
1940
+ writel(1, priv->base + EIP197_XLX_GPIO_BASE);
1941
+ wmb(); /* maintain strict ordering for accesses here */
1942
+ /* deassert reset */
1943
+ writel(0, priv->base + EIP197_XLX_GPIO_BASE);
1944
+ wmb(); /* maintain strict ordering for accesses here */
1945
+ }
1946
+
1947
+ /* enable bus mastering */
1948
+ pci_set_master(pdev);
1949
+
1950
+ /* Generic EIP97/EIP197 device probing */
1951
+ rc = safexcel_probe_generic(pdev, priv, 1);
1952
+ return rc;
1953
+}
1954
+
1955
+static void safexcel_pci_remove(struct pci_dev *pdev)
1956
+{
1957
+ struct safexcel_crypto_priv *priv = pci_get_drvdata(pdev);
1958
+ int i;
1959
+
1960
+ safexcel_unregister_algorithms(priv);
1961
+
1962
+ for (i = 0; i < priv->config.rings; i++)
1963
+ destroy_workqueue(priv->ring[i].workqueue);
1964
+
1965
+ safexcel_hw_reset_rings(priv);
1966
+}
1967
+
1968
/*
 * PCI IDs claimed by this driver. Matching is on the subsystem
 * vendor/device pair (0x16ae/0xc522) in addition to the Xilinx 0x9038
 * endpoint ID, so only the development board is bound - not arbitrary
 * Xilinx FPGA endpoints. driver_data selects the EIP197_DEVBRD flavour
 * handled specially in safexcel_pci_probe().
 */
static const struct pci_device_id safexcel_pci_ids[] = {
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_XILINX, 0x9038,
			       0x16ae, 0xc522),
		.driver_data = EIP197_DEVBRD,
	},
	{},
};

MODULE_DEVICE_TABLE(pci, safexcel_pci_ids);
1978
+
1979
/* PCI driver glue for the PCIE-attached (development board) variant */
static struct pci_driver safexcel_pci_driver = {
	.name          = "crypto-safexcel",
	.id_table      = safexcel_pci_ids,
	.probe         = safexcel_pci_probe,
	.remove        = safexcel_pci_remove,
};
1985
+
1986
+static int __init safexcel_init(void)
1987
+{
1988
+ int ret;
1989
+
1990
+ /* Register PCI driver */
1991
+ ret = pci_register_driver(&safexcel_pci_driver);
1992
+
1993
+ /* Register platform driver */
1994
+ if (IS_ENABLED(CONFIG_OF) && !ret) {
1995
+ ret = platform_driver_register(&crypto_safexcel);
1996
+ if (ret)
1997
+ pci_unregister_driver(&safexcel_pci_driver);
1998
+ }
1999
+
2000
+ return ret;
2001
+}
2002
+
2003
+static void __exit safexcel_exit(void)
2004
+{
2005
+ /* Unregister platform driver */
2006
+ if (IS_ENABLED(CONFIG_OF))
2007
+ platform_driver_unregister(&crypto_safexcel);
2008
+
2009
+ /* Unregister PCI driver if successfully registered before */
2010
+ pci_unregister_driver(&safexcel_pci_driver);
2011
+}
2012
+
2013
+module_init(safexcel_init);
2014
+module_exit(safexcel_exit);
12102015
12112016 MODULE_AUTHOR("Antoine Tenart <antoine.tenart@free-electrons.com>");
12122017 MODULE_AUTHOR("Ofer Heifetz <oferh@marvell.com>");
12132018 MODULE_AUTHOR("Igal Liberman <igall@marvell.com>");
1214
-MODULE_DESCRIPTION("Support for SafeXcel cryptographic engine EIP197");
2019
+MODULE_DESCRIPTION("Support for SafeXcel cryptographic engines: EIP97 & EIP197");
12152020 MODULE_LICENSE("GPL v2");
2021
+MODULE_IMPORT_NS(CRYPTO_INTERNAL);