2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/crypto/async_tx/async_pq.c
@@ -1,23 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * Copyright(c) 2007 Yuri Tikhonov <yur@emcraft.com>
  * Copyright(c) 2009 Intel Corporation
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called COPYING.
  */
 #include <linux/kernel.h>
 #include <linux/interrupt.h>
@@ -120,7 +104,7 @@
  * do_sync_gen_syndrome - synchronously calculate a raid6 syndrome
  */
 static void
-do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
+do_sync_gen_syndrome(struct page **blocks, unsigned int *offsets, int disks,
                      size_t len, struct async_submit_ctl *submit)
 {
         void **srcs;
@@ -137,7 +121,8 @@
                         BUG_ON(i > disks - 3); /* P or Q can't be zero */
                         srcs[i] = (void*)raid6_empty_zero_page;
                 } else {
-                        srcs[i] = page_address(blocks[i]) + offset;
+                        srcs[i] = page_address(blocks[i]) + offsets[i];
+
                         if (i < disks - 2) {
                                 stop = i;
                                 if (start == -1)
@@ -154,10 +139,23 @@
         async_tx_sync_epilog(submit);
 }
 
+static inline bool
+is_dma_pq_aligned_offs(struct dma_device *dev, unsigned int *offs,
+                       int src_cnt, size_t len)
+{
+        int i;
+
+        for (i = 0; i < src_cnt; i++) {
+                if (!is_dma_pq_aligned(dev, offs[i], 0, len))
+                        return false;
+        }
+        return true;
+}
+
 /**
  * async_gen_syndrome - asynchronously calculate a raid6 syndrome
  * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
- * @offset: common offset into each block (src and dest) to start transaction
+ * @offsets: offset array into each block (src and dest) to start transaction
  * @disks: number of blocks (including missing P or Q, see below)
  * @len: length of operation in bytes
  * @submit: submission/completion modifiers
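
With this change each block carries its own page offset, and the new is_dma_pq_aligned_offs() helper requires every per-block offset to satisfy the device's P+Q alignment before the DMA path is taken. A minimal caller sketch follows; it is illustrative only, and NDISKS, the example function, and the zeroed offsets are assumptions, not part of this patch:

#include <linux/async_tx.h>

#define NDISKS 4        /* hypothetical number of data disks for this sketch */

/*
 * Hypothetical caller: sources occupy blocks[0..NDISKS-1], P sits at
 * blocks[NDISKS], Q at blocks[NDISKS+1], with one offset per block.
 */
static void example_gen_syndrome(struct page **blocks)
{
        unsigned int offsets[NDISKS + 2];
        struct async_submit_ctl submit;
        struct dma_async_tx_descriptor *tx;
        int i;

        for (i = 0; i < NDISKS + 2; i++)
                offsets[i] = 0;         /* offsets may now differ per block */

        init_async_submit(&submit, ASYNC_TX_ACK, NULL, NULL, NULL, NULL);
        tx = async_gen_syndrome(blocks, offsets, NDISKS + 2, PAGE_SIZE, &submit);
        async_tx_quiesce(&tx);          /* wait for the syndrome to complete */
}
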
@@ -176,7 +174,7 @@
  * path.
  */
 struct dma_async_tx_descriptor *
-async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
+async_gen_syndrome(struct page **blocks, unsigned int *offsets, int disks,
                    size_t len, struct async_submit_ctl *submit)
 {
         int src_cnt = disks - 2;
@@ -195,7 +193,7 @@
         if (unmap && !(submit->flags & ASYNC_TX_PQ_XOR_DST) &&
             (src_cnt <= dma_maxpq(device, 0) ||
              dma_maxpq(device, DMA_PREP_CONTINUE) > 0) &&
-            is_dma_pq_aligned(device, offset, 0, len)) {
+            is_dma_pq_aligned_offs(device, offsets, disks, len)) {
                 struct dma_async_tx_descriptor *tx;
                 enum dma_ctrl_flags dma_flags = 0;
                 unsigned char coefs[MAX_DISKS];
@@ -212,8 +210,8 @@
                 for (i = 0, j = 0; i < src_cnt; i++) {
                         if (blocks[i] == NULL)
                                 continue;
-                        unmap->addr[j] = dma_map_page(device->dev, blocks[i], offset,
-                                                      len, DMA_TO_DEVICE);
+                        unmap->addr[j] = dma_map_page(device->dev, blocks[i],
+                                                      offsets[i], len, DMA_TO_DEVICE);
                         coefs[j] = raid6_gfexp[i];
                         unmap->to_cnt++;
                         j++;
@@ -226,7 +224,8 @@
                 unmap->bidi_cnt++;
                 if (P(blocks, disks))
                         unmap->addr[j++] = dma_map_page(device->dev, P(blocks, disks),
-                                                        offset, len, DMA_BIDIRECTIONAL);
+                                                        P(offsets, disks),
+                                                        len, DMA_BIDIRECTIONAL);
                 else {
                         unmap->addr[j++] = 0;
                         dma_flags |= DMA_PREP_PQ_DISABLE_P;
@@ -235,7 +234,8 @@
                 unmap->bidi_cnt++;
                 if (Q(blocks, disks))
                         unmap->addr[j++] = dma_map_page(device->dev, Q(blocks, disks),
-                                                        offset, len, DMA_BIDIRECTIONAL);
+                                                        Q(offsets, disks),
+                                                        len, DMA_BIDIRECTIONAL);
                 else {
                         unmap->addr[j++] = 0;
                         dma_flags |= DMA_PREP_PQ_DISABLE_Q;
@@ -256,13 +256,13 @@
 
         if (!P(blocks, disks)) {
                 P(blocks, disks) = pq_scribble_page;
-                BUG_ON(len + offset > PAGE_SIZE);
+                P(offsets, disks) = 0;
         }
         if (!Q(blocks, disks)) {
                 Q(blocks, disks) = pq_scribble_page;
-                BUG_ON(len + offset > PAGE_SIZE);
+                Q(offsets, disks) = 0;
         }
-        do_sync_gen_syndrome(blocks, offset, disks, len, submit);
+        do_sync_gen_syndrome(blocks, offsets, disks, len, submit);
 
         return NULL;
 }
@@ -286,6 +286,7 @@
  * @len: length of operation in bytes
  * @pqres: on val failure SUM_CHECK_P_RESULT and/or SUM_CHECK_Q_RESULT are set
  * @spare: temporary result buffer for the synchronous case
+ * @s_off: spare buffer page offset
  * @submit: submission / completion modifiers
  *
  * The same notes from async_gen_syndrome apply to the 'blocks',
@@ -294,9 +295,9 @@
  * specified.
  */
 struct dma_async_tx_descriptor *
-async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
+async_syndrome_val(struct page **blocks, unsigned int *offsets, int disks,
                    size_t len, enum sum_check_flags *pqres, struct page *spare,
-                   struct async_submit_ctl *submit)
+                   unsigned int s_off, struct async_submit_ctl *submit)
 {
         struct dma_chan *chan = pq_val_chan(submit, blocks, disks, len);
         struct dma_device *device = chan ? chan->device : NULL;
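
async_syndrome_val() now takes the same per-block offset array plus s_off, the offset into the spare page used by the synchronous fallback. A hedged usage sketch follows; the example function, its parameters, and the NDISKS constant from the earlier sketch are assumptions for illustration, not part of this patch:

/*
 * Hypothetical validation call: check the existing P/Q against the data
 * blocks, reusing a caller-provided spare page at offset s_off.
 */
static void example_syndrome_val(struct page **blocks, unsigned int *offsets,
                                 struct page *spare, unsigned int s_off)
{
        struct async_submit_ctl submit;
        struct dma_async_tx_descriptor *tx;
        enum sum_check_flags pqres = 0;

        init_async_submit(&submit, ASYNC_TX_ACK, NULL, NULL, NULL, NULL);
        tx = async_syndrome_val(blocks, offsets, NDISKS + 2, PAGE_SIZE,
                                &pqres, spare, s_off, &submit);
        async_tx_quiesce(&tx);          /* wait for the check to complete */

        if (pqres & SUM_CHECK_P_RESULT)
                pr_warn("P parity mismatch\n");
        if (pqres & SUM_CHECK_Q_RESULT)
                pr_warn("Q parity mismatch\n");
}
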
@@ -311,7 +312,7 @@
         unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);
 
         if (unmap && disks <= dma_maxpq(device, 0) &&
-            is_dma_pq_aligned(device, offset, 0, len)) {
+            is_dma_pq_aligned_offs(device, offsets, disks, len)) {
                 struct device *dev = device->dev;
                 dma_addr_t pq[2];
                 int i, j = 0, src_cnt = 0;
@@ -323,7 +324,7 @@
                 for (i = 0; i < disks-2; i++)
                         if (likely(blocks[i])) {
                                 unmap->addr[j] = dma_map_page(dev, blocks[i],
-                                                              offset, len,
+                                                              offsets[i], len,
                                                               DMA_TO_DEVICE);
                                 coefs[j] = raid6_gfexp[i];
                                 unmap->to_cnt++;
@@ -336,7 +337,7 @@
                         dma_flags |= DMA_PREP_PQ_DISABLE_P;
                 } else {
                         pq[0] = dma_map_page(dev, P(blocks, disks),
-                                             offset, len,
+                                             P(offsets, disks), len,
                                              DMA_TO_DEVICE);
                         unmap->addr[j++] = pq[0];
                         unmap->to_cnt++;
@@ -346,7 +347,7 @@
                         dma_flags |= DMA_PREP_PQ_DISABLE_Q;
                 } else {
                         pq[1] = dma_map_page(dev, Q(blocks, disks),
-                                             offset, len,
+                                             Q(offsets, disks), len,
                                              DMA_TO_DEVICE);
                         unmap->addr[j++] = pq[1];
                         unmap->to_cnt++;
@@ -371,7 +372,9 @@
                 async_tx_submit(chan, tx, submit);
         } else {
                 struct page *p_src = P(blocks, disks);
+                unsigned int p_off = P(offsets, disks);
                 struct page *q_src = Q(blocks, disks);
+                unsigned int q_off = Q(offsets, disks);
                 enum async_tx_flags flags_orig = submit->flags;
                 dma_async_tx_callback cb_fn_orig = submit->cb_fn;
                 void *scribble = submit->scribble;
@@ -397,27 +400,32 @@
                 if (p_src) {
                         init_async_submit(submit, ASYNC_TX_XOR_ZERO_DST, NULL,
                                           NULL, NULL, scribble);
-                        tx = async_xor(spare, blocks, offset, disks-2, len, submit);
+                        tx = async_xor_offs(spare, s_off,
+                                        blocks, offsets, disks-2, len, submit);
                         async_tx_quiesce(&tx);
-                        p = page_address(p_src) + offset;
-                        s = page_address(spare) + offset;
+                        p = page_address(p_src) + p_off;
+                        s = page_address(spare) + s_off;
                         *pqres |= !!memcmp(p, s, len) << SUM_CHECK_P;
                 }
 
                 if (q_src) {
                         P(blocks, disks) = NULL;
                         Q(blocks, disks) = spare;
+                        Q(offsets, disks) = s_off;
                         init_async_submit(submit, 0, NULL, NULL, NULL, scribble);
-                        tx = async_gen_syndrome(blocks, offset, disks, len, submit);
+                        tx = async_gen_syndrome(blocks, offsets, disks,
+                                                len, submit);
                         async_tx_quiesce(&tx);
-                        q = page_address(q_src) + offset;
-                        s = page_address(spare) + offset;
+                        q = page_address(q_src) + q_off;
+                        s = page_address(spare) + s_off;
                         *pqres |= !!memcmp(q, s, len) << SUM_CHECK_Q;
                 }
 
                 /* restore P, Q and submit */
                 P(blocks, disks) = p_src;
+                P(offsets, disks) = p_off;
                 Q(blocks, disks) = q_src;
+                Q(offsets, disks) = q_off;
 
                 submit->cb_fn = cb_fn_orig;
                 submit->cb_param = cb_param_orig;