2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/crypto/async_tx/async_xor.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * xor offload engine api
  *
@@ -8,20 +9,6 @@
  * with architecture considerations by:
  * Neil Brown <neilb@suse.de>
  * Jeff Garzik <jeff@garzik.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
  */
 #include <linux/kernel.h>
 #include <linux/interrupt.h>
@@ -110,7 +97,8 @@
 }
 
 static void
-do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
+do_sync_xor_offs(struct page *dest, unsigned int offset,
+		struct page **src_list, unsigned int *src_offs,
 	    int src_cnt, size_t len, struct async_submit_ctl *submit)
 {
 	int i;
@@ -127,7 +115,8 @@
 	/* convert to buffer pointers */
 	for (i = 0; i < src_cnt; i++)
 		if (src_list[i])
-			srcs[xor_src_cnt++] = page_address(src_list[i]) + offset;
+			srcs[xor_src_cnt++] = page_address(src_list[i]) +
+				(src_offs ? src_offs[i] : offset);
 	src_cnt = xor_src_cnt;
 	/* set destination address */
 	dest_buf = page_address(dest) + offset;
@@ -147,6 +136,117 @@
 
 	async_tx_sync_epilog(submit);
 }
+
+static inline bool
+dma_xor_aligned_offsets(struct dma_device *device, unsigned int offset,
+		unsigned int *src_offs, int src_cnt, int len)
+{
+	int i;
+
+	if (!is_dma_xor_aligned(device, offset, 0, len))
+		return false;
+
+	if (!src_offs)
+		return true;
+
+	for (i = 0; i < src_cnt; i++) {
+		if (!is_dma_xor_aligned(device, src_offs[i], 0, len))
+			return false;
+	}
+	return true;
+}
+
+/**
+ * async_xor_offs - attempt to xor a set of blocks with a dma engine.
+ * @dest: destination page
+ * @offset: dst offset to start transaction
+ * @src_list: array of source pages
+ * @src_offs: array of source pages offset, NULL means common src/dst offset
+ * @src_cnt: number of source pages
+ * @len: length in bytes
+ * @submit: submission / completion modifiers
+ *
+ * honored flags: ASYNC_TX_ACK, ASYNC_TX_XOR_ZERO_DST, ASYNC_TX_XOR_DROP_DST
+ *
+ * xor_blocks always uses the dest as a source so the
+ * ASYNC_TX_XOR_ZERO_DST flag must be set to not include dest data in
+ * the calculation. The assumption with dma engines is that they only
+ * use the destination buffer as a source when it is explicitly specified
+ * in the source list.
+ *
+ * src_list note: if the dest is also a source it must be at index zero.
+ * The contents of this array will be overwritten if a scribble region
+ * is not specified.
+ */
+struct dma_async_tx_descriptor *
+async_xor_offs(struct page *dest, unsigned int offset,
+		struct page **src_list, unsigned int *src_offs,
+		int src_cnt, size_t len, struct async_submit_ctl *submit)
+{
+	struct dma_chan *chan = async_tx_find_channel(submit, DMA_XOR,
+						      &dest, 1, src_list,
+						      src_cnt, len);
+	struct dma_device *device = chan ? chan->device : NULL;
+	struct dmaengine_unmap_data *unmap = NULL;
+
+	BUG_ON(src_cnt <= 1);
+
+	if (device)
+		unmap = dmaengine_get_unmap_data(device->dev, src_cnt+1, GFP_NOWAIT);
+
+	if (unmap && dma_xor_aligned_offsets(device, offset,
+				src_offs, src_cnt, len)) {
+		struct dma_async_tx_descriptor *tx;
+		int i, j;
+
+		/* run the xor asynchronously */
+		pr_debug("%s (async): len: %zu\n", __func__, len);
+
+		unmap->len = len;
+		for (i = 0, j = 0; i < src_cnt; i++) {
+			if (!src_list[i])
+				continue;
+			unmap->to_cnt++;
+			unmap->addr[j++] = dma_map_page(device->dev, src_list[i],
+					src_offs ? src_offs[i] : offset,
+					len, DMA_TO_DEVICE);
+		}
+
+		/* map it bidirectional as it may be re-used as a source */
+		unmap->addr[j] = dma_map_page(device->dev, dest, offset, len,
+					      DMA_BIDIRECTIONAL);
+		unmap->bidi_cnt = 1;
+
+		tx = do_async_xor(chan, unmap, submit);
+		dmaengine_unmap_put(unmap);
+		return tx;
+	} else {
+		dmaengine_unmap_put(unmap);
+		/* run the xor synchronously */
+		pr_debug("%s (sync): len: %zu\n", __func__, len);
+		WARN_ONCE(chan, "%s: no space for dma address conversion\n",
+			  __func__);
+
+		/* in the sync case the dest is an implied source
+		 * (assumes the dest is the first source)
+		 */
+		if (submit->flags & ASYNC_TX_XOR_DROP_DST) {
+			src_cnt--;
+			src_list++;
+			if (src_offs)
+				src_offs++;
+		}
+
+		/* wait for any prerequisite operations */
+		async_tx_quiesce(&submit->depend_tx);
+
+		do_sync_xor_offs(dest, offset, src_list, src_offs,
+				src_cnt, len, submit);
+
+		return NULL;
+	}
+}
+EXPORT_SYMBOL_GPL(async_xor_offs);
 
 /**
  * async_xor - attempt to xor a set of blocks with a dma engine.
@@ -173,63 +273,8 @@
 async_xor(struct page *dest, struct page **src_list, unsigned int offset,
 	  int src_cnt, size_t len, struct async_submit_ctl *submit)
 {
-	struct dma_chan *chan = async_tx_find_channel(submit, DMA_XOR,
-						      &dest, 1, src_list,
-						      src_cnt, len);
-	struct dma_device *device = chan ? chan->device : NULL;
-	struct dmaengine_unmap_data *unmap = NULL;
-
-	BUG_ON(src_cnt <= 1);
-
-	if (device)
-		unmap = dmaengine_get_unmap_data(device->dev, src_cnt+1, GFP_NOWAIT);
-
-	if (unmap && is_dma_xor_aligned(device, offset, 0, len)) {
-		struct dma_async_tx_descriptor *tx;
-		int i, j;
-
-		/* run the xor asynchronously */
-		pr_debug("%s (async): len: %zu\n", __func__, len);
-
-		unmap->len = len;
-		for (i = 0, j = 0; i < src_cnt; i++) {
-			if (!src_list[i])
-				continue;
-			unmap->to_cnt++;
-			unmap->addr[j++] = dma_map_page(device->dev, src_list[i],
-							offset, len, DMA_TO_DEVICE);
-		}
-
-		/* map it bidirectional as it may be re-used as a source */
-		unmap->addr[j] = dma_map_page(device->dev, dest, offset, len,
-					      DMA_BIDIRECTIONAL);
-		unmap->bidi_cnt = 1;
-
-		tx = do_async_xor(chan, unmap, submit);
-		dmaengine_unmap_put(unmap);
-		return tx;
-	} else {
-		dmaengine_unmap_put(unmap);
-		/* run the xor synchronously */
-		pr_debug("%s (sync): len: %zu\n", __func__, len);
-		WARN_ONCE(chan, "%s: no space for dma address conversion\n",
-			  __func__);
-
-		/* in the sync case the dest is an implied source
-		 * (assumes the dest is the first source)
-		 */
-		if (submit->flags & ASYNC_TX_XOR_DROP_DST) {
-			src_cnt--;
-			src_list++;
-		}
-
-		/* wait for any prerequisite operations */
-		async_tx_quiesce(&submit->depend_tx);
-
-		do_sync_xor(dest, src_list, offset, src_cnt, len, submit);
-
-		return NULL;
-	}
+	return async_xor_offs(dest, offset, src_list, NULL,
+			src_cnt, len, submit);
 }
 EXPORT_SYMBOL_GPL(async_xor);
 
@@ -250,10 +295,11 @@
 }
 
 /**
- * async_xor_val - attempt a xor parity check with a dma engine.
+ * async_xor_val_offs - attempt a xor parity check with a dma engine.
  * @dest: destination page used if the xor is performed synchronously
+ * @offset: dst offset in pages to start transaction
  * @src_list: array of source pages
- * @offset: offset in pages to start transaction
+ * @src_offs: array of source pages offset, NULL means common src/dst offset
  * @src_cnt: number of source pages
  * @len: length in bytes
 * @result: 0 if sum == 0 else non-zero
@@ -266,9 +312,10 @@
 * is not specified.
 */
 struct dma_async_tx_descriptor *
-async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
-	      int src_cnt, size_t len, enum sum_check_flags *result,
-	      struct async_submit_ctl *submit)
+async_xor_val_offs(struct page *dest, unsigned int offset,
+		struct page **src_list, unsigned int *src_offs,
+		int src_cnt, size_t len, enum sum_check_flags *result,
+		struct async_submit_ctl *submit)
 {
 	struct dma_chan *chan = xor_val_chan(submit, dest, src_list, src_cnt, len);
 	struct dma_device *device = chan ? chan->device : NULL;
@@ -281,7 +328,7 @@
 		unmap = dmaengine_get_unmap_data(device->dev, src_cnt, GFP_NOWAIT);
 
 	if (unmap && src_cnt <= device->max_xor &&
-	    is_dma_xor_aligned(device, offset, 0, len)) {
+	    dma_xor_aligned_offsets(device, offset, src_offs, src_cnt, len)) {
 		unsigned long dma_prep_flags = 0;
 		int i;
 
@@ -294,7 +341,8 @@
 
 		for (i = 0; i < src_cnt; i++) {
 			unmap->addr[i] = dma_map_page(device->dev, src_list[i],
-						      offset, len, DMA_TO_DEVICE);
+						      src_offs ? src_offs[i] : offset,
+						      len, DMA_TO_DEVICE);
 			unmap->to_cnt++;
 		}
 		unmap->len = len;
@@ -325,7 +373,8 @@
 		submit->flags |= ASYNC_TX_XOR_DROP_DST;
 		submit->flags &= ~ASYNC_TX_ACK;
 
-		tx = async_xor(dest, src_list, offset, src_cnt, len, submit);
+		tx = async_xor_offs(dest, offset, src_list, src_offs,
+				src_cnt, len, submit);
 
 		async_tx_quiesce(&tx);
 
@@ -338,6 +387,32 @@
 
 	return tx;
 }
+EXPORT_SYMBOL_GPL(async_xor_val_offs);
+
+/**
+ * async_xor_val - attempt a xor parity check with a dma engine.
+ * @dest: destination page used if the xor is performed synchronously
+ * @src_list: array of source pages
+ * @offset: offset in pages to start transaction
+ * @src_cnt: number of source pages
+ * @len: length in bytes
+ * @result: 0 if sum == 0 else non-zero
+ * @submit: submission / completion modifiers
+ *
+ * honored flags: ASYNC_TX_ACK
+ *
+ * src_list note: if the dest is also a source it must be at index zero.
+ * The contents of this array will be overwritten if a scribble region
+ * is not specified.
+ */
+struct dma_async_tx_descriptor *
+async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
+	      int src_cnt, size_t len, enum sum_check_flags *result,
+	      struct async_submit_ctl *submit)
+{
+	return async_xor_val_offs(dest, offset, src_list, NULL, src_cnt,
+				  len, result, submit);
+}
 EXPORT_SYMBOL_GPL(async_xor_val);
 
 MODULE_AUTHOR("Intel Corporation");
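
Editor's note: the sketch below is not part of the patch. It illustrates how a caller might drive the new async_xor_offs() entry point when the source blocks sit at different byte offsets within their pages. The wrapper name xor_three_blocks(), the callback done_cb(), and the way the scribble region is obtained are assumptions made for the example; only init_async_submit() and async_xor_offs() are the real API used here.

/* hypothetical caller sketch, assuming len <= PAGE_SIZE and offs[] valid */
#include <linux/async_tx.h>

static void done_cb(void *param)
{
	/* runs when the xor descriptor completes */
}

static struct dma_async_tx_descriptor *
xor_three_blocks(struct page *dest, struct page **srcs, unsigned int *offs,
		 size_t len, addr_conv_t *scribble)
{
	struct async_submit_ctl submit;

	/*
	 * ASYNC_TX_XOR_ZERO_DST: treat @dest as zero rather than as an
	 * implicit source; ASYNC_TX_ACK: the descriptor may be reclaimed
	 * as soon as it completes.
	 */
	init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST | ASYNC_TX_ACK,
			  NULL, done_cb, NULL, scribble);

	/*
	 * offs[i] is the byte offset of srcs[i] within its page; passing
	 * NULL instead would reuse the destination offset (here 0) for
	 * every source, i.e. the old async_xor() behaviour.
	 */
	return async_xor_offs(dest, 0, srcs, offs, 3, len, &submit);
}

As the final hunks show, async_xor() and async_xor_val() become thin wrappers that call the *_offs variants with src_offs == NULL, so existing callers keep the old common-offset semantics.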