2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/fs/verity/verify.c
@@ -227,7 +227,7 @@
 	const struct merkle_tree_params *params = &vi->tree_params;
 	struct ahash_request *req;
 	struct bio_vec *bv;
-	int i;
+	struct bvec_iter_all iter_all;
 	unsigned long max_ra_pages = 0;
 
 	/* This allocation never fails, since it's mempool-backed. */
@@ -243,12 +243,12 @@
 		 * This improves sequential read performance, as it greatly
 		 * reduces the number of I/O requests made to the Merkle tree.
 		 */
-		bio_for_each_segment_all(bv, bio, i)
+		bio_for_each_segment_all(bv, bio, iter_all)
 			max_ra_pages++;
 		max_ra_pages /= 4;
 	}
 
-	bio_for_each_segment_all(bv, bio, i) {
+	bio_for_each_segment_all(bv, bio, iter_all) {
 		struct page *page = bv->bv_page;
 		unsigned long level0_index = page->index >> params->log_arity;
 		unsigned long level0_ra_pages =
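
Context for the first two hunks: with the multi-page bvec interface, bio_for_each_segment_all() no longer takes an integer index; callers declare a struct bvec_iter_all that the macro advances internally. A minimal caller-side sketch of that pattern follows (count_data_pages() is a hypothetical helper used only for illustration, not part of this change):

#include <linux/bio.h>

/* Hypothetical helper mirroring the readahead page count in the hunk above. */
static unsigned long count_data_pages(struct bio *bio)
{
	struct bio_vec *bv;
	struct bvec_iter_all iter_all;	/* replaces the old "int i" index */
	unsigned long nr_pages = 0;

	/* The macro walks every page-sized segment of the bio. */
	bio_for_each_segment_all(bv, bio, iter_all)
		nr_pages++;

	return nr_pages;
}
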
@@ -279,15 +279,15 @@
 int __init fsverity_init_workqueue(void)
 {
 	/*
-	 * Use an unbound workqueue to allow bios to be verified in parallel
-	 * even when they happen to complete on the same CPU. This sacrifices
-	 * locality, but it's worthwhile since hashing is CPU-intensive.
+	 * Use a high-priority workqueue to prioritize verification work, which
+	 * blocks reads from completing, over regular application tasks.
 	 *
-	 * Also use a high-priority workqueue to prioritize verification work,
-	 * which blocks reads from completing, over regular application tasks.
+	 * For performance reasons, don't use an unbound workqueue. Using an
+	 * unbound workqueue for crypto operations causes excessive scheduler
+	 * latency on ARM64.
 	 */
 	fsverity_read_workqueue = alloc_workqueue("fsverity_read_queue",
-						  WQ_UNBOUND | WQ_HIGHPRI,
+						  WQ_HIGHPRI,
						  num_online_cpus());
 	if (!fsverity_read_workqueue)
 		return -ENOMEM;
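
For context on the hunk above: the now bound, high-priority queue is the one filesystems hand completed read bios to for verification. A minimal sketch of that hand-off, assuming the shape of the upstream fsverity_enqueue_verify_work() helper in this file (the surrounding declarations are illustrative, not from this commit):

#include <linux/workqueue.h>

/* Allocated by fsverity_init_workqueue() in the hunk above. */
static struct workqueue_struct *fsverity_read_workqueue;

/*
 * Sketch of the enqueue side: read-completion paths defer hashing to the
 * high-priority, bound workqueue, so verification takes precedence over
 * regular application work without the scheduling latency the commit
 * attributes to WQ_UNBOUND on ARM64.
 */
void fsverity_enqueue_verify_work(struct work_struct *work)
{
	queue_work(fsverity_read_workqueue, work);
}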