2023-11-06 e3e12f52b214121840b44c91de5b3e5af5d3eb84
kernel/crypto/cryptd.c
@@ -39,6 +39,7 @@
 struct cryptd_cpu_queue {
	struct crypto_queue queue;
	struct work_struct work;
+	spinlock_t qlock;
 };

 struct cryptd_queue {
@@ -117,6 +118,7 @@
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
		INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
+		spin_lock_init(&cpu_queue->qlock);
	}
	pr_info("cryptd: max_cpu_qlen set to %d\n", max_cpu_qlen);
	return 0;
@@ -141,8 +143,10 @@
	struct cryptd_cpu_queue *cpu_queue;
	atomic_t *refcnt;

-	cpu = get_cpu();
-	cpu_queue = this_cpu_ptr(queue->cpu_queue);
+	cpu_queue = raw_cpu_ptr(queue->cpu_queue);
+	spin_lock_bh(&cpu_queue->qlock);
+	cpu = smp_processor_id();
+
	err = crypto_enqueue_request(&cpu_queue->queue, request);

	refcnt = crypto_tfm_ctx(request->tfm);
@@ -158,7 +162,7 @@
	atomic_inc(refcnt);

 out_put_cpu:
-	put_cpu();
+	spin_unlock_bh(&cpu_queue->qlock);

	return err;
 }
@@ -174,16 +178,11 @@
	cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
	/*
	 * Only handle one request at a time to avoid hogging crypto workqueue.
-	 * preempt_disable/enable is used to prevent being preempted by
-	 * cryptd_enqueue_request(). local_bh_disable/enable is used to prevent
-	 * cryptd_enqueue_request() being accessed from software interrupts.
	 */
-	local_bh_disable();
-	preempt_disable();
+	spin_lock_bh(&cpu_queue->qlock);
	backlog = crypto_get_backlog(&cpu_queue->queue);
	req = crypto_dequeue_request(&cpu_queue->queue);
-	preempt_enable();
-	local_bh_enable();
+	spin_unlock_bh(&cpu_queue->qlock);

	if (!req)
		return;
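
For reference, a minimal sketch of the locking pattern the hunks above converge on: the per-CPU queue is serialized by the new qlock, taken with spin_lock_bh(), instead of the old get_cpu()/put_cpu() and local_bh_disable()/preempt_disable() pairs. The helper names sketch_enqueue() and sketch_dequeue_one() are illustrative only and not part of cryptd; the struct layout follows the first hunk, and the refcount/workqueue bookkeeping of the real code is elided.

#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <crypto/algapi.h>

struct cryptd_cpu_queue {
	struct crypto_queue queue;
	struct work_struct work;
	spinlock_t qlock;	/* initialized with spin_lock_init() for each CPU */
};

struct cryptd_queue {
	struct cryptd_cpu_queue __percpu *cpu_queue;
};

/* Enqueue path: the lock replaces get_cpu()/put_cpu(). */
static int sketch_enqueue(struct cryptd_queue *queue,
			  struct crypto_async_request *request)
{
	struct cryptd_cpu_queue *cpu_queue;
	int err;

	cpu_queue = raw_cpu_ptr(queue->cpu_queue);
	spin_lock_bh(&cpu_queue->qlock);	/* _bh: also excludes softirq callers */
	err = crypto_enqueue_request(&cpu_queue->queue, request);
	/* ... refcount bookkeeping and per-CPU worker kick elided ... */
	spin_unlock_bh(&cpu_queue->qlock);

	return err;
}

/* Worker path: the same lock replaces local_bh_disable()/preempt_disable(). */
static void sketch_dequeue_one(struct cryptd_cpu_queue *cpu_queue)
{
	struct crypto_async_request *req, *backlog;

	spin_lock_bh(&cpu_queue->qlock);
	backlog = crypto_get_backlog(&cpu_queue->queue);
	req = crypto_dequeue_request(&cpu_queue->queue);
	spin_unlock_bh(&cpu_queue->qlock);

	if (!req)
		return;
	/* ... complete backlog and req outside the lock ... */
}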