| Old | New | Code |
|---|---|---|
| … | … | |
| 39 | 39 | `struct cryptd_cpu_queue {` |
| 40 | 40 | `struct crypto_queue queue;` |
| 41 | 41 | `struct work_struct work;` |
| | 42 | `+ spinlock_t qlock;` |
| 42 | 43 | `};` |
| 43 | 44 | |
| 44 | 45 | `struct cryptd_queue {` |
| … | … | |
| 117 | 118 | `cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);` |
| 118 | 119 | `crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);` |
| 119 | 120 | `INIT_WORK(&cpu_queue->work, cryptd_queue_worker);` |
| | 121 | `+ spin_lock_init(&cpu_queue->qlock);` |
| 120 | 122 | `}` |
| 121 | 123 | `pr_info("cryptd: max_cpu_qlen set to %d\n", max_cpu_qlen);` |
| 122 | 124 | `return 0;` |
| Old | New | Code |
|---|---|---|
| … | … | |
| 141 | 143 | `struct cryptd_cpu_queue *cpu_queue;` |
| 142 | 144 | `atomic_t *refcnt;` |
| 143 | 145 | |
| 144 | | `- cpu = get_cpu();` |
| 145 | | `- cpu_queue = this_cpu_ptr(queue->cpu_queue);` |
| | 146 | `+ cpu_queue = raw_cpu_ptr(queue->cpu_queue);` |
| | 147 | `+ spin_lock_bh(&cpu_queue->qlock);` |
| | 148 | `+ cpu = smp_processor_id();` |
| | 149 | `+` |
| 146 | 150 | `err = crypto_enqueue_request(&cpu_queue->queue, request);` |
| 147 | 151 | |
| 148 | 152 | `refcnt = crypto_tfm_ctx(request->tfm);` |
| … | … | |
| 158 | 162 | `atomic_inc(refcnt);` |
| 159 | 163 | |
| 160 | 164 | `out_put_cpu:` |
| 161 | | `- put_cpu();` |
| | 165 | `+ spin_unlock_bh(&cpu_queue->qlock);` |
| 162 | 166 | |
| 163 | 167 | `return err;` |
| 164 | 168 | `}` |
| … | … | |
| 174 | 178 | `cpu_queue = container_of(work, struct cryptd_cpu_queue, work);` |
| 175 | 179 | `/*` |
| 176 | 180 | `* Only handle one request at a time to avoid hogging crypto workqueue.` |
| 177 | | `- * preempt_disable/enable is used to prevent being preempted by` |
| 178 | | `- * cryptd_enqueue_request(). local_bh_disable/enable is used to prevent` |
| 179 | | `- * cryptd_enqueue_request() being accessed from software interrupts.` |
| 180 | 181 | `*/` |
| 181 | | `- local_bh_disable();` |
| 182 | | `- preempt_disable();` |
| | 182 | `+ spin_lock_bh(&cpu_queue->qlock);` |
| 183 | 183 | `backlog = crypto_get_backlog(&cpu_queue->queue);` |
| 184 | 184 | `req = crypto_dequeue_request(&cpu_queue->queue);` |
| 185 | | `- preempt_enable();` |
| 186 | | `- local_bh_enable();` |
| | 185 | `+ spin_unlock_bh(&cpu_queue->qlock);` |
| 187 | 186 | |
| 188 | 187 | `if (!req)` |
| 189 | 188 | `return;` |
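
The net effect of these hunks: `cryptd_enqueue_request()` stops pinning the CPU with `get_cpu()`/`put_cpu()` and instead takes the per-CPU `qlock` with `spin_lock_bh()` around the enqueue, while `cryptd_queue_worker()` replaces its `local_bh_disable()`/`preempt_disable()` pair with the same lock around the dequeue. A rough sketch of the resulting locking pattern, with the unchanged middles of both functions elided and the signatures assumed from the context lines:

```c
/* Sketch only: signatures and elided bodies reconstructed from context. */
static int cryptd_enqueue_request(struct cryptd_queue *queue,
                                  struct crypto_async_request *request)
{
        int cpu, err;
        struct cryptd_cpu_queue *cpu_queue;
        atomic_t *refcnt;

        cpu_queue = raw_cpu_ptr(queue->cpu_queue); /* no get_cpu() any more */
        spin_lock_bh(&cpu_queue->qlock);
        cpu = smp_processor_id();

        err = crypto_enqueue_request(&cpu_queue->queue, request);

        refcnt = crypto_tfm_ctx(request->tfm);

        /* ... work scheduling and refcount handling elided;
         * error paths jump to out_put_cpu ... */

out_put_cpu:
        spin_unlock_bh(&cpu_queue->qlock);      /* was put_cpu() */

        return err;
}

static void cryptd_queue_worker(struct work_struct *work)
{
        struct cryptd_cpu_queue *cpu_queue;
        struct crypto_async_request *req, *backlog;

        cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
        /* Only handle one request at a time to avoid hogging crypto workqueue. */
        spin_lock_bh(&cpu_queue->qlock);        /* was local_bh_disable() + preempt_disable() */
        backlog = crypto_get_backlog(&cpu_queue->queue);
        req = crypto_dequeue_request(&cpu_queue->queue);
        spin_unlock_bh(&cpu_queue->qlock);      /* was preempt_enable() + local_bh_enable() */

        if (!req)
                return;

        /* ... backlog notification and request completion elided ... */
}
```

Both sides use the `_bh` variant of the lock, so `cryptd_enqueue_request()`, which the removed comment notes can be reached from software interrupts, cannot deadlock against a holder of the same per-CPU lock.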