2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/crypto/scompress.c
@@ -1,15 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * Synchronous Compression operations
  *
  * Copyright 2015 LG Electronics Inc.
  * Copyright (c) 2016, Intel Corporation
  * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
 */
 #include <linux/errno.h>
 #include <linux/kernel.h>
@@ -24,33 +19,36 @@
 #include <linux/cryptouser.h>
 #include <net/netlink.h>
 #include <linux/scatterlist.h>
-#include <linux/locallock.h>
 #include <crypto/scatterwalk.h>
 #include <crypto/internal/acompress.h>
 #include <crypto/internal/scompress.h>
 #include "internal.h"
 
+struct scomp_scratch {
+	spinlock_t	lock;
+	void		*src;
+	void		*dst;
+};
+
+static DEFINE_PER_CPU(struct scomp_scratch, scomp_scratch) = {
+	.lock = __SPIN_LOCK_UNLOCKED(scomp_scratch.lock),
+};
+
 static const struct crypto_type crypto_scomp_type;
-static void * __percpu *scomp_src_scratches;
-static void * __percpu *scomp_dst_scratches;
 static int scomp_scratch_users;
 static DEFINE_MUTEX(scomp_lock);
-static DEFINE_LOCAL_IRQ_LOCK(scomp_scratches_lock);
 
 #ifdef CONFIG_NET
 static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
 {
 	struct crypto_report_comp rscomp;
 
-	strncpy(rscomp.type, "scomp", sizeof(rscomp.type));
+	memset(&rscomp, 0, sizeof(rscomp));
 
-	if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
-		    sizeof(struct crypto_report_comp), &rscomp))
-		goto nla_put_failure;
-	return 0;
+	strscpy(rscomp.type, "scomp", sizeof(rscomp.type));
 
-nla_put_failure:
-	return -EMSGSIZE;
+	return nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
+		       sizeof(rscomp), &rscomp);
 }
 #else
 static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
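
Note on the report hunk above: zeroing the whole struct before filling the name field prevents uninitialized stack padding from being copied out to userspace via netlink, and strscpy(), unlike strncpy(), always NUL-terminates. A minimal standalone sketch of the same pattern, outside the kernel (the struct and names here are illustrative only; snprintf() stands in for strscpy()):

	#include <stdio.h>
	#include <string.h>

	/* illustrative stand-in for a fixed-size report struct that is
	 * later copied out verbatim */
	struct report {
		char type[64];
	};

	static void fill_report(struct report *r, const char *name)
	{
		/* zero everything first so no stale stack bytes leak */
		memset(r, 0, sizeof(*r));
		/* bounded copy with guaranteed NUL termination, the
		 * property strscpy() provides in the kernel */
		snprintf(r->type, sizeof(r->type), "%s", name);
	}
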
@@ -67,76 +65,53 @@
 	seq_puts(m, "type         : scomp\n");
 }
 
-static void crypto_scomp_free_scratches(void * __percpu *scratches)
+static void crypto_scomp_free_scratches(void)
 {
+	struct scomp_scratch *scratch;
 	int i;
-
-	if (!scratches)
-		return;
-
-	for_each_possible_cpu(i)
-		vfree(*per_cpu_ptr(scratches, i));
-
-	free_percpu(scratches);
-}
-
-static void * __percpu *crypto_scomp_alloc_scratches(void)
-{
-	void * __percpu *scratches;
-	int i;
-
-	scratches = alloc_percpu(void *);
-	if (!scratches)
-		return NULL;
 
 	for_each_possible_cpu(i) {
-		void *scratch;
+		scratch = per_cpu_ptr(&scomp_scratch, i);
 
-		scratch = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
-		if (!scratch)
+		vfree(scratch->src);
+		vfree(scratch->dst);
+		scratch->src = NULL;
+		scratch->dst = NULL;
+	}
+}
+
+static int crypto_scomp_alloc_scratches(void)
+{
+	struct scomp_scratch *scratch;
+	int i;
+
+	for_each_possible_cpu(i) {
+		void *mem;
+
+		scratch = per_cpu_ptr(&scomp_scratch, i);
+
+		mem = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
+		if (!mem)
 			goto error;
-		*per_cpu_ptr(scratches, i) = scratch;
-	}
-
-	return scratches;
-
-error:
-	crypto_scomp_free_scratches(scratches);
-	return NULL;
-}
-
-static void crypto_scomp_free_all_scratches(void)
-{
-	if (!--scomp_scratch_users) {
-		crypto_scomp_free_scratches(scomp_src_scratches);
-		crypto_scomp_free_scratches(scomp_dst_scratches);
-		scomp_src_scratches = NULL;
-		scomp_dst_scratches = NULL;
-	}
-}
-
-static int crypto_scomp_alloc_all_scratches(void)
-{
-	if (!scomp_scratch_users++) {
-		scomp_src_scratches = crypto_scomp_alloc_scratches();
-		if (!scomp_src_scratches)
-			return -ENOMEM;
-		scomp_dst_scratches = crypto_scomp_alloc_scratches();
-		if (!scomp_dst_scratches) {
-			crypto_scomp_free_scratches(scomp_src_scratches);
-			scomp_src_scratches = NULL;
-			return -ENOMEM;
-		}
+		scratch->src = mem;
+		mem = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
+		if (!mem)
+			goto error;
+		scratch->dst = mem;
 	}
 	return 0;
+error:
+	crypto_scomp_free_scratches();
+	return -ENOMEM;
 }
 
 static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
 {
-	int ret;
+	int ret = 0;
 
 	mutex_lock(&scomp_lock);
-	ret = crypto_scomp_alloc_all_scratches();
+	if (!scomp_scratch_users++)
+		ret = crypto_scomp_alloc_scratches();
 	mutex_unlock(&scomp_lock);
 
 	return ret;
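
The hunk above folds the separate src/dst scratch arrays into one statically defined per-CPU struct and replaces the alloc_all/free_all wrappers with a plain use count guarded by scomp_lock: the first tfm allocates the scratch pages, the last one (see the exit hunk further down) frees them. A minimal userspace sketch of that refcounted lazy-init idiom, with hypothetical names and pthreads in place of the kernel mutex:

	#include <pthread.h>
	#include <stdlib.h>

	static pthread_mutex_t res_lock = PTHREAD_MUTEX_INITIALIZER;
	static int res_users;	/* count of active users of the shared resource */
	static void *res;	/* lazily allocated shared resource */

	static int res_get(void)
	{
		int ret = 0;

		pthread_mutex_lock(&res_lock);
		if (!res_users++) {	/* first user allocates */
			res = malloc(4096);
			if (!res) {
				res_users--;
				ret = -1;
			}
		}
		pthread_mutex_unlock(&res_lock);
		return ret;
	}

	static void res_put(void)
	{
		pthread_mutex_lock(&res_lock);
		if (!--res_users) {	/* last user frees */
			free(res);
			res = NULL;
		}
		pthread_mutex_unlock(&res_lock);
	}
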
@@ -148,42 +123,41 @@
 	void **tfm_ctx = acomp_tfm_ctx(tfm);
 	struct crypto_scomp *scomp = *tfm_ctx;
 	void **ctx = acomp_request_ctx(req);
-	const int cpu = local_lock_cpu(scomp_scratches_lock);
-	u8 *scratch_src = *per_cpu_ptr(scomp_src_scratches, cpu);
-	u8 *scratch_dst = *per_cpu_ptr(scomp_dst_scratches, cpu);
+	struct scomp_scratch *scratch;
 	int ret;
 
-	if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE) {
-		ret = -EINVAL;
-		goto out;
-	}
+	if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE)
+		return -EINVAL;
 
-	if (req->dst && !req->dlen) {
-		ret = -EINVAL;
-		goto out;
-	}
+	if (req->dst && !req->dlen)
+		return -EINVAL;
 
 	if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE)
 		req->dlen = SCOMP_SCRATCH_SIZE;
 
-	scatterwalk_map_and_copy(scratch_src, req->src, 0, req->slen, 0);
+	scratch = raw_cpu_ptr(&scomp_scratch);
+	spin_lock(&scratch->lock);
+
+	scatterwalk_map_and_copy(scratch->src, req->src, 0, req->slen, 0);
 	if (dir)
-		ret = crypto_scomp_compress(scomp, scratch_src, req->slen,
-					    scratch_dst, &req->dlen, *ctx);
+		ret = crypto_scomp_compress(scomp, scratch->src, req->slen,
+					    scratch->dst, &req->dlen, *ctx);
 	else
-		ret = crypto_scomp_decompress(scomp, scratch_src, req->slen,
-					      scratch_dst, &req->dlen, *ctx);
+		ret = crypto_scomp_decompress(scomp, scratch->src, req->slen,
+					      scratch->dst, &req->dlen, *ctx);
 	if (!ret) {
 		if (!req->dst) {
 			req->dst = sgl_alloc(req->dlen, GFP_ATOMIC, NULL);
-			if (!req->dst)
+			if (!req->dst) {
+				ret = -ENOMEM;
 				goto out;
+			}
 		}
-		scatterwalk_map_and_copy(scratch_dst, req->dst, 0, req->dlen,
+		scatterwalk_map_and_copy(scratch->dst, req->dst, 0, req->dlen,
 					 1);
 	}
 out:
-	local_unlock_cpu(scomp_scratches_lock);
+	spin_unlock(&scratch->lock);
 	return ret;
 }
 
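
In the comp/decomp hunk, users of a CPU's scratch pair are now serialized by the per-scratch spinlock rather than the RT-tree local lock: raw_cpu_ptr() merely picks the current CPU's scratch, and it is the spinlock, not CPU pinning, that makes concurrent access safe (a caller migrated to another CPU just contends on that scratch's lock). This path is also now fixed to return -ENOMEM instead of a stale ret when sgl_alloc() fails. For reference, the whole function sits behind the acomp API; a minimal sketch of a kernel caller that would exercise it, using the real acomp calls with error handling trimmed ("deflate" and the buffer names are just examples):

	struct scatterlist src, dst;
	struct crypto_acomp *tfm;
	struct acomp_req *req;
	int ret;

	/* any scomp-backed algorithm name works here */
	tfm = crypto_alloc_acomp("deflate", 0, 0);
	req = acomp_request_alloc(tfm);

	/* in_buf/out_buf: caller's linear buffers wrapped in scatterlists */
	sg_init_one(&src, in_buf, in_len);
	sg_init_one(&dst, out_buf, out_len);
	acomp_request_set_params(req, &src, &dst, in_len, out_len);

	/* for an scomp-backed tfm this lands in scomp_acomp_comp_decomp() */
	ret = crypto_acomp_compress(req);

	acomp_request_free(req);
	crypto_free_acomp(tfm);
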
@@ -204,7 +178,8 @@
 	crypto_free_scomp(*ctx);
 
 	mutex_lock(&scomp_lock);
-	crypto_scomp_free_all_scratches();
+	if (!--scomp_scratch_users)
+		crypto_scomp_free_scratches();
 	mutex_unlock(&scomp_lock);
 }
 
@@ -291,9 +266,9 @@
 }
 EXPORT_SYMBOL_GPL(crypto_register_scomp);
 
-int crypto_unregister_scomp(struct scomp_alg *alg)
+void crypto_unregister_scomp(struct scomp_alg *alg)
 {
-	return crypto_unregister_alg(&alg->base);
+	crypto_unregister_alg(&alg->base);
 }
 EXPORT_SYMBOL_GPL(crypto_unregister_scomp);
 