| Old | New | Content |
|---|---|---|
| .. | .. | |
| | 1 | +// SPDX-License-Identifier: GPL-2.0-or-later |
| 1 | 2 | /* |
| 2 | 3 | * Glue Code for AVX assembler versions of Serpent Cipher |
| 3 | 4 | * |
| .. | .. | |
| 5 | 6 | * <Johannes.Goetzfried@informatik.stud.uni-erlangen.de> |
| 6 | 7 | * |
| 7 | 8 | * Copyright © 2011-2013 Jussi Kivilinna <jussi.kivilinna@iki.fi> |
| 8 | | - * |
| 9 | | - * This program is free software; you can redistribute it and/or modify |
| 10 | | - * it under the terms of the GNU General Public License as published by |
| 11 | | - * the Free Software Foundation; either version 2 of the License, or |
| 12 | | - * (at your option) any later version. |
| 13 | | - * |
| 14 | | - * This program is distributed in the hope that it will be useful, |
| 15 | | - * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 16 | | - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 17 | | - * GNU General Public License for more details. |
| 18 | | - * |
| 19 | | - * You should have received a copy of the GNU General Public License |
| 20 | | - * along with this program; if not, write to the Free Software |
| 21 | | - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 |
| 22 | | - * USA |
| 23 | | - * |
| 24 | 9 | */ |
| 25 | 10 | |
| 26 | 11 | #include <linux/module.h> |
| .. | .. | |
| 35 | 20 | #include <asm/crypto/serpent-avx.h> |
| 36 | 21 | |
| 37 | 22 | /* 8-way parallel cipher functions */ |
| 38 | | -asmlinkage void serpent_ecb_enc_8way_avx(struct serpent_ctx *ctx, u8 *dst, |
| | 23 | +asmlinkage void serpent_ecb_enc_8way_avx(const void *ctx, u8 *dst, |
| 39 | 24 | const u8 *src); |
| 40 | 25 | EXPORT_SYMBOL_GPL(serpent_ecb_enc_8way_avx); |
| 41 | 26 | |
| 42 | | -asmlinkage void serpent_ecb_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst, |
| | 27 | +asmlinkage void serpent_ecb_dec_8way_avx(const void *ctx, u8 *dst, |
| 43 | 28 | const u8 *src); |
| 44 | 29 | EXPORT_SYMBOL_GPL(serpent_ecb_dec_8way_avx); |
| 45 | 30 | |
| 46 | | -asmlinkage void serpent_cbc_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst, |
| | 31 | +asmlinkage void serpent_cbc_dec_8way_avx(const void *ctx, u8 *dst, |
| 47 | 32 | const u8 *src); |
| 48 | 33 | EXPORT_SYMBOL_GPL(serpent_cbc_dec_8way_avx); |
| 49 | 34 | |
| 50 | | -asmlinkage void serpent_ctr_8way_avx(struct serpent_ctx *ctx, u8 *dst, |
| 51 | | - const u8 *src, le128 *iv); |
| | 35 | +asmlinkage void serpent_ctr_8way_avx(const void *ctx, u8 *dst, const u8 *src, |
| | 36 | + le128 *iv); |
| 52 | 37 | EXPORT_SYMBOL_GPL(serpent_ctr_8way_avx); |
| 53 | 38 | |
| 54 | | -asmlinkage void serpent_xts_enc_8way_avx(struct serpent_ctx *ctx, u8 *dst, |
| | 39 | +asmlinkage void serpent_xts_enc_8way_avx(const void *ctx, u8 *dst, |
| 55 | 40 | const u8 *src, le128 *iv); |
| 56 | 41 | EXPORT_SYMBOL_GPL(serpent_xts_enc_8way_avx); |
| 57 | 42 | |
| 58 | | -asmlinkage void serpent_xts_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst, |
| | 43 | +asmlinkage void serpent_xts_dec_8way_avx(const void *ctx, u8 *dst, |
| 59 | 44 | const u8 *src, le128 *iv); |
| 60 | 45 | EXPORT_SYMBOL_GPL(serpent_xts_dec_8way_avx); |
| 61 | 46 | |
| 62 | | -void __serpent_crypt_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv) |
| | 47 | +void __serpent_crypt_ctr(const void *ctx, u8 *d, const u8 *s, le128 *iv) |
| 63 | 48 | { |
| 64 | 49 | be128 ctrblk; |
| | 50 | + u128 *dst = (u128 *)d; |
| | 51 | + const u128 *src = (const u128 *)s; |
| 65 | 52 | |
| 66 | 53 | le128_to_be128(&ctrblk, iv); |
| 67 | 54 | le128_inc(iv); |
| .. | .. | |
| 71 | 58 | } |
| 72 | 59 | EXPORT_SYMBOL_GPL(__serpent_crypt_ctr); |
| 73 | 60 | |
| 74 | | -void serpent_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv) |
| | 61 | +void serpent_xts_enc(const void *ctx, u8 *dst, const u8 *src, le128 *iv) |
| 75 | 62 | { |
| 76 | | - glue_xts_crypt_128bit_one(ctx, dst, src, iv, |
| 77 | | - GLUE_FUNC_CAST(__serpent_encrypt)); |
| | 63 | + glue_xts_crypt_128bit_one(ctx, dst, src, iv, __serpent_encrypt); |
| 78 | 64 | } |
| 79 | 65 | EXPORT_SYMBOL_GPL(serpent_xts_enc); |
| 80 | 66 | |
| 81 | | -void serpent_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv) |
| | 67 | +void serpent_xts_dec(const void *ctx, u8 *dst, const u8 *src, le128 *iv) |
| 82 | 68 | { |
| 83 | | - glue_xts_crypt_128bit_one(ctx, dst, src, iv, |
| 84 | | - GLUE_FUNC_CAST(__serpent_decrypt)); |
| | 69 | + glue_xts_crypt_128bit_one(ctx, dst, src, iv, __serpent_decrypt); |
| 85 | 70 | } |
| 86 | 71 | EXPORT_SYMBOL_GPL(serpent_xts_dec); |
| 87 | 72 | |
| .. | .. | |
| 117 | 102 | |
| 118 | 103 | .funcs = { { |
| 119 | 104 | .num_blocks = SERPENT_PARALLEL_BLOCKS, |
| 120 | | - .fn_u = { .ecb = GLUE_FUNC_CAST(serpent_ecb_enc_8way_avx) } |
| | 105 | + .fn_u = { .ecb = serpent_ecb_enc_8way_avx } |
| 121 | 106 | }, { |
| 122 | 107 | .num_blocks = 1, |
| 123 | | - .fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_encrypt) } |
| | 108 | + .fn_u = { .ecb = __serpent_encrypt } |
| 124 | 109 | } } |
| 125 | 110 | }; |
| 126 | 111 | |
| .. | .. | |
| 130 | 115 | |
| 131 | 116 | .funcs = { { |
| 132 | 117 | .num_blocks = SERPENT_PARALLEL_BLOCKS, |
| 133 | | - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(serpent_ctr_8way_avx) } |
| | 118 | + .fn_u = { .ctr = serpent_ctr_8way_avx } |
| 134 | 119 | }, { |
| 135 | 120 | .num_blocks = 1, |
| 136 | | - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(__serpent_crypt_ctr) } |
| | 121 | + .fn_u = { .ctr = __serpent_crypt_ctr } |
| 137 | 122 | } } |
| 138 | 123 | }; |
| 139 | 124 | |
| .. | .. | |
| 143 | 128 | |
| 144 | 129 | .funcs = { { |
| 145 | 130 | .num_blocks = SERPENT_PARALLEL_BLOCKS, |
| 146 | | - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_enc_8way_avx) } |
| | 131 | + .fn_u = { .xts = serpent_xts_enc_8way_avx } |
| 147 | 132 | }, { |
| 148 | 133 | .num_blocks = 1, |
| 149 | | - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_enc) } |
| | 134 | + .fn_u = { .xts = serpent_xts_enc } |
| 150 | 135 | } } |
| 151 | 136 | }; |
| 152 | 137 | |
| .. | .. | |
| 156 | 141 | |
| 157 | 142 | .funcs = { { |
| 158 | 143 | .num_blocks = SERPENT_PARALLEL_BLOCKS, |
| 159 | | - .fn_u = { .ecb = GLUE_FUNC_CAST(serpent_ecb_dec_8way_avx) } |
| | 144 | + .fn_u = { .ecb = serpent_ecb_dec_8way_avx } |
| 160 | 145 | }, { |
| 161 | 146 | .num_blocks = 1, |
| 162 | | - .fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_decrypt) } |
| | 147 | + .fn_u = { .ecb = __serpent_decrypt } |
| 163 | 148 | } } |
| 164 | 149 | }; |
| 165 | 150 | |
| .. | .. | |
| 169 | 154 | |
| 170 | 155 | .funcs = { { |
| 171 | 156 | .num_blocks = SERPENT_PARALLEL_BLOCKS, |
| 172 | | - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(serpent_cbc_dec_8way_avx) } |
| | 157 | + .fn_u = { .cbc = serpent_cbc_dec_8way_avx } |
| 173 | 158 | }, { |
| 174 | 159 | .num_blocks = 1, |
| 175 | | - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(__serpent_decrypt) } |
| | 160 | + .fn_u = { .cbc = __serpent_decrypt } |
| 176 | 161 | } } |
| 177 | 162 | }; |
| 178 | 163 | |
| .. | .. | |
| 182 | 167 | |
| 183 | 168 | .funcs = { { |
| 184 | 169 | .num_blocks = SERPENT_PARALLEL_BLOCKS, |
| 185 | | - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_dec_8way_avx) } |
| | 170 | + .fn_u = { .xts = serpent_xts_dec_8way_avx } |
| 186 | 171 | }, { |
| 187 | 172 | .num_blocks = 1, |
| 188 | | - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_dec) } |
| | 173 | + .fn_u = { .xts = serpent_xts_dec } |
| 189 | 174 | } } |
| 190 | 175 | }; |
| 191 | 176 | |
| .. | .. | |
| 201 | 186 | |
| 202 | 187 | static int cbc_encrypt(struct skcipher_request *req) |
| 203 | 188 | { |
| 204 | | - return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(__serpent_encrypt), |
| 205 | | - req); |
| | 189 | + return glue_cbc_encrypt_req_128bit(__serpent_encrypt, req); |
| 206 | 190 | } |
| 207 | 191 | |
| 208 | 192 | static int cbc_decrypt(struct skcipher_request *req) |
| .. | .. | |
| 221 | 205 | struct serpent_xts_ctx *ctx = crypto_skcipher_ctx(tfm); |
| 222 | 206 | |
| 223 | 207 | return glue_xts_req_128bit(&serpent_enc_xts, req, |
| 224 | | - XTS_TWEAK_CAST(__serpent_encrypt), |
| 225 | | - &ctx->tweak_ctx, &ctx->crypt_ctx); |
| | 208 | + __serpent_encrypt, &ctx->tweak_ctx, |
| | 209 | + &ctx->crypt_ctx, false); |
| 226 | 210 | } |
| 227 | 211 | |
| 228 | 212 | static int xts_decrypt(struct skcipher_request *req) |
| .. | .. | |
| 231 | 215 | struct serpent_xts_ctx *ctx = crypto_skcipher_ctx(tfm); |
| 232 | 216 | |
| 233 | 217 | return glue_xts_req_128bit(&serpent_dec_xts, req, |
| 234 | | - XTS_TWEAK_CAST(__serpent_encrypt), |
| 235 | | - &ctx->tweak_ctx, &ctx->crypt_ctx); |
| | 218 | + __serpent_encrypt, &ctx->tweak_ctx, |
| | 219 | + &ctx->crypt_ctx, true); |
| 236 | 220 | } |
| 237 | 221 | |
| 238 | 222 | static struct skcipher_alg serpent_algs[] = { |