From 1c055e55a242a33e574e48be530e06770a210dcd Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Mon, 19 Feb 2024 03:26:26 +0000
Subject: [PATCH] add r8169 read mac from eeprom

---
 kernel/net/sunrpc/auth_gss/gss_krb5_crypto.c | 359 +++++++----------------------------------------------------
 1 file changed, 43 insertions(+), 316 deletions(-)

diff --git a/kernel/net/sunrpc/auth_gss/gss_krb5_crypto.c b/kernel/net/sunrpc/auth_gss/gss_krb5_crypto.c
index 0220e1c..634b6c6 100644
--- a/kernel/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/kernel/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -53,7 +53,7 @@

 u32
 krb5_encrypt(
-	struct crypto_skcipher *tfm,
+	struct crypto_sync_skcipher *tfm,
 	void * iv,
 	void * in,
 	void * out,
@@ -62,24 +62,24 @@
 	u32 ret = -EINVAL;
 	struct scatterlist sg[1];
 	u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
-	SKCIPHER_REQUEST_ON_STACK(req, tfm);
+	SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

-	if (length % crypto_skcipher_blocksize(tfm) != 0)
+	if (length % crypto_sync_skcipher_blocksize(tfm) != 0)
 		goto out;

-	if (crypto_skcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
+	if (crypto_sync_skcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
 		dprintk("RPC: gss_k5encrypt: tfm iv size too large %d\n",
-			crypto_skcipher_ivsize(tfm));
+			crypto_sync_skcipher_ivsize(tfm));
 		goto out;
 	}

 	if (iv)
-		memcpy(local_iv, iv, crypto_skcipher_ivsize(tfm));
+		memcpy(local_iv, iv, crypto_sync_skcipher_ivsize(tfm));

 	memcpy(out, in, length);
 	sg_init_one(sg, out, length);

-	skcipher_request_set_tfm(req, tfm);
+	skcipher_request_set_sync_tfm(req, tfm);
 	skcipher_request_set_callback(req, 0, NULL, NULL);
 	skcipher_request_set_crypt(req, sg, sg, length, local_iv);

@@ -92,7 +92,7 @@

 u32
 krb5_decrypt(
-	struct crypto_skcipher *tfm,
+	struct crypto_sync_skcipher *tfm,
 	void * iv,
 	void * in,
 	void * out,
@@ -101,23 +101,23 @@
 	u32 ret = -EINVAL;
 	struct scatterlist sg[1];
 	u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
-	SKCIPHER_REQUEST_ON_STACK(req, tfm);
+	SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

-	if (length % crypto_skcipher_blocksize(tfm) != 0)
+	if (length % crypto_sync_skcipher_blocksize(tfm) != 0)
 		goto out;

-	if (crypto_skcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
+	if (crypto_sync_skcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
 		dprintk("RPC: gss_k5decrypt: tfm iv size too large %d\n",
-			crypto_skcipher_ivsize(tfm));
+			crypto_sync_skcipher_ivsize(tfm));
 		goto out;
 	}

 	if (iv)
-		memcpy(local_iv,iv, crypto_skcipher_ivsize(tfm));
+		memcpy(local_iv, iv, crypto_sync_skcipher_ivsize(tfm));

 	memcpy(out, in, length);
 	sg_init_one(sg, out, length);

-	skcipher_request_set_tfm(req, tfm);
+	skcipher_request_set_sync_tfm(req, tfm);
 	skcipher_request_set_callback(req, 0, NULL, NULL);
 	skcipher_request_set_crypt(req, sg, sg, length, local_iv);

@@ -138,135 +138,6 @@
 	return crypto_ahash_update(req);
 }

-static int
-arcfour_hmac_md5_usage_to_salt(unsigned int usage, u8 salt[4])
-{
-	unsigned int ms_usage;
-
-	switch (usage) {
-	case KG_USAGE_SIGN:
-		ms_usage = 15;
-		break;
-	case KG_USAGE_SEAL:
-		ms_usage = 13;
-		break;
-	default:
-		return -EINVAL;
-	}
-	salt[0] = (ms_usage >> 0) & 0xff;
-	salt[1] = (ms_usage >> 8) & 0xff;
-	salt[2] = (ms_usage >> 16) & 0xff;
-	salt[3] = (ms_usage >> 24) & 0xff;
-
-	return 0;
-}
-
-static u32
-make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen,
-		       struct xdr_buf *body, int body_offset, u8 *cksumkey,
-		       unsigned int usage, struct xdr_netobj *cksumout)
-{
-	struct scatterlist sg[1];
-	int err = -1;
-	u8 *checksumdata;
-	u8 *rc4salt;
-	struct crypto_ahash *md5;
-	struct crypto_ahash *hmac_md5;
-	struct ahash_request *req;
-
-	if (cksumkey == NULL)
-		return GSS_S_FAILURE;
-
-	if (cksumout->len < kctx->gk5e->cksumlength) {
-		dprintk("%s: checksum buffer length, %u, too small for %s\n",
-			__func__, cksumout->len, kctx->gk5e->name);
-		return GSS_S_FAILURE;
-	}
-
-	rc4salt = kmalloc_array(4, sizeof(*rc4salt), GFP_NOFS);
-	if (!rc4salt)
-		return GSS_S_FAILURE;
-
-	if (arcfour_hmac_md5_usage_to_salt(usage, rc4salt)) {
-		dprintk("%s: invalid usage value %u\n", __func__, usage);
-		goto out_free_rc4salt;
-	}
-
-	checksumdata = kmalloc(GSS_KRB5_MAX_CKSUM_LEN, GFP_NOFS);
-	if (!checksumdata)
-		goto out_free_rc4salt;
-
-	md5 = crypto_alloc_ahash("md5", 0, CRYPTO_ALG_ASYNC);
-	if (IS_ERR(md5))
-		goto out_free_cksum;
-
-	hmac_md5 = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0,
-				      CRYPTO_ALG_ASYNC);
-	if (IS_ERR(hmac_md5))
-		goto out_free_md5;
-
-	req = ahash_request_alloc(md5, GFP_NOFS);
-	if (!req)
-		goto out_free_hmac_md5;
-
-	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
-
-	err = crypto_ahash_init(req);
-	if (err)
-		goto out;
-	sg_init_one(sg, rc4salt, 4);
-	ahash_request_set_crypt(req, sg, NULL, 4);
-	err = crypto_ahash_update(req);
-	if (err)
-		goto out;
-
-	sg_init_one(sg, header, hdrlen);
-	ahash_request_set_crypt(req, sg, NULL, hdrlen);
-	err = crypto_ahash_update(req);
-	if (err)
-		goto out;
-	err = xdr_process_buf(body, body_offset, body->len - body_offset,
-			      checksummer, req);
-	if (err)
-		goto out;
-	ahash_request_set_crypt(req, NULL, checksumdata, 0);
-	err = crypto_ahash_final(req);
-	if (err)
-		goto out;
-
-	ahash_request_free(req);
-	req = ahash_request_alloc(hmac_md5, GFP_NOFS);
-	if (!req)
-		goto out_free_hmac_md5;
-
-	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
-
-	err = crypto_ahash_setkey(hmac_md5, cksumkey, kctx->gk5e->keylength);
-	if (err)
-		goto out;
-
-	sg_init_one(sg, checksumdata, crypto_ahash_digestsize(md5));
-	ahash_request_set_crypt(req, sg, checksumdata,
-				crypto_ahash_digestsize(md5));
-	err = crypto_ahash_digest(req);
-	if (err)
-		goto out;
-
-	memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
-	cksumout->len = kctx->gk5e->cksumlength;
-out:
-	ahash_request_free(req);
-out_free_hmac_md5:
-	crypto_free_ahash(hmac_md5);
-out_free_md5:
-	crypto_free_ahash(md5);
-out_free_cksum:
-	kfree(checksumdata);
-out_free_rc4salt:
-	kfree(rc4salt);
-	return err ? GSS_S_FAILURE : 0;
-}
-
 /*
  * checksum the plaintext data and hdrlen bytes of the token header
  * The checksum is performed over the first 8 bytes of the
@@ -283,11 +154,6 @@
 	int err = -1;
 	u8 *checksumdata;
 	unsigned int checksumlen;
-
-	if (kctx->gk5e->ctype == CKSUMTYPE_HMAC_MD5_ARCFOUR)
-		return make_checksum_hmac_md5(kctx, header, hdrlen,
-					      body, body_offset,
-					      cksumkey, usage, cksumout);

 	if (cksumout->len < kctx->gk5e->cksumlength) {
 		dprintk("%s: checksum buffer length, %u, too small for %s\n",
@@ -466,7 +332,8 @@
 {
 	struct encryptor_desc *desc = data;
 	struct xdr_buf *outbuf = desc->outbuf;
-	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(desc->req);
+	struct crypto_sync_skcipher *tfm =
+		crypto_sync_skcipher_reqtfm(desc->req);
 	struct page *in_page;
 	int thislen = desc->fraglen + sg->length;
 	int fraglen, ret;
@@ -492,7 +359,7 @@
 	desc->fraglen += sg->length;
 	desc->pos += sg->length;

-	fraglen = thislen & (crypto_skcipher_blocksize(tfm) - 1);
+	fraglen = thislen & (crypto_sync_skcipher_blocksize(tfm) - 1);
 	thislen -= fraglen;

 	if (thislen == 0)
@@ -526,16 +393,16 @@
 }

 int
-gss_encrypt_xdr_buf(struct crypto_skcipher *tfm, struct xdr_buf *buf,
+gss_encrypt_xdr_buf(struct crypto_sync_skcipher *tfm, struct xdr_buf *buf,
 		    int offset, struct page **pages)
 {
 	int ret;
 	struct encryptor_desc desc;
-	SKCIPHER_REQUEST_ON_STACK(req, tfm);
+	SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

-	BUG_ON((buf->len - offset) % crypto_skcipher_blocksize(tfm) != 0);
+	BUG_ON((buf->len - offset) % crypto_sync_skcipher_blocksize(tfm) != 0);

-	skcipher_request_set_tfm(req, tfm);
+	skcipher_request_set_sync_tfm(req, tfm);
 	skcipher_request_set_callback(req, 0, NULL, NULL);

 	memset(desc.iv, 0, sizeof(desc.iv));
@@ -567,7 +434,8 @@
 {
 	struct decryptor_desc *desc = data;
 	int thislen = desc->fraglen + sg->length;
-	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(desc->req);
+	struct crypto_sync_skcipher *tfm =
+		crypto_sync_skcipher_reqtfm(desc->req);
 	int fraglen, ret;

 	/* Worst case is 4 fragments: head, end of page 1, start
@@ -578,7 +446,7 @@
 	desc->fragno++;
 	desc->fraglen += sg->length;

-	fraglen = thislen & (crypto_skcipher_blocksize(tfm) - 1);
+	fraglen = thislen & (crypto_sync_skcipher_blocksize(tfm) - 1);
 	thislen -= fraglen;

 	if (thislen == 0)
@@ -608,17 +476,17 @@
 }

 int
-gss_decrypt_xdr_buf(struct crypto_skcipher *tfm, struct xdr_buf *buf,
+gss_decrypt_xdr_buf(struct crypto_sync_skcipher *tfm, struct xdr_buf *buf,
 		    int offset)
 {
 	int ret;
 	struct decryptor_desc desc;
-	SKCIPHER_REQUEST_ON_STACK(req, tfm);
+	SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

 	/* XXXJBF: */
-	BUG_ON((buf->len - offset) % crypto_skcipher_blocksize(tfm) != 0);
+	BUG_ON((buf->len - offset) % crypto_sync_skcipher_blocksize(tfm) != 0);

-	skcipher_request_set_tfm(req, tfm);
+	skcipher_request_set_sync_tfm(req, tfm);
 	skcipher_request_set_callback(req, 0, NULL, NULL);

 	memset(desc.iv, 0, sizeof(desc.iv));
@@ -672,12 +540,12 @@
 }

 static u32
-gss_krb5_cts_crypt(struct crypto_skcipher *cipher, struct xdr_buf *buf,
+gss_krb5_cts_crypt(struct crypto_sync_skcipher *cipher, struct xdr_buf *buf,
 		   u32 offset, u8 *iv, struct page **pages, int encrypt)
 {
 	u32 ret;
 	struct scatterlist sg[1];
-	SKCIPHER_REQUEST_ON_STACK(req, cipher);
+	SYNC_SKCIPHER_REQUEST_ON_STACK(req, cipher);
 	u8 *data;
 	struct page **save_pages;
 	u32 len = buf->len - offset;
@@ -706,7 +574,7 @@

 	sg_init_one(sg, data, len);

-	skcipher_request_set_tfm(req, cipher);
+	skcipher_request_set_sync_tfm(req, cipher);
 	skcipher_request_set_callback(req, 0, NULL, NULL);
 	skcipher_request_set_crypt(req, sg, sg, len, iv);

@@ -735,7 +603,7 @@
 	struct xdr_netobj hmac;
 	u8 *cksumkey;
 	u8 *ecptr;
-	struct crypto_skcipher *cipher, *aux_cipher;
+	struct crypto_sync_skcipher *cipher, *aux_cipher;
 	int blocksize;
 	struct page **save_pages;
 	int nblocks, nbytes;
@@ -754,7 +622,7 @@
 		cksumkey = kctx->acceptor_integ;
 		usage = KG_USAGE_ACCEPTOR_SEAL;
 	}
-	blocksize = crypto_skcipher_blocksize(cipher);
+	blocksize = crypto_sync_skcipher_blocksize(cipher);

 	/* hide the gss token header and insert the confounder */
 	offset += GSS_KRB5_TOK_HDR_LEN;
@@ -807,7 +675,7 @@
 	memset(desc.iv, 0, sizeof(desc.iv));

 	if (cbcbytes) {
-		SKCIPHER_REQUEST_ON_STACK(req, aux_cipher);
+		SYNC_SKCIPHER_REQUEST_ON_STACK(req, aux_cipher);

 		desc.pos = offset + GSS_KRB5_TOK_HDR_LEN;
 		desc.fragno = 0;
@@ -816,7 +684,7 @@
 		desc.outbuf = buf;
 		desc.req = req;

-		skcipher_request_set_tfm(req, aux_cipher);
+		skcipher_request_set_sync_tfm(req, aux_cipher);
 		skcipher_request_set_callback(req, 0, NULL, NULL);

 		sg_init_table(desc.infrags, 4);
@@ -849,13 +717,13 @@
 }

 u32
-gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf,
-		     u32 *headskip, u32 *tailskip)
+gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, u32 len,
+		     struct xdr_buf *buf, u32 *headskip, u32 *tailskip)
 {
 	struct xdr_buf subbuf;
 	u32 ret = 0;
 	u8 *cksum_key;
-	struct crypto_skcipher *cipher, *aux_cipher;
+	struct crypto_sync_skcipher *cipher, *aux_cipher;
 	struct xdr_netobj our_hmac_obj;
 	u8 our_hmac[GSS_KRB5_MAX_CKSUM_LEN];
 	u8 pkt_hmac[GSS_KRB5_MAX_CKSUM_LEN];
@@ -874,12 +742,12 @@
 		cksum_key = kctx->initiator_integ;
 		usage = KG_USAGE_INITIATOR_SEAL;
 	}
-	blocksize = crypto_skcipher_blocksize(cipher);
+	blocksize = crypto_sync_skcipher_blocksize(cipher);

 	/* create a segment skipping the header and leaving out the checksum */
 	xdr_buf_subsegment(buf, &subbuf, offset + GSS_KRB5_TOK_HDR_LEN,
-			   (buf->len - offset - GSS_KRB5_TOK_HDR_LEN -
+			   (len - offset - GSS_KRB5_TOK_HDR_LEN -
 			    kctx->gk5e->cksumlength));

 	nblocks = (subbuf.len + blocksize - 1) / blocksize;
@@ -891,13 +759,13 @@
 	memset(desc.iv, 0, sizeof(desc.iv));

 	if (cbcbytes) {
-		SKCIPHER_REQUEST_ON_STACK(req, aux_cipher);
+		SYNC_SKCIPHER_REQUEST_ON_STACK(req, aux_cipher);

 		desc.fragno = 0;
 		desc.fraglen = 0;
 		desc.req = req;

-		skcipher_request_set_tfm(req, aux_cipher);
+		skcipher_request_set_sync_tfm(req, aux_cipher);
 		skcipher_request_set_callback(req, 0, NULL, NULL);

 		sg_init_table(desc.frags, 4);
@@ -924,7 +792,7 @@
 		goto out_err;

 	/* Get the packet's hmac value */
-	ret = read_bytes_from_xdr_buf(buf, buf->len - kctx->gk5e->cksumlength,
+	ret = read_bytes_from_xdr_buf(buf, len - kctx->gk5e->cksumlength,
 				      pkt_hmac, kctx->gk5e->cksumlength);
 	if (ret)
 		goto out_err;
@@ -939,145 +807,4 @@
 	if (ret && ret != GSS_S_BAD_SIG)
 		ret = GSS_S_FAILURE;
 	return ret;
-}
-
-/*
- * Compute Kseq given the initial session key and the checksum.
- * Set the key of the given cipher.
- */
-int
-krb5_rc4_setup_seq_key(struct krb5_ctx *kctx, struct crypto_skcipher *cipher,
-		       unsigned char *cksum)
-{
-	struct crypto_shash *hmac;
-	struct shash_desc *desc;
-	u8 Kseq[GSS_KRB5_MAX_KEYLEN];
-	u32 zeroconstant = 0;
-	int err;
-
-	dprintk("%s: entered\n", __func__);
-
-	hmac = crypto_alloc_shash(kctx->gk5e->cksum_name, 0, 0);
-	if (IS_ERR(hmac)) {
-		dprintk("%s: error %ld, allocating hash '%s'\n",
-			__func__, PTR_ERR(hmac), kctx->gk5e->cksum_name);
-		return PTR_ERR(hmac);
-	}
-
-	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(hmac),
-		       GFP_NOFS);
-	if (!desc) {
-		dprintk("%s: failed to allocate shash descriptor for '%s'\n",
-			__func__, kctx->gk5e->cksum_name);
-		crypto_free_shash(hmac);
-		return -ENOMEM;
-	}
-
-	desc->tfm = hmac;
-	desc->flags = 0;
-
-	/* Compute intermediate Kseq from session key */
-	err = crypto_shash_setkey(hmac, kctx->Ksess, kctx->gk5e->keylength);
-	if (err)
-		goto out_err;
-
-	err = crypto_shash_digest(desc, (u8 *)&zeroconstant, 4, Kseq);
-	if (err)
-		goto out_err;
-
-	/* Compute final Kseq from the checksum and intermediate Kseq */
-	err = crypto_shash_setkey(hmac, Kseq, kctx->gk5e->keylength);
-	if (err)
-		goto out_err;
-
-	err = crypto_shash_digest(desc, cksum, 8, Kseq);
-	if (err)
-		goto out_err;
-
-	err = crypto_skcipher_setkey(cipher, Kseq, kctx->gk5e->keylength);
-	if (err)
-		goto out_err;
-
-	err = 0;
-
-out_err:
-	kzfree(desc);
-	crypto_free_shash(hmac);
-	dprintk("%s: returning %d\n", __func__, err);
-	return err;
-}
-
-/*
- * Compute Kcrypt given the initial session key and the plaintext seqnum.
- * Set the key of cipher kctx->enc.
- */
-int
-krb5_rc4_setup_enc_key(struct krb5_ctx *kctx, struct crypto_skcipher *cipher,
-		       s32 seqnum)
-{
-	struct crypto_shash *hmac;
-	struct shash_desc *desc;
-	u8 Kcrypt[GSS_KRB5_MAX_KEYLEN];
-	u8 zeroconstant[4] = {0};
-	u8 seqnumarray[4];
-	int err, i;
-
-	dprintk("%s: entered, seqnum %u\n", __func__, seqnum);
-
-	hmac = crypto_alloc_shash(kctx->gk5e->cksum_name, 0, 0);
-	if (IS_ERR(hmac)) {
-		dprintk("%s: error %ld, allocating hash '%s'\n",
-			__func__, PTR_ERR(hmac), kctx->gk5e->cksum_name);
-		return PTR_ERR(hmac);
-	}
-
-	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(hmac),
-		       GFP_NOFS);
-	if (!desc) {
-		dprintk("%s: failed to allocate shash descriptor for '%s'\n",
-			__func__, kctx->gk5e->cksum_name);
-		crypto_free_shash(hmac);
-		return -ENOMEM;
-	}
-
-	desc->tfm = hmac;
-	desc->flags = 0;
-
-	/* Compute intermediate Kcrypt from session key */
-	for (i = 0; i < kctx->gk5e->keylength; i++)
-		Kcrypt[i] = kctx->Ksess[i] ^ 0xf0;
-
-	err = crypto_shash_setkey(hmac, Kcrypt, kctx->gk5e->keylength);
-	if (err)
-		goto out_err;
-
-	err = crypto_shash_digest(desc, zeroconstant, 4, Kcrypt);
-	if (err)
-		goto out_err;
-
-	/* Compute final Kcrypt from the seqnum and intermediate Kcrypt */
-	err = crypto_shash_setkey(hmac, Kcrypt, kctx->gk5e->keylength);
-	if (err)
-		goto out_err;
-
-	seqnumarray[0] = (unsigned char) ((seqnum >> 24) & 0xff);
-	seqnumarray[1] = (unsigned char) ((seqnum >> 16) & 0xff);
-	seqnumarray[2] = (unsigned char) ((seqnum >> 8) & 0xff);
-	seqnumarray[3] = (unsigned char) ((seqnum >> 0) & 0xff);
-
-	err = crypto_shash_digest(desc, seqnumarray, 4, Kcrypt);
-	if (err)
-		goto out_err;
-
-	err = crypto_skcipher_setkey(cipher, Kcrypt, kctx->gk5e->keylength);
-	if (err)
-		goto out_err;
-
-	err = 0;
-
-out_err:
-	kzfree(desc);
-	crypto_free_shash(hmac);
-	dprintk("%s: returning %d\n", __func__, err);
-	return err;
-}
 }
--
Gitblit v1.6.2