2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/fs/cifs/smb2ops.c
@@ -593,7 +593,7 @@
         if (rc == -EOPNOTSUPP) {
                 cifs_dbg(FYI,
                          "server does not support query network interfaces\n");
-                goto out;
+                ret_data_len = 0;
         } else if (rc != 0) {
                 cifs_tcon_dbg(VFS, "error %d on ioctl to get interface list\n", rc);
                 goto out;
@@ -859,12 +859,13 @@
         bool no_cached_open = tcon->nohandlecache;
         struct cached_fid *cfid = NULL;
 
-        oparms.tcon = tcon;
-        oparms.desired_access = FILE_READ_ATTRIBUTES;
-        oparms.disposition = FILE_OPEN;
-        oparms.create_options = cifs_create_options(cifs_sb, 0);
-        oparms.fid = &fid;
-        oparms.reconnect = false;
+        oparms = (struct cifs_open_parms) {
+                .tcon = tcon,
+                .desired_access = FILE_READ_ATTRIBUTES,
+                .disposition = FILE_OPEN,
+                .create_options = cifs_create_options(cifs_sb, 0),
+                .fid = &fid,
+        };
 
         if (no_cached_open) {
                 rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL,
@@ -1783,7 +1784,7 @@
         pcchunk->SourceOffset = cpu_to_le64(src_off);
         pcchunk->TargetOffset = cpu_to_le64(dest_off);
         pcchunk->Length =
-                cpu_to_le32(min_t(u32, len, tcon->max_bytes_chunk));
+                cpu_to_le32(min_t(u64, len, tcon->max_bytes_chunk));
 
         /* Request server copy to target from src identified by key */
         kfree(retbuf);
@@ -4164,69 +4165,82 @@
         memcpy(&tr_hdr->SessionId, &shdr->SessionId, 8);
 }
 
-/* We can not use the normal sg_set_buf() as we will sometimes pass a
- * stack object as buf.
- */
-static inline void smb2_sg_set_buf(struct scatterlist *sg, const void *buf,
-                                   unsigned int buflen)
+static void *smb2_aead_req_alloc(struct crypto_aead *tfm, const struct smb_rqst *rqst,
+                                 int num_rqst, const u8 *sig, u8 **iv,
+                                 struct aead_request **req, struct scatterlist **sgl,
+                                 unsigned int *num_sgs)
 {
-        void *addr;
-        /*
-         * VMAP_STACK (at least) puts stack into the vmalloc address space
-         */
-        if (is_vmalloc_addr(buf))
-                addr = vmalloc_to_page(buf);
-        else
-                addr = virt_to_page(buf);
-        sg_set_page(sg, addr, buflen, offset_in_page(buf));
-}
+        unsigned int req_size = sizeof(**req) + crypto_aead_reqsize(tfm);
+        unsigned int iv_size = crypto_aead_ivsize(tfm);
+        unsigned int len;
+        u8 *p;
 
-/* Assumes the first rqst has a transform header as the first iov.
- * I.e.
- * rqst[0].rq_iov[0] is transform header
- * rqst[0].rq_iov[1+] data to be encrypted/decrypted
- * rqst[1+].rq_iov[0+] data to be encrypted/decrypted
- */
-static struct scatterlist *
-init_sg(int num_rqst, struct smb_rqst *rqst, u8 *sign)
-{
-        unsigned int sg_len;
-        struct scatterlist *sg;
-        unsigned int i;
-        unsigned int j;
-        unsigned int idx = 0;
-        int skip;
+        *num_sgs = cifs_get_num_sgs(rqst, num_rqst, sig);
 
-        sg_len = 1;
-        for (i = 0; i < num_rqst; i++)
-                sg_len += rqst[i].rq_nvec + rqst[i].rq_npages;
+        len = iv_size;
+        len += crypto_aead_alignmask(tfm) & ~(crypto_tfm_ctx_alignment() - 1);
+        len = ALIGN(len, crypto_tfm_ctx_alignment());
+        len += req_size;
+        len = ALIGN(len, __alignof__(struct scatterlist));
+        len += *num_sgs * sizeof(**sgl);
 
-        sg = kmalloc_array(sg_len, sizeof(struct scatterlist), GFP_KERNEL);
-        if (!sg)
+        p = kmalloc(len, GFP_ATOMIC);
+        if (!p)
                 return NULL;
 
-        sg_init_table(sg, sg_len);
+        *iv = (u8 *)PTR_ALIGN(p, crypto_aead_alignmask(tfm) + 1);
+        *req = (struct aead_request *)PTR_ALIGN(*iv + iv_size,
+                                                crypto_tfm_ctx_alignment());
+        *sgl = (struct scatterlist *)PTR_ALIGN((u8 *)*req + req_size,
+                                               __alignof__(struct scatterlist));
+        return p;
+}
+
+static void *smb2_get_aead_req(struct crypto_aead *tfm, const struct smb_rqst *rqst,
+                               int num_rqst, const u8 *sig, u8 **iv,
+                               struct aead_request **req, struct scatterlist **sgl)
+{
+        unsigned int off, len, skip;
+        struct scatterlist *sg;
+        unsigned int num_sgs;
+        unsigned long addr;
+        int i, j;
+        void *p;
+
+        p = smb2_aead_req_alloc(tfm, rqst, num_rqst, sig, iv, req, sgl, &num_sgs);
+        if (!p)
+                return NULL;
+
+        sg_init_table(*sgl, num_sgs);
+        sg = *sgl;
+
+        /* Assumes the first rqst has a transform header as the first iov.
+         * I.e.
+         * rqst[0].rq_iov[0] is transform header
+         * rqst[0].rq_iov[1+] data to be encrypted/decrypted
+         * rqst[1+].rq_iov[0+] data to be encrypted/decrypted
+         */
         for (i = 0; i < num_rqst; i++) {
+                /*
+                 * The first rqst has a transform header where the
+                 * first 20 bytes are not part of the encrypted blob.
+                 */
                 for (j = 0; j < rqst[i].rq_nvec; j++) {
-                        /*
-                         * The first rqst has a transform header where the
-                         * first 20 bytes are not part of the encrypted blob
-                         */
+                        struct kvec *iov = &rqst[i].rq_iov[j];
+
                         skip = (i == 0) && (j == 0) ? 20 : 0;
-                        smb2_sg_set_buf(&sg[idx++],
-                                        rqst[i].rq_iov[j].iov_base + skip,
-                                        rqst[i].rq_iov[j].iov_len - skip);
-                }
-
+                        addr = (unsigned long)iov->iov_base + skip;
+                        len = iov->iov_len - skip;
+                        sg = cifs_sg_set_buf(sg, (void *)addr, len);
+                }
                 for (j = 0; j < rqst[i].rq_npages; j++) {
-                        unsigned int len, offset;
-
-                        rqst_page_get_length(&rqst[i], j, &len, &offset);
-                        sg_set_page(&sg[idx++], rqst[i].rq_pages[j], len, offset);
+                        rqst_page_get_length(&rqst[i], j, &len, &off);
+                        sg_set_page(sg++, rqst[i].rq_pages[j], len, off);
                 }
         }
-        smb2_sg_set_buf(&sg[idx], sign, SMB2_SIGNATURE_SIZE);
-        return sg;
+        cifs_sg_set_buf(sg, sig, SMB2_SIGNATURE_SIZE);
+
+        return p;
 }
 
 static int
@@ -4270,11 +4284,11 @@
         u8 sign[SMB2_SIGNATURE_SIZE] = {};
         u8 key[SMB3_ENC_DEC_KEY_SIZE];
         struct aead_request *req;
-        char *iv;
-        unsigned int iv_len;
+        u8 *iv;
         DECLARE_CRYPTO_WAIT(wait);
         struct crypto_aead *tfm;
         unsigned int crypt_len = le32_to_cpu(tr_hdr->OriginalMessageSize);
+        void *creq;
 
         rc = smb2_get_enc_key(server, tr_hdr->SessionId, enc, key);
         if (rc) {
@@ -4309,30 +4323,13 @@
                 return rc;
         }
 
-        req = aead_request_alloc(tfm, GFP_KERNEL);
-        if (!req) {
-                cifs_server_dbg(VFS, "%s: Failed to alloc aead request\n", __func__);
+        creq = smb2_get_aead_req(tfm, rqst, num_rqst, sign, &iv, &req, &sg);
+        if (unlikely(!creq))
                 return -ENOMEM;
-        }
 
         if (!enc) {
                 memcpy(sign, &tr_hdr->Signature, SMB2_SIGNATURE_SIZE);
                 crypt_len += SMB2_SIGNATURE_SIZE;
-        }
-
-        sg = init_sg(num_rqst, rqst, sign);
-        if (!sg) {
-                cifs_server_dbg(VFS, "%s: Failed to init sg\n", __func__);
-                rc = -ENOMEM;
-                goto free_req;
-        }
-
-        iv_len = crypto_aead_ivsize(tfm);
-        iv = kzalloc(iv_len, GFP_KERNEL);
-        if (!iv) {
-                cifs_server_dbg(VFS, "%s: Failed to alloc iv\n", __func__);
-                rc = -ENOMEM;
-                goto free_sg;
         }
 
         if ((server->cipher_type == SMB2_ENCRYPTION_AES128_GCM) ||
@@ -4343,6 +4340,7 @@
                 memcpy(iv + 1, (char *)tr_hdr->Nonce, SMB3_AES_CCM_NONCE);
         }
 
+        aead_request_set_tfm(req, tfm);
         aead_request_set_crypt(req, sg, sg, crypt_len, iv);
         aead_request_set_ad(req, assoc_data_len);
 
@@ -4355,11 +4353,7 @@
         if (!rc && enc)
                 memcpy(&tr_hdr->Signature, sign, SMB2_SIGNATURE_SIZE);
 
-        kfree(iv);
-free_sg:
-        kfree(sg);
-free_req:
-        kfree(req);
+        kfree_sensitive(creq);
         return rc;
 }
 
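Note: the stand-alone user-space sketch below is not part of the patch; it only
illustrates the single-allocation pattern that smb2_aead_req_alloc() introduces
above: size one buffer for the IV, the AEAD request and the scatterlist array,
allocate it once, and carve it into aligned pieces so one kfree_sensitive() in
crypt_message() releases everything. struct fake_req, struct fake_sg and the
locally defined ALIGN_UP/PTR_ALIGN macros are made-up stand-ins, not kernel APIs.

/*
 * Illustrative only: mimic the "one buffer, carved into aligned sub-objects"
 * layout in plain C. fake_req/fake_sg stand in for aead_request/scatterlist.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define ALIGN_UP(x, a)  ((((uintptr_t)(x)) + ((a) - 1)) & ~((uintptr_t)(a) - 1))
#define PTR_ALIGN(p, a) ((void *)ALIGN_UP((uintptr_t)(p), (a)))

struct fake_req { char opaque[64]; };              /* stand-in for aead_request + reqsize */
struct fake_sg  { void *page; unsigned int len; }; /* stand-in for struct scatterlist */

int main(void)
{
        unsigned int iv_size = 12;   /* e.g. a GCM nonce */
        unsigned int num_sgs = 8;    /* placeholder element count */
        size_t len;
        uint8_t *p, *iv;
        struct fake_req *req;
        struct fake_sg *sg;

        /* Size the buffer: IV, then request, then sg[], with alignment padding. */
        len = iv_size;
        len = ALIGN_UP(len, _Alignof(struct fake_req));
        len += sizeof(struct fake_req);
        len = ALIGN_UP(len, _Alignof(struct fake_sg));
        len += num_sgs * sizeof(struct fake_sg);

        /* malloc() returns memory aligned for any standard type, so the
         * PTR_ALIGN() steps below stay inside the allocation. */
        p = malloc(len);
        if (!p)
                return 1;

        iv  = p;
        req = PTR_ALIGN(iv + iv_size, _Alignof(struct fake_req));
        sg  = PTR_ALIGN((uint8_t *)req + sizeof(*req), _Alignof(struct fake_sg));

        printf("buf=%p iv=%p req=%p sg[0]=%p total=%zu bytes\n",
               (void *)p, (void *)iv, (void *)req, (void *)sg, len);

        /* A single free() releases IV, request and scatterlist together. */
        free(p);
        return 0;
}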