From ea08eeccae9297f7aabd2ef7f0c2517ac4549acc Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Tue, 20 Feb 2024 01:18:26 +0000
Subject: [PATCH] write in 30M
---
kernel/block/blk-crypto-internal.h | 198 +++++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 180 insertions(+), 18 deletions(-)
diff --git a/kernel/block/blk-crypto-internal.h b/kernel/block/blk-crypto-internal.h
index 4da998c..8e08345 100644
--- a/kernel/block/blk-crypto-internal.h
+++ b/kernel/block/blk-crypto-internal.h
@@ -7,6 +7,7 @@
#define __LINUX_BLK_CRYPTO_INTERNAL_H
#include <linux/bio.h>
+#include <linux/blkdev.h>
/* Represents a crypto mode supported by blk-crypto */
struct blk_crypto_mode {
@@ -17,17 +18,189 @@
extern const struct blk_crypto_mode blk_crypto_modes[];
+#ifdef CONFIG_BLK_INLINE_ENCRYPTION
+
+void bio_crypt_dun_increment(u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
+ unsigned int inc);
+
+bool bio_crypt_rq_ctx_compatible(struct request *rq, struct bio *bio);
+
+bool bio_crypt_ctx_mergeable(struct bio_crypt_ctx *bc1, unsigned int bc1_bytes,
+ struct bio_crypt_ctx *bc2);
+
+static inline bool bio_crypt_ctx_back_mergeable(struct request *req,
+ struct bio *bio)
+{
+ return bio_crypt_ctx_mergeable(req->crypt_ctx, blk_rq_bytes(req),
+ bio->bi_crypt_context);
+}
+
+static inline bool bio_crypt_ctx_front_mergeable(struct request *req,
+ struct bio *bio)
+{
+ return bio_crypt_ctx_mergeable(bio->bi_crypt_context,
+ bio->bi_iter.bi_size, req->crypt_ctx);
+}
+
+static inline bool bio_crypt_ctx_merge_rq(struct request *req,
+ struct request *next)
+{
+ return bio_crypt_ctx_mergeable(req->crypt_ctx, blk_rq_bytes(req),
+ next->crypt_ctx);
+}
+
+static inline void blk_crypto_rq_set_defaults(struct request *rq)
+{
+ rq->crypt_ctx = NULL;
+ rq->crypt_keyslot = NULL;
+}
+
+static inline bool blk_crypto_rq_is_encrypted(struct request *rq)
+{
+ return rq->crypt_ctx;
+}
+
+static inline bool blk_crypto_rq_has_keyslot(struct request *rq)
+{
+ return rq->crypt_keyslot;
+}
+
+#else /* CONFIG_BLK_INLINE_ENCRYPTION */
+
+static inline bool bio_crypt_rq_ctx_compatible(struct request *rq,
+ struct bio *bio)
+{
+ return true;
+}
+
+static inline bool bio_crypt_ctx_front_mergeable(struct request *req,
+ struct bio *bio)
+{
+ return true;
+}
+
+static inline bool bio_crypt_ctx_back_mergeable(struct request *req,
+ struct bio *bio)
+{
+ return true;
+}
+
+static inline bool bio_crypt_ctx_merge_rq(struct request *req,
+ struct request *next)
+{
+ return true;
+}
+
+static inline void blk_crypto_rq_set_defaults(struct request *rq) { }
+
+static inline bool blk_crypto_rq_is_encrypted(struct request *rq)
+{
+ return false;
+}
+
+static inline bool blk_crypto_rq_has_keyslot(struct request *rq)
+{
+ return false;
+}
+
+#endif /* CONFIG_BLK_INLINE_ENCRYPTION */
+
+void __bio_crypt_advance(struct bio *bio, unsigned int bytes);
+static inline void bio_crypt_advance(struct bio *bio, unsigned int bytes)
+{
+ if (bio_has_crypt_ctx(bio))
+ __bio_crypt_advance(bio, bytes);
+}
+
+void __bio_crypt_free_ctx(struct bio *bio);
+static inline void bio_crypt_free_ctx(struct bio *bio)
+{
+ if (bio_has_crypt_ctx(bio))
+ __bio_crypt_free_ctx(bio);
+}
+
+static inline void bio_crypt_do_front_merge(struct request *rq,
+ struct bio *bio)
+{
+#ifdef CONFIG_BLK_INLINE_ENCRYPTION
+ if (bio_has_crypt_ctx(bio))
+ memcpy(rq->crypt_ctx->bc_dun, bio->bi_crypt_context->bc_dun,
+ sizeof(rq->crypt_ctx->bc_dun));
+#endif
+}
+
+bool __blk_crypto_bio_prep(struct bio **bio_ptr);
+static inline bool blk_crypto_bio_prep(struct bio **bio_ptr)
+{
+ if (bio_has_crypt_ctx(*bio_ptr))
+ return __blk_crypto_bio_prep(bio_ptr);
+ return true;
+}
+
+blk_status_t __blk_crypto_rq_get_keyslot(struct request *rq);
+static inline blk_status_t blk_crypto_rq_get_keyslot(struct request *rq)
+{
+ if (blk_crypto_rq_is_encrypted(rq))
+ return __blk_crypto_rq_get_keyslot(rq);
+ return BLK_STS_OK;
+}
+
+void __blk_crypto_rq_put_keyslot(struct request *rq);
+static inline void blk_crypto_rq_put_keyslot(struct request *rq)
+{
+ if (blk_crypto_rq_has_keyslot(rq))
+ __blk_crypto_rq_put_keyslot(rq);
+}
+
+void __blk_crypto_free_request(struct request *rq);
+static inline void blk_crypto_free_request(struct request *rq)
+{
+ if (blk_crypto_rq_is_encrypted(rq))
+ __blk_crypto_free_request(rq);
+}
+
+int __blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
+ gfp_t gfp_mask);
+/**
+ * blk_crypto_rq_bio_prep - Prepare a request's crypt_ctx when its first bio
+ * is inserted
+ * @rq: The request to prepare
+ * @bio: The first bio being inserted into the request
+ * @gfp_mask: Memory allocation flags
+ *
+ * Return: 0 on success, -ENOMEM if out of memory. -ENOMEM is only possible if
+ * @gfp_mask doesn't include %__GFP_DIRECT_RECLAIM.
+ */
+static inline int blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
+ gfp_t gfp_mask)
+{
+ if (bio_has_crypt_ctx(bio))
+ return __blk_crypto_rq_bio_prep(rq, bio, gfp_mask);
+ return 0;
+}
+
+/**
+ * blk_crypto_insert_cloned_request - Prepare a cloned request to be inserted
+ * into a request queue.
+ * @rq: the request being queued
+ *
+ * Return: BLK_STS_OK on success, nonzero on error.
+ */
+static inline blk_status_t blk_crypto_insert_cloned_request(struct request *rq)
+{
+
+ if (blk_crypto_rq_is_encrypted(rq))
+ return blk_crypto_rq_get_keyslot(rq);
+ return BLK_STS_OK;
+}
+
#ifdef CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK
int blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num);
-int blk_crypto_fallback_submit_bio(struct bio **bio_ptr);
-
-bool blk_crypto_queue_decrypt_bio(struct bio *bio);
+bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr);
int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key);
-
-bool bio_crypt_fallback_crypted(const struct bio_crypt_ctx *bc);
#else /* CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK */
@@ -38,21 +211,10 @@
return -ENOPKG;
}
-static inline bool bio_crypt_fallback_crypted(const struct bio_crypt_ctx *bc)
+static inline bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr)
{
- return false;
-}
-
-static inline int blk_crypto_fallback_submit_bio(struct bio **bio_ptr)
-{
- pr_warn_once("crypto API fallback disabled; failing request\n");
+ pr_warn_once("crypto API fallback disabled; failing request.\n");
(*bio_ptr)->bi_status = BLK_STS_NOTSUPP;
- return -EIO;
-}
-
-static inline bool blk_crypto_queue_decrypt_bio(struct bio *bio)
-{
- WARN_ON(1);
return false;
}
--
Gitblit v1.6.2
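
The inline wrappers added by this patch all follow one pattern: a cheap inline check (bio_has_crypt_ctx() or blk_crypto_rq_is_encrypted()) guards the call to the out-of-line double-underscore helper, so bios and requests without an inline-encryption context pay essentially nothing. Below is a minimal caller-side sketch of how the new keyslot helpers are meant to be paired; example_queue_rq() and its surrounding driver context are hypothetical and are not part of the patch, they only illustrate how blk_crypto_rq_get_keyslot() and blk_crypto_rq_put_keyslot() bracket the lifetime of a hardware keyslot.

/*
 * Hypothetical caller-side sketch (illustration only, not from this patch):
 * pairing the keyslot wrappers defined in blk-crypto-internal.h above.
 * Both calls degrade to no-ops when the request carries no crypt_ctx.
 */
#include <linux/blkdev.h>
#include <linux/blk_types.h>
#include "blk-crypto-internal.h"

static blk_status_t example_queue_rq(struct request *rq)
{
	blk_status_t status;

	/* Acquire a hardware keyslot only if the request is encrypted. */
	status = blk_crypto_rq_get_keyslot(rq);
	if (status != BLK_STS_OK)
		return status;

	/*
	 * ... program the inline-encryption hardware and issue the
	 * request here ...
	 */

	/* Release the keyslot once the hardware no longer needs it. */
	blk_crypto_rq_put_keyslot(rq);
	return BLK_STS_OK;
}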