From 6778948f9de86c3cfaf36725a7c87dcff9ba247f Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Mon, 11 Dec 2023 08:20:59 +0000
Subject: [PATCH] kernel_5.10 (no rt): selinux: rework sidtab with hashed reverse lookup, freeze on policy reload and a SID-to-string cache

---
 kernel/security/selinux/ss/sidtab.c |  231 ++++++++++++++++++++++++++++++++++++++++++++++-----------
 1 file changed, 187 insertions(+), 44 deletions(-)
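
Note on scope (summary of the diff below): each sidtab_entry now carries a
precomputed context hash (context_compute_hash()) used for the reverse-lookup
hashtable, sidtab_context_to_sid() fails with -ESTALE once the table has been
frozen by a policy reload, the convert callback takes an explicit gfp_t, and an
optional RCU-protected LRU cache for SID-to-context-string translations is
added behind CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE.

The sketch below is illustrative only and is not part of the patch: it shows
how a reverse-lookup helper, in the style of the callers in
security/selinux/ss/services.c, would be expected to consume the new cache
API. The helper name and the context_to_str() callback are placeholders.

static int example_entry_to_string(struct sidtab *sidtab,
                                   struct sidtab_entry *entry,
                                   char **scontext, u32 *scontext_len)
{
        /*
         * Fast path: sidtab_sid2str_get() hands back a kmemdup'd copy on a
         * cache hit, -ENOENT when nothing is cached, -ENOMEM on failure.
         */
        int rc = sidtab_sid2str_get(sidtab, entry, scontext, scontext_len);

        if (rc != -ENOENT)
                return rc;

        /* Slow path: render the string, then publish it to the LRU cache. */
        rc = context_to_str(&entry->context, scontext, scontext_len);
        if (!rc && scontext)
                sidtab_sid2str_put(sidtab, entry, *scontext, *scontext_len);
        return rc;
}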

diff --git a/kernel/security/selinux/ss/sidtab.c b/kernel/security/selinux/ss/sidtab.c
index d9d8599..1981c5a 100644
--- a/kernel/security/selinux/ss/sidtab.c
+++ b/kernel/security/selinux/ss/sidtab.c
@@ -9,6 +9,8 @@
  */
 #include <linux/errno.h>
 #include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/rcupdate.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
 #include <linux/spinlock.h>
@@ -16,6 +18,14 @@
 #include "flask.h"
 #include "security.h"
 #include "sidtab.h"
+
+struct sidtab_str_cache {
+	struct rcu_head rcu_member;
+	struct list_head lru_member;
+	struct sidtab_entry *parent;
+	u32 len;
+	char str[];
+};
 
 #define index_to_sid(index) (index + SECINITSID_NUM + 1)
 #define sid_to_index(sid) (sid - (SECINITSID_NUM + 1))
@@ -29,22 +39,31 @@
 	for (i = 0; i < SECINITSID_NUM; i++)
 		s->isids[i].set = 0;
 
+	s->frozen = false;
 	s->count = 0;
 	s->convert = NULL;
 	hash_init(s->context_to_sid);
 
 	spin_lock_init(&s->lock);
+
+#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
+	s->cache_free_slots = CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE;
+	INIT_LIST_HEAD(&s->cache_lru_list);
+	spin_lock_init(&s->cache_lock);
+#endif
+
 	return 0;
 }
 
-static u32 context_to_sid(struct sidtab *s, struct context *context)
+static u32 context_to_sid(struct sidtab *s, struct context *context, u32 hash)
 {
-	struct sidtab_entry_leaf *entry;
+	struct sidtab_entry *entry;
 	u32 sid = 0;
 
 	rcu_read_lock();
-	hash_for_each_possible_rcu(s->context_to_sid, entry, list,
-				   context->hash) {
+	hash_for_each_possible_rcu(s->context_to_sid, entry, list, hash) {
+		if (entry->hash != hash)
+			continue;
 		if (context_cmp(&entry->context, context)) {
 			sid = entry->sid;
 			break;
@@ -56,19 +75,25 @@
 
 int sidtab_set_initial(struct sidtab *s, u32 sid, struct context *context)
 {
-	struct sidtab_isid_entry *entry;
+	struct sidtab_isid_entry *isid;
+	u32 hash;
 	int rc;
 
 	if (sid == 0 || sid > SECINITSID_NUM)
 		return -EINVAL;
 
-	entry = &s->isids[sid - 1];
+	isid = &s->isids[sid - 1];
 
-	rc = context_cpy(&entry->leaf.context, context);
+	rc = context_cpy(&isid->entry.context, context);
 	if (rc)
 		return rc;
 
-	entry->set = 1;
+#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
+	isid->entry.cache = NULL;
+#endif
+	isid->set = 1;
+
+	hash = context_compute_hash(context);
 
 	/*
 	 * Multiple initial sids may map to the same context. Check that this
@@ -76,9 +101,10 @@
 	 * to avoid duplicate entries and long linked lists upon hash
 	 * collision.
 	 */
-	if (!context_to_sid(s, context)) {
-		entry->leaf.sid = sid;
-		hash_add(s->context_to_sid, &entry->leaf.list, context->hash);
+	if (!context_to_sid(s, context, hash)) {
+		isid->entry.sid = sid;
+		isid->entry.hash = hash;
+		hash_add(s->context_to_sid, &isid->entry.list, hash);
 	}
 
 	return 0;
@@ -92,7 +118,7 @@
 	int entries = 0;
 	int max_chain_len = 0;
 	int cur_bucket = 0;
-	struct sidtab_entry_leaf *entry;
+	struct sidtab_entry *entry;
 
 	rcu_read_lock();
 	hash_for_each_rcu(sidtab->context_to_sid, i, entry, list) {
@@ -151,8 +177,8 @@
 	return 0;
 }
 
-static struct sidtab_entry_leaf *sidtab_do_lookup(struct sidtab *s, u32 index,
-						  int alloc)
+static struct sidtab_entry *sidtab_do_lookup(struct sidtab *s, u32 index,
+					     int alloc)
 {
 	union sidtab_entry_inner *entry;
 	u32 level, capacity_shift, leaf_index = index / SIDTAB_LEAF_ENTRIES;
@@ -192,7 +218,7 @@
 	return &entry->ptr_leaf->entries[index % SIDTAB_LEAF_ENTRIES];
 }
 
-static struct context *sidtab_lookup(struct sidtab *s, u32 index)
+static struct sidtab_entry *sidtab_lookup(struct sidtab *s, u32 index)
 {
 	/* read entries only after reading count */
 	u32 count = smp_load_acquire(&s->count);
@@ -200,36 +226,37 @@
 	if (index >= count)
 		return NULL;
 
-	return &sidtab_do_lookup(s, index, 0)->context;
+	return sidtab_do_lookup(s, index, 0);
 }
 
-static struct context *sidtab_lookup_initial(struct sidtab *s, u32 sid)
+static struct sidtab_entry *sidtab_lookup_initial(struct sidtab *s, u32 sid)
 {
-	return s->isids[sid - 1].set ? &s->isids[sid - 1].leaf.context : NULL;
+	return s->isids[sid - 1].set ? &s->isids[sid - 1].entry : NULL;
 }
 
-static struct context *sidtab_search_core(struct sidtab *s, u32 sid, int force)
+static struct sidtab_entry *sidtab_search_core(struct sidtab *s, u32 sid,
+					       int force)
 {
-	struct context *context;
-
 	if (sid != 0) {
+		struct sidtab_entry *entry;
+
 		if (sid > SECINITSID_NUM)
-			context = sidtab_lookup(s, sid_to_index(sid));
+			entry = sidtab_lookup(s, sid_to_index(sid));
 		else
-			context = sidtab_lookup_initial(s, sid);
-		if (context && (!context->len || force))
-			return context;
+			entry = sidtab_lookup_initial(s, sid);
+		if (entry && (!entry->context.len || force))
+			return entry;
 	}
 
 	return sidtab_lookup_initial(s, SECINITSID_UNLABELED);
 }
 
-struct context *sidtab_search(struct sidtab *s, u32 sid)
+struct sidtab_entry *sidtab_search_entry(struct sidtab *s, u32 sid)
 {
 	return sidtab_search_core(s, sid, 0);
 }
 
-struct context *sidtab_search_force(struct sidtab *s, u32 sid)
+struct sidtab_entry *sidtab_search_entry_force(struct sidtab *s, u32 sid)
 {
 	return sidtab_search_core(s, sid, 1);
 }
@@ -238,12 +265,12 @@
 			  u32 *sid)
 {
 	unsigned long flags;
-	u32 count;
+	u32 count, hash = context_compute_hash(context);
 	struct sidtab_convert_params *convert;
-	struct sidtab_entry_leaf *dst, *dst_convert;
+	struct sidtab_entry *dst, *dst_convert;
 	int rc;
 
-	*sid = context_to_sid(s, context);
+	*sid = context_to_sid(s, context, hash);
 	if (*sid)
 		return 0;
 
@@ -251,12 +278,20 @@
 	spin_lock_irqsave(&s->lock, flags);
 
 	rc = 0;
-	*sid = context_to_sid(s, context);
+	*sid = context_to_sid(s, context, hash);
 	if (*sid)
 		goto out_unlock;
 
-	/* read entries only after reading count */
-	count = smp_load_acquire(&s->count);
+	if (unlikely(s->frozen)) {
+		/*
+		 * This sidtab is now frozen - tell the caller to abort and
+		 * get the new one.
+		 */
+		rc = -ESTALE;
+		goto out_unlock;
+	}
+
+	count = s->count;
 	convert = s->convert;
 
 	/* bail out if we already reached max entries */
@@ -271,6 +306,7 @@
 		goto out_unlock;
 
 	dst->sid = index_to_sid(count);
+	dst->hash = hash;
 
 	rc = context_cpy(&dst->context, context);
 	if (rc)
@@ -289,16 +325,17 @@
 		}
 
 		rc = convert->func(context, &dst_convert->context,
-				convert->args);
+				   convert->args, GFP_ATOMIC);
 		if (rc) {
 			context_destroy(&dst->context);
 			goto out_unlock;
 		}
 		dst_convert->sid = index_to_sid(count);
+		dst_convert->hash = context_compute_hash(&dst_convert->context);
 		convert->target->count = count + 1;
 
 		hash_add_rcu(convert->target->context_to_sid,
-				&dst_convert->list, dst_convert->context.hash);
+			     &dst_convert->list, dst_convert->hash);
 	}
 
 	if (context->len)
@@ -309,7 +346,7 @@
 
 	/* write entries before updating count */
 	smp_store_release(&s->count, count + 1);
-	hash_add_rcu(s->context_to_sid, &dst->list, dst->context.hash);
+	hash_add_rcu(s->context_to_sid, &dst->list, dst->hash);
 
 	rc = 0;
 out_unlock:
@@ -319,16 +356,15 @@
 
 static void sidtab_convert_hashtable(struct sidtab *s, u32 count)
 {
-	struct sidtab_entry_leaf *entry;
+	struct sidtab_entry *entry;
 	u32 i;
 
 	for (i = 0; i < count; i++) {
 		entry = sidtab_do_lookup(s, i, 0);
 		entry->sid = index_to_sid(i);
+		entry->hash = context_compute_hash(&entry->context);
 
-		hash_add_rcu(s->context_to_sid, &entry->list,
-				entry->context.hash);
-
+		hash_add_rcu(s->context_to_sid, &entry->list, entry->hash);
 	}
 }
 
@@ -368,7 +404,7 @@
 		while (i < SIDTAB_LEAF_ENTRIES && *pos < count) {
 			rc = convert->func(&esrc->ptr_leaf->entries[i].context,
 					   &edst->ptr_leaf->entries[i].context,
-					   convert->args);
+					   convert->args, GFP_KERNEL);
 			if (rc)
 				return rc;
 			(*pos)++;
@@ -376,7 +412,6 @@
 		}
 		cond_resched();
 	}
-
 	return 0;
 }
 
@@ -439,6 +474,35 @@
 	return 0;
 }
 
+void sidtab_cancel_convert(struct sidtab *s)
+{
+	unsigned long flags;
+
+	/* cancelling policy load - disable live convert of sidtab */
+	spin_lock_irqsave(&s->lock, flags);
+	s->convert = NULL;
+	spin_unlock_irqrestore(&s->lock, flags);
+}
+
+void sidtab_freeze_begin(struct sidtab *s, unsigned long *flags) __acquires(&s->lock)
+{
+	spin_lock_irqsave(&s->lock, *flags);
+	s->frozen = true;
+	s->convert = NULL;
+}
+void sidtab_freeze_end(struct sidtab *s, unsigned long *flags) __releases(&s->lock)
+{
+	spin_unlock_irqrestore(&s->lock, *flags);
+}
+
+static void sidtab_destroy_entry(struct sidtab_entry *entry)
+{
+	context_destroy(&entry->context);
+#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
+	kfree(rcu_dereference_raw(entry->cache));
+#endif
+}
+
 static void sidtab_destroy_tree(union sidtab_entry_inner entry, u32 level)
 {
 	u32 i;
@@ -459,7 +523,7 @@
 			return;
 
 		for (i = 0; i < SIDTAB_LEAF_ENTRIES; i++)
-			context_destroy(&node->entries[i].context);
+			sidtab_destroy_entry(&node->entries[i]);
 		kfree(node);
 	}
 }
@@ -470,7 +534,7 @@
 
 	for (i = 0; i < SECINITSID_NUM; i++)
 		if (s->isids[i].set)
-			context_destroy(&s->isids[i].leaf.context);
+			sidtab_destroy_entry(&s->isids[i].entry);
 
 	level = SIDTAB_MAX_LEVEL;
 	while (level && !s->roots[level].ptr_inner)
@@ -483,3 +547,82 @@
 	 * to be cleaned up here.
 	 */
 }
+
+#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
+
+void sidtab_sid2str_put(struct sidtab *s, struct sidtab_entry *entry,
+			const char *str, u32 str_len)
+{
+	struct sidtab_str_cache *cache, *victim = NULL;
+	unsigned long flags;
+
+	/* do not cache invalid contexts */
+	if (entry->context.len)
+		return;
+
+	spin_lock_irqsave(&s->cache_lock, flags);
+
+	cache = rcu_dereference_protected(entry->cache,
+					  lockdep_is_held(&s->cache_lock));
+	if (cache) {
+		/* entry in cache - just bump to the head of LRU list */
+		list_move(&cache->lru_member, &s->cache_lru_list);
+		goto out_unlock;
+	}
+
+	cache = kmalloc(sizeof(struct sidtab_str_cache) + str_len, GFP_ATOMIC);
+	if (!cache)
+		goto out_unlock;
+
+	if (s->cache_free_slots == 0) {
+		/* pop a cache entry from the tail and free it */
+		victim = container_of(s->cache_lru_list.prev,
+				      struct sidtab_str_cache, lru_member);
+		list_del(&victim->lru_member);
+		rcu_assign_pointer(victim->parent->cache, NULL);
+	} else {
+		s->cache_free_slots--;
+	}
+	cache->parent = entry;
+	cache->len = str_len;
+	memcpy(cache->str, str, str_len);
+	list_add(&cache->lru_member, &s->cache_lru_list);
+
+	rcu_assign_pointer(entry->cache, cache);
+
+out_unlock:
+	spin_unlock_irqrestore(&s->cache_lock, flags);
+	kfree_rcu(victim, rcu_member);
+}
+
+int sidtab_sid2str_get(struct sidtab *s, struct sidtab_entry *entry,
+		       char **out, u32 *out_len)
+{
+	struct sidtab_str_cache *cache;
+	int rc = 0;
+
+	if (entry->context.len)
+		return -ENOENT; /* do not cache invalid contexts */
+
+	rcu_read_lock();
+
+	cache = rcu_dereference(entry->cache);
+	if (!cache) {
+		rc = -ENOENT;
+	} else {
+		*out_len = cache->len;
+		if (out) {
+			*out = kmemdup(cache->str, cache->len, GFP_ATOMIC);
+			if (!*out)
+				rc = -ENOMEM;
+		}
+	}
+
+	rcu_read_unlock();
+
+	if (!rc && out)
+		sidtab_sid2str_put(s, entry, *out, *out_len);
+	return rc;
+}
+
+#endif /* CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0 */

--
Gitblit v1.6.2
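
Illustrative usage, not part of the patch: with the new "frozen" flag,
sidtab_context_to_sid() can now return -ESTALE after a policy reload has
replaced the sidtab, so callers are expected to drop the stale table and retry
against the live one, roughly as sketched here. get_live_sidtab() is a
placeholder for however the caller obtains the current table (for example from
the active policy under RCU).

static int example_context_to_sid(struct context *ctx, u32 *sid)
{
        struct sidtab *s;
        int rc;

retry:
        s = get_live_sidtab();  /* placeholder: fetch the current sidtab */
        rc = sidtab_context_to_sid(s, ctx, sid);
        if (rc == -ESTALE)
                goto retry;     /* table was frozen by a reload; try the new one */
        return rc;
}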