From 6778948f9de86c3cfaf36725a7c87dcff9ba247f Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Mon, 11 Dec 2023 08:20:59 +0000
Subject: [PATCH] kernel_5.10 no rt

---
 kernel/mm/slub.c | 148 +++++++++++--------------------------------
 1 files changed, 34 insertions(+), 114 deletions(-)

diff --git a/kernel/mm/slub.c b/kernel/mm/slub.c
index 3be07ee..3acf083 100644
--- a/kernel/mm/slub.c
+++ b/kernel/mm/slub.c
@@ -431,7 +431,7 @@
 
 #ifdef CONFIG_SLUB_DEBUG
 static unsigned long object_map[BITS_TO_LONGS(MAX_OBJS_PER_PAGE)];
-static DEFINE_RAW_SPINLOCK(object_map_lock);
+static DEFINE_SPINLOCK(object_map_lock);
 
 static void __fill_map(unsigned long *obj_map, struct kmem_cache *s,
 		       struct page *page)
@@ -456,7 +456,7 @@
 {
 	VM_BUG_ON(!irqs_disabled());
 
-	raw_spin_lock(&object_map_lock);
+	spin_lock(&object_map_lock);
 
 	__fill_map(object_map, s, page);
 
@@ -466,7 +466,7 @@
 static void put_map(unsigned long *map) __releases(&object_map_lock)
 {
 	VM_BUG_ON(map != object_map);
-	raw_spin_unlock(&object_map_lock);
+	spin_unlock(&object_map_lock);
 }
 
 static inline unsigned int size_from_object(struct kmem_cache *s)
@@ -1255,7 +1255,7 @@
 	unsigned long flags;
 	int ret = 0;
 
-	raw_spin_lock_irqsave(&n->list_lock, flags);
+	spin_lock_irqsave(&n->list_lock, flags);
 	slab_lock(page);
 
 	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
@@ -1290,7 +1290,7 @@
 			 bulk_cnt, cnt);
 
 	slab_unlock(page);
-	raw_spin_unlock_irqrestore(&n->list_lock, flags);
+	spin_unlock_irqrestore(&n->list_lock, flags);
 	if (!ret)
 		slab_fix(s, "Object at 0x%p not freed", object);
 	return ret;
@@ -1537,12 +1537,6 @@
 	return false;
 }
 #endif /* CONFIG_SLUB_DEBUG */
-
-struct slub_free_list {
-	raw_spinlock_t lock;
-	struct list_head list;
-};
-static DEFINE_PER_CPU(struct slub_free_list, slub_free_list);
 
 /*
  * Hooks for other subsystems that check memory allocations. In a typical
@@ -1804,18 +1798,10 @@
 	void *start, *p, *next;
 	int idx;
 	bool shuffle;
-	bool enableirqs = false;
 
 	flags &= gfp_allowed_mask;
 
 	if (gfpflags_allow_blocking(flags))
-		enableirqs = true;
-
-#ifdef CONFIG_PREEMPT_RT
-	if (system_state > SYSTEM_BOOTING && system_state < SYSTEM_SUSPEND)
-		enableirqs = true;
-#endif
-	if (enableirqs)
 		local_irq_enable();
 
 	flags |= s->allocflags;
@@ -1874,7 +1860,7 @@
 	page->frozen = 1;
 
 out:
-	if (enableirqs)
+	if (gfpflags_allow_blocking(flags))
 		local_irq_disable();
 	if (!page)
 		return NULL;
@@ -1917,16 +1903,6 @@
 	__free_pages(page, order);
 }
 
-static void free_delayed(struct list_head *h)
-{
-	while (!list_empty(h)) {
-		struct page *page = list_first_entry(h, struct page, lru);
-
-		list_del(&page->lru);
-		__free_slab(page->slab_cache, page);
-	}
-}
-
 static void rcu_free_slab(struct rcu_head *h)
 {
 	struct page *page = container_of(h, struct page, rcu_head);
@@ -1938,12 +1914,6 @@
 {
 	if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) {
 		call_rcu(&page->rcu_head, rcu_free_slab);
-	} else if (irqs_disabled()) {
-		struct slub_free_list *f = this_cpu_ptr(&slub_free_list);
-
-		raw_spin_lock(&f->lock);
-		list_add(&page->lru, &f->list);
-		raw_spin_unlock(&f->lock);
 	} else
 		__free_slab(s, page);
 }
@@ -2051,7 +2021,7 @@
 	if (!n || !n->nr_partial)
 		return NULL;
 
-	raw_spin_lock(&n->list_lock);
+	spin_lock(&n->list_lock);
 	list_for_each_entry_safe(page, page2, &n->partial, slab_list) {
 		void *t;
 
@@ -2076,7 +2046,7 @@
 			break;
 
 	}
-	raw_spin_unlock(&n->list_lock);
+	spin_unlock(&n->list_lock);
 	return object;
 }
 
@@ -2330,7 +2300,7 @@
 			 * that acquire_slab() will see a slab page that
 			 * is frozen
 			 */
-			raw_spin_lock(&n->list_lock);
+			spin_lock(&n->list_lock);
 		}
 	} else {
 		m = M_FULL;
@@ -2342,7 +2312,7 @@
 			 * slabs from diagnostic functions will not see
 			 * any frozen slabs.
 			 */
-			raw_spin_lock(&n->list_lock);
+			spin_lock(&n->list_lock);
 		}
 #endif
 	}
@@ -2367,7 +2337,7 @@
 		goto redo;
 
 	if (lock)
-		raw_spin_unlock(&n->list_lock);
+		spin_unlock(&n->list_lock);
 
 	if (m == M_PARTIAL)
 		stat(s, tail);
@@ -2407,10 +2377,10 @@
 		n2 = get_node(s, page_to_nid(page));
 		if (n != n2) {
 			if (n)
-				raw_spin_unlock(&n->list_lock);
+				spin_unlock(&n->list_lock);
 
 			n = n2;
-			raw_spin_lock(&n->list_lock);
+			spin_lock(&n->list_lock);
 		}
 
 		do {
@@ -2439,7 +2409,7 @@
 	}
 
 	if (n)
-		raw_spin_unlock(&n->list_lock);
+		spin_unlock(&n->list_lock);
 
 	while (discard_page) {
 		page = discard_page;
@@ -2476,21 +2446,14 @@
 			pobjects = oldpage->pobjects;
 			pages = oldpage->pages;
 			if (drain && pobjects > slub_cpu_partial(s)) {
-				struct slub_free_list *f;
 				unsigned long flags;
-				LIST_HEAD(tofree);
 				/*
 				 * partial array is full. Move the existing
 				 * set to the per node partial list.
 				 */
 				local_irq_save(flags);
 				unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
-				f = this_cpu_ptr(&slub_free_list);
-				raw_spin_lock(&f->lock);
-				list_splice_init(&f->list, &tofree);
-				raw_spin_unlock(&f->lock);
 				local_irq_restore(flags);
-				free_delayed(&tofree);
 				oldpage = NULL;
 				pobjects = 0;
 				pages = 0;
@@ -2556,19 +2519,7 @@
 
 static void flush_all(struct kmem_cache *s)
 {
-	LIST_HEAD(tofree);
-	int cpu;
-
 	on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1);
-	for_each_online_cpu(cpu) {
-		struct slub_free_list *f;
-
-		f = &per_cpu(slub_free_list, cpu);
-		raw_spin_lock_irq(&f->lock);
-		list_splice_init(&f->list, &tofree);
-		raw_spin_unlock_irq(&f->lock);
-		free_delayed(&tofree);
-	}
 }
 
 /*
@@ -2623,10 +2574,10 @@
 	unsigned long x = 0;
 	struct page *page;
 
-	raw_spin_lock_irqsave(&n->list_lock, flags);
+	spin_lock_irqsave(&n->list_lock, flags);
 	list_for_each_entry(page, &n->partial, slab_list)
 		x += get_count(page);
-	raw_spin_unlock_irqrestore(&n->list_lock, flags);
+	spin_unlock_irqrestore(&n->list_lock, flags);
 	return x;
 }
 #endif /* CONFIG_SLUB_DEBUG || CONFIG_SLUB_SYSFS */
@@ -2765,10 +2716,8 @@
  * already disabled (which is the case for bulk allocation).
  */
 static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
-			  unsigned long addr, struct kmem_cache_cpu *c,
-			  struct list_head *to_free)
+			  unsigned long addr, struct kmem_cache_cpu *c)
 {
-	struct slub_free_list *f;
 	void *freelist;
 	struct page *page;
 
@@ -2837,13 +2786,6 @@
 	VM_BUG_ON(!c->page->frozen);
 	c->freelist = get_freepointer(s, freelist);
 	c->tid = next_tid(c->tid);
-
-out:
-	f = this_cpu_ptr(&slub_free_list);
-	raw_spin_lock(&f->lock);
-	list_splice_init(&f->list, to_free);
-	raw_spin_unlock(&f->lock);
-
 	return freelist;
 
 new_slab:
@@ -2859,7 +2801,7 @@
 
 	if (unlikely(!freelist)) {
 		slab_out_of_memory(s, gfpflags, node);
-		goto out;
+		return NULL;
 	}
 
 	page = c->page;
@@ -2872,7 +2814,7 @@
 		goto new_slab;	/* Slab failed checks. Next slab needed */
 
 	deactivate_slab(s, page, get_freepointer(s, freelist), c);
-	goto out;
+	return freelist;
 }
 
 /*
@@ -2884,7 +2826,6 @@
 {
 	void *p;
 	unsigned long flags;
-	LIST_HEAD(tofree);
 
 	local_irq_save(flags);
 #ifdef CONFIG_PREEMPTION
@@ -2896,9 +2837,8 @@
 	c = this_cpu_ptr(s->cpu_slab);
 #endif
 
-	p = ___slab_alloc(s, gfpflags, node, addr, c, &tofree);
+	p = ___slab_alloc(s, gfpflags, node, addr, c);
 	local_irq_restore(flags);
-	free_delayed(&tofree);
 	return p;
 }
 
@@ -2933,10 +2873,6 @@
 	unsigned long tid;
 	struct obj_cgroup *objcg = NULL;
 	bool init = false;
-
-	if (IS_ENABLED(CONFIG_PREEMPT_RT) && IS_ENABLED(CONFIG_DEBUG_ATOMIC_SLEEP))
-		WARN_ON_ONCE(!preemptible() &&
-			(system_state > SYSTEM_BOOTING && system_state < SYSTEM_SUSPEND));
 
 	s = slab_pre_alloc_hook(s, &objcg, 1, gfpflags);
 	if (!s)
@@ -3110,7 +3046,7 @@
 
 	do {
 		if (unlikely(n)) {
-			raw_spin_unlock_irqrestore(&n->list_lock, flags);
+			spin_unlock_irqrestore(&n->list_lock, flags);
 			n = NULL;
 		}
 		prior = page->freelist;
@@ -3142,7 +3078,7 @@
 				 * Otherwise the list_lock will synchronize with
 				 * other processors updating the list of slabs.
 				 */
-				raw_spin_lock_irqsave(&n->list_lock, flags);
+				spin_lock_irqsave(&n->list_lock, flags);
 
 			}
 		}
@@ -3184,7 +3120,7 @@
 		add_partial(n, page, DEACTIVATE_TO_TAIL);
 		stat(s, FREE_ADD_PARTIAL);
 	}
-	raw_spin_unlock_irqrestore(&n->list_lock, flags);
+	spin_unlock_irqrestore(&n->list_lock, flags);
 	return;
 
 slab_empty:
@@ -3199,7 +3135,7 @@
 		remove_full(s, n, page);
 	}
 
-	raw_spin_unlock_irqrestore(&n->list_lock, flags);
+	spin_unlock_irqrestore(&n->list_lock, flags);
 	stat(s, FREE_SLAB);
 	discard_slab(s, page);
 }
@@ -3416,13 +3352,8 @@
 			  void **p)
 {
 	struct kmem_cache_cpu *c;
-	LIST_HEAD(to_free);
 	int i;
 	struct obj_cgroup *objcg = NULL;
-
-	if (IS_ENABLED(CONFIG_PREEMPT_RT) && IS_ENABLED(CONFIG_DEBUG_ATOMIC_SLEEP))
-		WARN_ON_ONCE(!preemptible() &&
-			(system_state > SYSTEM_BOOTING && system_state < SYSTEM_SUSPEND));
 
 	/* memcg and kmem_cache debug support */
 	s = slab_pre_alloc_hook(s, &objcg, size, flags);
@@ -3460,7 +3391,7 @@
 			 * of re-populating per CPU c->freelist
 			 */
 			p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
-					    _RET_IP_, c, &to_free);
+					    _RET_IP_, c);
 			if (unlikely(!p[i]))
 				goto error;
 
@@ -3475,7 +3406,6 @@
 	}
 	c->tid = next_tid(c->tid);
 	local_irq_enable();
-	free_delayed(&to_free);
 
 	/*
 	 * memcg and kmem_cache debug support and memory initialization.
@@ -3486,7 +3416,6 @@
 	return i;
 error:
 	local_irq_enable();
-	free_delayed(&to_free);
 	slab_post_alloc_hook(s, objcg, flags, i, p, false);
 	__kmem_cache_free_bulk(s, i, p);
 	return 0;
@@ -3622,7 +3551,7 @@
 init_kmem_cache_node(struct kmem_cache_node *n)
 {
 	n->nr_partial = 0;
-	raw_spin_lock_init(&n->list_lock);
+	spin_lock_init(&n->list_lock);
 	INIT_LIST_HEAD(&n->partial);
 #ifdef CONFIG_SLUB_DEBUG
 	atomic_long_set(&n->nr_slabs, 0);
@@ -4016,7 +3945,7 @@
 	struct page *page, *h;
 
 	BUG_ON(irqs_disabled());
-	raw_spin_lock_irq(&n->list_lock);
+	spin_lock_irq(&n->list_lock);
 	list_for_each_entry_safe(page, h, &n->partial, slab_list) {
 		if (!page->inuse) {
 			remove_partial(n, page);
@@ -4026,7 +3955,7 @@
 			  "Objects remaining in %s on __kmem_cache_shutdown()");
 		}
 	}
-	raw_spin_unlock_irq(&n->list_lock);
+	spin_unlock_irq(&n->list_lock);
 
 	list_for_each_entry_safe(page, h, &discard, slab_list)
 		discard_slab(s, page);
@@ -4301,7 +4230,7 @@
 		for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
 			INIT_LIST_HEAD(promote + i);
 
-		raw_spin_lock_irqsave(&n->list_lock, flags);
+		spin_lock_irqsave(&n->list_lock, flags);
 
 		/*
 		 * Build lists of slabs to discard or promote.
@@ -4332,7 +4261,7 @@
 		for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
 			list_splice(promote + i, &n->partial);
 
-		raw_spin_unlock_irqrestore(&n->list_lock, flags);
+		spin_unlock_irqrestore(&n->list_lock, flags);
 
 		/* Release empty slabs */
 		list_for_each_entry_safe(page, t, &discard, slab_list)
@@ -4507,12 +4436,6 @@
 {
 	static __initdata struct kmem_cache boot_kmem_cache,
 		boot_kmem_cache_node;
-	int cpu;
-
-	for_each_possible_cpu(cpu) {
-		raw_spin_lock_init(&per_cpu(slub_free_list, cpu).lock);
-		INIT_LIST_HEAD(&per_cpu(slub_free_list, cpu).list);
-	}
 
 	if (debug_guardpage_minorder())
 		slub_max_order = 0;
@@ -4705,7 +4628,7 @@
 	struct page *page;
 	unsigned long flags;
 
-	raw_spin_lock_irqsave(&n->list_lock, flags);
+	spin_lock_irqsave(&n->list_lock, flags);
 
 	list_for_each_entry(page, &n->partial, slab_list) {
 		validate_slab(s, page);
@@ -4727,7 +4650,7 @@
 		       s->name, count, atomic_long_read(&n->nr_slabs));
 
 out:
-	raw_spin_unlock_irqrestore(&n->list_lock, flags);
+	spin_unlock_irqrestore(&n->list_lock, flags);
 	return count;
 }
 
@@ -4782,9 +4705,6 @@
 {
 	struct location *l;
 	int order;
-
-	if (IS_ENABLED(CONFIG_PREEMPT_RT) && flags == GFP_ATOMIC)
-		return 0;
 
 	order = get_order(sizeof(struct location) * max);
 
@@ -5920,12 +5840,12 @@
 		if (!atomic_long_read(&n->nr_slabs))
 			continue;
 
-		raw_spin_lock_irqsave(&n->list_lock, flags);
+		spin_lock_irqsave(&n->list_lock, flags);
 		list_for_each_entry(page, &n->partial, slab_list)
 			process_slab(t, s, page, alloc, obj_map);
 		list_for_each_entry(page, &n->full, slab_list)
 			process_slab(t, s, page, alloc, obj_map);
-		raw_spin_unlock_irqrestore(&n->list_lock, flags);
+		spin_unlock_irqrestore(&n->list_lock, flags);
 	}
 
 	bitmap_free(obj_map);
-- 
Gitblit v1.6.2