2023-11-06 e3e12f52b214121840b44c91de5b3e5af5d3eb84
kernel/mm/swap.c
@@ -33,6 +33,7 @@
 #include <linux/memcontrol.h>
 #include <linux/gfp.h>
 #include <linux/uio.h>
+#include <linux/locallock.h>
 #include <linux/hugetlb.h>
 #include <linux/page_idle.h>
 
@@ -51,6 +52,8 @@
 #ifdef CONFIG_SMP
 static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);
 #endif
+static DEFINE_LOCAL_IRQ_LOCK(rotate_lock);
+DEFINE_LOCAL_IRQ_LOCK(swapvec_lock);
 
 /*
  * This path almost never happens for VM activity - pages are normally
@@ -253,11 +256,11 @@
 		unsigned long flags;
 
 		get_page(page);
-		local_irq_save(flags);
+		local_lock_irqsave(rotate_lock, flags);
 		pvec = this_cpu_ptr(&lru_rotate_pvecs);
 		if (!pagevec_add(pvec, page) || PageCompound(page))
 			pagevec_move_tail(pvec);
-		local_irq_restore(flags);
+		local_unlock_irqrestore(rotate_lock, flags);
 	}
 }
 
@@ -307,12 +310,13 @@
 {
 	page = compound_head(page);
 	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
-		struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
+		struct pagevec *pvec = &get_locked_var(swapvec_lock,
+						       activate_page_pvecs);
 
 		get_page(page);
 		if (!pagevec_add(pvec, page) || PageCompound(page))
 			pagevec_lru_move_fn(pvec, __activate_page, NULL);
-		put_cpu_var(activate_page_pvecs);
+		put_locked_var(swapvec_lock, activate_page_pvecs);
 	}
 }
 
@@ -334,7 +338,7 @@
 
 static void __lru_cache_activate_page(struct page *page)
 {
-	struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
+	struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec);
 	int i;
 
 	/*
@@ -356,7 +360,7 @@
 		}
 	}
 
-	put_cpu_var(lru_add_pvec);
+	put_locked_var(swapvec_lock, lru_add_pvec);
 }
 
 /*
@@ -398,12 +402,12 @@
 
 static void __lru_cache_add(struct page *page)
 {
-	struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
+	struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec);
 
 	get_page(page);
 	if (!pagevec_add(pvec, page) || PageCompound(page))
 		__pagevec_lru_add(pvec);
-	put_cpu_var(lru_add_pvec);
+	put_locked_var(swapvec_lock, lru_add_pvec);
 }
 
 /**
@@ -581,9 +585,15 @@
 		unsigned long flags;
 
 		/* No harm done if a racing interrupt already did this */
-		local_irq_save(flags);
+#ifdef CONFIG_PREEMPT_RT_BASE
+		local_lock_irqsave_on(rotate_lock, flags, cpu);
 		pagevec_move_tail(pvec);
-		local_irq_restore(flags);
+		local_unlock_irqrestore_on(rotate_lock, flags, cpu);
+#else
+		local_lock_irqsave(rotate_lock, flags);
+		pagevec_move_tail(pvec);
+		local_unlock_irqrestore(rotate_lock, flags);
+#endif
 	}
 
 	pvec = &per_cpu(lru_deactivate_file_pvecs, cpu);
@@ -615,11 +625,12 @@
 		return;
 
 	if (likely(get_page_unless_zero(page))) {
-		struct pagevec *pvec = &get_cpu_var(lru_deactivate_file_pvecs);
+		struct pagevec *pvec = &get_locked_var(swapvec_lock,
+						       lru_deactivate_file_pvecs);
 
 		if (!pagevec_add(pvec, page) || PageCompound(page))
 			pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
-		put_cpu_var(lru_deactivate_file_pvecs);
+		put_locked_var(swapvec_lock, lru_deactivate_file_pvecs);
 	}
 }
 
@@ -634,22 +645,33 @@
 {
 	if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
 	    !PageSwapCache(page) && !PageUnevictable(page)) {
-		struct pagevec *pvec = &get_cpu_var(lru_lazyfree_pvecs);
+		struct pagevec *pvec = &get_locked_var(swapvec_lock,
+						       lru_lazyfree_pvecs);
 
 		get_page(page);
 		if (!pagevec_add(pvec, page) || PageCompound(page))
 			pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL);
-		put_cpu_var(lru_lazyfree_pvecs);
+		put_locked_var(swapvec_lock, lru_lazyfree_pvecs);
 	}
 }
 
 void lru_add_drain(void)
 {
-	lru_add_drain_cpu(get_cpu());
-	put_cpu();
+	lru_add_drain_cpu(local_lock_cpu(swapvec_lock));
+	local_unlock_cpu(swapvec_lock);
 }
 
 #ifdef CONFIG_SMP
+
+#ifdef CONFIG_PREEMPT_RT_BASE
+static inline void remote_lru_add_drain(int cpu, struct cpumask *has_work)
+{
+	local_lock_on(swapvec_lock, cpu);
+	lru_add_drain_cpu(cpu);
+	local_unlock_on(swapvec_lock, cpu);
+}
+
+#else
 
 static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
 
@@ -657,6 +679,16 @@
 {
 	lru_add_drain();
 }
+
+static inline void remote_lru_add_drain(int cpu, struct cpumask *has_work)
+{
+	struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
+
+	INIT_WORK(work, lru_add_drain_per_cpu);
+	queue_work_on(cpu, mm_percpu_wq, work);
+	cpumask_set_cpu(cpu, has_work);
+}
+#endif
 
 /*
  * Doesn't need any cpu hotplug locking because we do rely on per-cpu
@@ -682,21 +714,19 @@
 	cpumask_clear(&has_work);
 
 	for_each_online_cpu(cpu) {
-		struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
 
 		if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) ||
 		    pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) ||
 		    pagevec_count(&per_cpu(lru_deactivate_file_pvecs, cpu)) ||
 		    pagevec_count(&per_cpu(lru_lazyfree_pvecs, cpu)) ||
-		    need_activate_page_drain(cpu)) {
-			INIT_WORK(work, lru_add_drain_per_cpu);
-			queue_work_on(cpu, mm_percpu_wq, work);
-			cpumask_set_cpu(cpu, &has_work);
-		}
+		    need_activate_page_drain(cpu))
+			remote_lru_add_drain(cpu, &has_work);
 	}
 
+#ifndef CONFIG_PREEMPT_RT_BASE
 	for_each_cpu(cpu, &has_work)
 		flush_work(&per_cpu(lru_add_drain_work, cpu));
+#endif
 
 	mutex_unlock(&lock);
 }
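
The idea the patch relies on can be sketched in ordinary user-space C. The program below is only an analogy, not kernel code: the names (pagevec_like, get_locked_vec, put_locked_vec, cache_add, remote_drain) are invented for illustration, and pthread mutexes stand in for the per-CPU local locks provided by linux/locallock.h in the RT patchset. It shows the pattern behind the conversion: each per-CPU batch carries its own lock instead of relying on disabled interrupts or preemption, so access stays serialized even if the critical section sleeps, and another CPU's batch can be drained directly by taking that CPU's lock rather than scheduling work on it.

/* Build with: cc -pthread sketch.c -o sketch (user-space analogy only) */
#include <pthread.h>
#include <stdio.h>

#define NR_CPUS  4
#define VEC_SIZE 8

struct pagevec_like {
	int nr;
	int pages[VEC_SIZE];
};

static struct pagevec_like cpu_vec[NR_CPUS];
static pthread_mutex_t cpu_lock[NR_CPUS] = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
};

/* get_locked_var()-style helper: take the per-"CPU" lock, hand back its data */
static struct pagevec_like *get_locked_vec(int cpu)
{
	pthread_mutex_lock(&cpu_lock[cpu]);
	return &cpu_vec[cpu];
}

/* put_locked_var()-style helper: drop the lock taken above */
static void put_locked_vec(int cpu)
{
	pthread_mutex_unlock(&cpu_lock[cpu]);
}

/* rough analogue of __lru_cache_add(): batch one page, drain when full */
static void cache_add(int cpu, int page)
{
	struct pagevec_like *vec = get_locked_vec(cpu);

	vec->pages[vec->nr++] = page;
	if (vec->nr == VEC_SIZE) {
		printf("cpu %d: draining %d pages\n", cpu, vec->nr);
		vec->nr = 0;
	}
	put_locked_vec(cpu);
}

/*
 * rough analogue of the RT remote_lru_add_drain(): another CPU's batch is
 * drained directly because the lock, not disabled preemption, gives exclusion.
 */
static void remote_drain(int cpu)
{
	struct pagevec_like *vec = get_locked_vec(cpu);

	printf("remote drain of cpu %d: %d pages left\n", cpu, vec->nr);
	vec->nr = 0;
	put_locked_vec(cpu);
}

int main(void)
{
	int i, cpu;

	for (i = 0; i < 10; i++)
		cache_add(i % NR_CPUS, i);
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		remote_drain(cpu);
	return 0;
}

On a non-RT kernel the local-lock wrappers are expected to collapse back to the old behavior (disabling interrupts or preemption), which is why the patch can use them unconditionally in the common paths and only needs explicit #ifdef CONFIG_PREEMPT_RT_BASE branches where the RT and non-RT drain strategies genuinely differ.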