```diff
@@ -33,6 +33,7 @@
 #include <linux/memcontrol.h>
 #include <linux/gfp.h>
 #include <linux/uio.h>
+#include <linux/locallock.h>
 #include <linux/hugetlb.h>
 #include <linux/page_idle.h>
 
```
```diff
@@ -51,6 +52,8 @@
 #ifdef CONFIG_SMP
 static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);
 #endif
+static DEFINE_LOCAL_IRQ_LOCK(rotate_lock);
+DEFINE_LOCAL_IRQ_LOCK(swapvec_lock);
 
 /*
  * This path almost never happens for VM activity - pages are normally
```
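The two `DEFINE_LOCAL_IRQ_LOCK` instances above come from the RT patchset's `<linux/locallock.h>`, added by the first hunk; they are not part of mainline. `rotate_lock` serializes the `lru_rotate_pvecs` path, while `swapvec_lock` covers the remaining per-CPU pagevecs. On non-RT builds these locks compile away entirely. The sketch below paraphrases the `!CONFIG_PREEMPT_RT_BASE` fallbacks from the RT tree's `locallock.h`; it is simplified and from memory, so treat the exact spellings as approximate:

```c
/* Paraphrased !PREEMPT_RT_BASE fallbacks from the RT locallock.h:
 * every primitive this patch uses degrades to the pre-patch code,
 * so the conversion is a no-op for non-RT kernels.
 */
#define DEFINE_LOCAL_IRQ_LOCK(lvar)		__typeof__(const int) lvar
#define local_lock_irqsave(lvar, flags)		local_irq_save(flags)
#define local_unlock_irqrestore(lvar, flags)	local_irq_restore(flags)
#define get_locked_var(lvar, var)		get_cpu_var(var)
#define put_locked_var(lvar, var)		put_cpu_var(var)
#define local_lock_cpu(lvar)			get_cpu()
#define local_unlock_cpu(lvar)			put_cpu()
```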
```diff
@@ -253,11 +256,11 @@
 		unsigned long flags;
 
 		get_page(page);
-		local_irq_save(flags);
+		local_lock_irqsave(rotate_lock, flags);
 		pvec = this_cpu_ptr(&lru_rotate_pvecs);
 		if (!pagevec_add(pvec, page) || PageCompound(page))
 			pagevec_move_tail(pvec);
-		local_irq_restore(flags);
+		local_unlock_irqrestore(rotate_lock, flags);
 	}
 }
 
```
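The point of this hunk: on PREEMPT_RT, code that used to run in hard interrupt context runs in preemptible irq threads, so a bare `local_irq_save()` is both too costly for latency and no longer the whole serialization story for `lru_rotate_pvecs`. The named per-CPU lock makes the protected data explicit and gives RT something it can substitute with a per-CPU (sleeping) spinlock, while non-RT builds still compile it to plain IRQ disabling. A hypothetical example of the same pattern on unrelated per-CPU data; `foo_lock` and `foo_events` are invented for illustration:

```c
static DEFINE_LOCAL_IRQ_LOCK(foo_lock);		/* hypothetical */
static DEFINE_PER_CPU(unsigned long, foo_events);	/* hypothetical */

static void foo_account_event(void)
{
	unsigned long flags;

	/* On !RT this is local_irq_save(flags); on RT it takes this
	 * CPU's foo_lock instance and stays preemptible. Either way,
	 * concurrent users of foo_events on this CPU are excluded.
	 */
	local_lock_irqsave(foo_lock, flags);
	__this_cpu_inc(foo_events);
	local_unlock_irqrestore(foo_lock, flags);
}
```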
```diff
@@ -307,12 +310,13 @@
 {
 	page = compound_head(page);
 	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
-		struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
+		struct pagevec *pvec = &get_locked_var(swapvec_lock,
+						       activate_page_pvecs);
 
 		get_page(page);
 		if (!pagevec_add(pvec, page) || PageCompound(page))
 			pagevec_lru_move_fn(pvec, __activate_page, NULL);
-		put_cpu_var(activate_page_pvecs);
+		put_locked_var(swapvec_lock, activate_page_pvecs);
 	}
 }
 
```
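`get_cpu_var()` disables preemption to pin the task to its CPU, and that pinning must be held across the whole pagevec operation, including `pagevec_lru_move_fn()`, which takes locks that are sleeping locks on RT. Sleeping with preemption disabled is illegal, hence `get_locked_var()`: it takes `swapvec_lock` first and only then resolves the per-CPU address, so the exclusion comes from lock ownership rather than from preempt-disable. Roughly, paraphrased from the RT `locallock.h` with details approximate:

```c
/* RT-side sketch: lock first, then resolve the per-CPU address.
 * Holding the lock is what keeps the task associated with this
 * CPU's instance of @var until put_locked_var().
 */
#define get_locked_var(lvar, var)		\
	(*({					\
		local_lock(lvar);		\
		this_cpu_ptr(&var);		\
	}))

#define put_locked_var(lvar, var)	local_unlock(lvar)
```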
```diff
@@ -334,7 +338,7 @@
 
 static void __lru_cache_activate_page(struct page *page)
 {
-	struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
+	struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec);
 	int i;
 
 	/*
@@ -356,7 +360,7 @@
 		}
 	}
 
-	put_cpu_var(lru_add_pvec);
+	put_locked_var(swapvec_lock, lru_add_pvec);
 }
 
 /*
```
```diff
@@ -398,12 +402,12 @@
 
 static void __lru_cache_add(struct page *page)
 {
-	struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
+	struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec);
 
 	get_page(page);
 	if (!pagevec_add(pvec, page) || PageCompound(page))
 		__pagevec_lru_add(pvec);
-	put_cpu_var(lru_add_pvec);
+	put_locked_var(swapvec_lock, lru_add_pvec);
 }
 
 /**
```
```diff
@@ -581,9 +585,15 @@
 		unsigned long flags;
 
 		/* No harm done if a racing interrupt already did this */
-		local_irq_save(flags);
+#ifdef CONFIG_PREEMPT_RT_BASE
+		local_lock_irqsave_on(rotate_lock, flags, cpu);
 		pagevec_move_tail(pvec);
-		local_irq_restore(flags);
+		local_unlock_irqrestore_on(rotate_lock, flags, cpu);
+#else
+		local_lock_irqsave(rotate_lock, flags);
+		pagevec_move_tail(pvec);
+		local_unlock_irqrestore(rotate_lock, flags);
+#endif
 	}
 
 	pvec = &per_cpu(lru_deactivate_file_pvecs, cpu);
```
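`lru_add_drain_cpu()` can be called for a remote CPU (see `lru_add_drain_all()` below). On RT the draining task is not necessarily running on `cpu`, so it must take that CPU's instance of `rotate_lock` explicitly; the `_on` variants exist for exactly this. A rough sketch of their semantics; the real RT implementation goes through a `struct local_irq_lock` wrapper, so this is illustrative only:

```c
/* Approximate RT semantics of the _on variants: operate on the lock
 * embedded in @cpu's per-CPU instance instead of the local one.
 * (Simplified sketch, not the literal RT implementation.)
 */
#define local_lock_irqsave_on(lvar, flags, cpu)			\
	spin_lock_irqsave(&per_cpu(lvar, cpu).lock, flags)
#define local_unlock_irqrestore_on(lvar, flags, cpu)		\
	spin_unlock_irqrestore(&per_cpu(lvar, cpu).lock, flags)
```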
```diff
@@ -615,11 +625,12 @@
 		return;
 
 	if (likely(get_page_unless_zero(page))) {
-		struct pagevec *pvec = &get_cpu_var(lru_deactivate_file_pvecs);
+		struct pagevec *pvec = &get_locked_var(swapvec_lock,
+						       lru_deactivate_file_pvecs);
 
 		if (!pagevec_add(pvec, page) || PageCompound(page))
 			pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
-		put_cpu_var(lru_deactivate_file_pvecs);
+		put_locked_var(swapvec_lock, lru_deactivate_file_pvecs);
 	}
 }
 
```
```diff
@@ -634,22 +645,33 @@
 {
 	if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
 	    !PageSwapCache(page) && !PageUnevictable(page)) {
-		struct pagevec *pvec = &get_cpu_var(lru_lazyfree_pvecs);
+		struct pagevec *pvec = &get_locked_var(swapvec_lock,
+						       lru_lazyfree_pvecs);
 
 		get_page(page);
 		if (!pagevec_add(pvec, page) || PageCompound(page))
 			pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL);
-		put_cpu_var(lru_lazyfree_pvecs);
+		put_locked_var(swapvec_lock, lru_lazyfree_pvecs);
 	}
 }
 
 void lru_add_drain(void)
 {
-	lru_add_drain_cpu(get_cpu());
-	put_cpu();
+	lru_add_drain_cpu(local_lock_cpu(swapvec_lock));
+	local_unlock_cpu(swapvec_lock);
 }
 
 #ifdef CONFIG_SMP
+
+#ifdef CONFIG_PREEMPT_RT_BASE
+static inline void remote_lru_add_drain(int cpu, struct cpumask *has_work)
+{
+	local_lock_on(swapvec_lock, cpu);
+	lru_add_drain_cpu(cpu);
+	local_unlock_on(swapvec_lock, cpu);
+}
+
+#else
 
 static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
 
```
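Two details in this hunk. First, `lru_add_drain()` swaps the `get_cpu()`/`put_cpu()` pair for `local_lock_cpu()`/`local_unlock_cpu()`: on !RT that is the same preempt-disable dance, while on RT it takes `swapvec_lock` and returns the now-stable CPU id. Second, the RT flavour of `remote_lru_add_drain()` drains a remote CPU synchronously by taking that CPU's `swapvec_lock` via `local_lock_on()`; since the pagevecs are now protected by a real lock rather than by "only ever touched from the owning CPU", there is no need to schedule a worker on the target CPU. The `has_work` mask is accepted but deliberately left untouched in this variant.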
```diff
@@ -657,6 +679,16 @@
 {
 	lru_add_drain();
 }
+
+static inline void remote_lru_add_drain(int cpu, struct cpumask *has_work)
+{
+	struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
+
+	INIT_WORK(work, lru_add_drain_per_cpu);
+	queue_work_on(cpu, mm_percpu_wq, work);
+	cpumask_set_cpu(cpu, has_work);
+}
+#endif
 
 /*
  * Doesn't need any cpu hotplug locking because we do rely on per-cpu
```
```diff
@@ -682,21 +714,19 @@
 	cpumask_clear(&has_work);
 
 	for_each_online_cpu(cpu) {
-		struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
 
 		if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) ||
 		    pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) ||
 		    pagevec_count(&per_cpu(lru_deactivate_file_pvecs, cpu)) ||
 		    pagevec_count(&per_cpu(lru_lazyfree_pvecs, cpu)) ||
-		    need_activate_page_drain(cpu)) {
-			INIT_WORK(work, lru_add_drain_per_cpu);
-			queue_work_on(cpu, mm_percpu_wq, work);
-			cpumask_set_cpu(cpu, &has_work);
-		}
+		    need_activate_page_drain(cpu))
+			remote_lru_add_drain(cpu, &has_work);
 	}
 
+#ifndef CONFIG_PREEMPT_RT_BASE
 	for_each_cpu(cpu, &has_work)
 		flush_work(&per_cpu(lru_add_drain_work, cpu));
+#endif
 
 	mutex_unlock(&lock);
 }
```
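On !RT this loop still queues `lru_add_drain_per_cpu()` on each CPU with pending pagevecs and then flushes the work items. The new `#ifndef CONFIG_PREEMPT_RT_BASE` guard around the flush is needed because the RT build neither defines `lru_add_drain_work` nor sets any bits in `has_work`: its `remote_lru_add_drain()` already drained each CPU synchronously inside the loop, so by the time the loop ends there is nothing left to wait for.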