  .. |   .. |
 860 |  860 | 	struct vmap_block *vb;
 861 |  861 | 	struct vmap_area *va;
 862 |  862 | 	unsigned long vb_idx;
 863 |      | -	int node, err;
     |  863 | +	int node, err, cpu;
 864 |  864 | 	void *vaddr;
 865 |  865 | 
 866 |  866 | 	node = numa_node_id();
  .. |   .. |
 903 |  903 | 	BUG_ON(err);
 904 |  904 | 	radix_tree_preload_end();
 905 |  905 | 
 906 |      | -	vbq = &get_cpu_var(vmap_block_queue);
     |  906 | +	cpu = get_cpu_light();
     |  907 | +	vbq = this_cpu_ptr(&vmap_block_queue);
 907 |  908 | 	spin_lock(&vbq->lock);
 908 |  909 | 	list_add_tail_rcu(&vb->free_list, &vbq->free);
 909 |  910 | 	spin_unlock(&vbq->lock);
 910 |      | -	put_cpu_var(vmap_block_queue);
     |  911 | +	put_cpu_light();
 911 |  912 | 
 912 |  913 | 	return vaddr;
 913 |  914 | }
  .. |   .. |
 976 |  977 | 	struct vmap_block *vb;
 977 |  978 | 	void *vaddr = NULL;
 978 |  979 | 	unsigned int order;
     |  980 | +	int cpu;
 979 |  981 | 
 980 |  982 | 	BUG_ON(offset_in_page(size));
 981 |  983 | 	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
  .. |   .. |
 990 |  992 | 	order = get_order(size);
 991 |  993 | 
 992 |  994 | 	rcu_read_lock();
 993 |      | -	vbq = &get_cpu_var(vmap_block_queue);
     |  995 | +	cpu = get_cpu_light();
     |  996 | +	vbq = this_cpu_ptr(&vmap_block_queue);
 994 |  997 | 	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
 995 |  998 | 		unsigned long pages_off;
 996 |  999 | 
  .. |   .. |
1013 | 1016 | 		break;
1014 | 1017 | 	}
1015 | 1018 | 
1016 |      | -	put_cpu_var(vmap_block_queue);
     | 1019 | +	put_cpu_light();
1017 | 1020 | 	rcu_read_unlock();
1018 | 1021 | 
1019 | 1022 | 	/* Allocate new block if nothing was found */
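
For context, a minimal sketch of what the `get_cpu_light()`/`put_cpu_light()` helpers used in these hunks typically expand to in the -rt tree. The exact definition lives elsewhere in the -rt series (not in this patch) and varies between releases, so treat this as an assumption-laden illustration of the pattern: `get_cpu_var()` implies `preempt_disable()`, which is too strong on PREEMPT_RT where the per-CPU `vmap_block_queue` spinlock becomes a sleeping lock, whereas `get_cpu_light()` only pins the task to its current CPU.

```c
/*
 * Illustrative sketch, not part of this patch: the -rt helpers are
 * assumed to disable migration (not preemption) on PREEMPT_RT_FULL,
 * so the per-CPU pointer stays stable while the (sleeping) spinlock
 * may still be taken.  On !RT they fall back to the classic
 * get_cpu()/put_cpu() pair.
 */
#ifdef CONFIG_PREEMPT_RT_FULL
# define get_cpu_light()	({ migrate_disable(); smp_processor_id(); })
# define put_cpu_light()	migrate_enable()
#else
# define get_cpu_light()	get_cpu()	/* preempt_disable() + CPU id */
# define put_cpu_light()	put_cpu()	/* preempt_enable() */
#endif
```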