@@ ... @@
 #include <linux/wait.h>
 #include <linux/pagemap.h>
 #include <linux/fs.h>
-#include <linux/local_lock.h>
 
 #define ZSPAGE_MAGIC	0x58
 
.. | .. |
---|
77 | 76 | #define ZS_MAX_PAGES_PER_ZSPAGE (_AC(1, UL) << ZS_MAX_ZSPAGE_ORDER) |
---|
78 | 77 | |
---|
79 | 78 | #define ZS_HANDLE_SIZE (sizeof(unsigned long)) |
---|
80 | | - |
---|
81 | | -#ifdef CONFIG_PREEMPT_RT |
---|
82 | | - |
---|
83 | | -struct zsmalloc_handle { |
---|
84 | | - unsigned long addr; |
---|
85 | | - spinlock_t lock; |
---|
86 | | -}; |
---|
87 | | - |
---|
88 | | -#define ZS_HANDLE_ALLOC_SIZE (sizeof(struct zsmalloc_handle)) |
---|
89 | | - |
---|
90 | | -#else |
---|
91 | | - |
---|
92 | | -#define ZS_HANDLE_ALLOC_SIZE (sizeof(unsigned long)) |
---|
93 | | -#endif |
---|
94 | 79 | |
---|
95 | 80 | /* |
---|
96 | 81 | * Object location (<PFN>, <obj_idx>) is encoded as |
---|
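Why the removed branch existed: outside PREEMPT_RT a handle is a single word whose least-significant bit doubles as the pin lock (HANDLE_PIN_BIT, taken by pin_tag() further down), so ZS_HANDLE_SIZE is all the slab cache needs. A bit spinlock spins with preemption disabled, which RT cannot tolerate, so the RT branch widened the handle into the struct above to carry a real spinlock_t. A hedged sketch of the non-RT encoding, with hypothetical names:

	/* Hypothetical illustration, not kernel source: bit 0 of the handle
	 * word is the pin lock, the remaining bits encode the object
	 * location, so one word serves as both data and lock. */
	#define EXAMPLE_PIN_BIT		0
	#define EXAMPLE_OBJ(word)	((word) & ~1UL)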
.. | .. |
---|
308 | 293 | }; |
---|
309 | 294 | |
---|
310 | 295 | struct mapping_area { |
---|
311 | | - local_lock_t lock; |
---|
312 | 296 | char *vm_buf; /* copy buffer for objects that span pages */ |
---|
313 | 297 | char *vm_addr; /* address of kmap_atomic()'ed pages */ |
---|
314 | 298 | enum zs_mapmode vm_mm; /* mapping mode */ |
---|
.. | .. |
---|
338 | 322 | |
---|
339 | 323 | static int create_cache(struct zs_pool *pool) |
---|
340 | 324 | { |
---|
341 | | - pool->handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_ALLOC_SIZE, |
---|
| 325 | + pool->handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_SIZE, |
---|
342 | 326 | 0, 0, NULL); |
---|
343 | 327 | if (!pool->handle_cachep) |
---|
344 | 328 | return 1; |
---|
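With the RT struct gone, handle objects shrink back to one word. A hedged sketch of the slab-cache setup this line performs, with hypothetical names; kmem_cache_create() takes (name, object size, alignment, slab flags, constructor):

	/* Hypothetical stand-alone version of the cache creation above. */
	static struct kmem_cache *example_cachep;

	static int example_create_cache(void)
	{
		example_cachep = kmem_cache_create("zs_handle",
						   sizeof(unsigned long),
						   0, 0, NULL);
		return example_cachep ? 0 : 1;	/* nonzero on failure, as above */
	}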
@@ ... @@
 
 static unsigned long cache_alloc_handle(struct zs_pool *pool, gfp_t gfp)
 {
-	void *p;
-
-	p = kmem_cache_alloc(pool->handle_cachep,
-			     gfp & ~(__GFP_HIGHMEM|__GFP_MOVABLE));
-#ifdef CONFIG_PREEMPT_RT
-	if (p) {
-		struct zsmalloc_handle *zh = p;
-
-		spin_lock_init(&zh->lock);
-	}
-#endif
-	return (unsigned long)p;
+	return (unsigned long)kmem_cache_alloc(pool->handle_cachep,
+			gfp & ~(__GFP_HIGHMEM|__GFP_MOVABLE|__GFP_CMA));
 }
-
-#ifdef CONFIG_PREEMPT_RT
-static struct zsmalloc_handle *zs_get_pure_handle(unsigned long handle)
-{
-	return (void *)(handle & ~((1 << OBJ_TAG_BITS) - 1));
-}
-#endif
 
 static void cache_free_handle(struct zs_pool *pool, unsigned long handle)
 {
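Besides dropping the RT spin_lock_init() step, the rewrite masks the caller's gfp flags harder: handles live in a slab cache, so page-placement modifiers are meaningless for them. __GFP_HIGHMEM and __GFP_MOVABLE steer page allocations, and slab objects can be neither; __GFP_CMA is this tree's addition (it is not an upstream flag). A hedged sketch of the sanitizing step, hypothetical name:

	/* Hypothetical helper: strip page-placement hints before asking
	 * the slab allocator for a handle. */
	static unsigned long example_alloc_handle(struct kmem_cache *cachep,
						  gfp_t gfp)
	{
		gfp &= ~(__GFP_HIGHMEM | __GFP_MOVABLE | __GFP_CMA);
		return (unsigned long)kmem_cache_alloc(cachep, gfp);
	}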
@@ ... @@
 
 static void record_obj(unsigned long handle, unsigned long obj)
 {
-#ifdef CONFIG_PREEMPT_RT
-	struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
-
-	WRITE_ONCE(zh->addr, obj);
-#else
 	/*
 	 * lsb of @obj represents handle lock while other bits
 	 * represent object value the handle is pointing so
 	 * updating shouldn't do store tearing.
 	 */
 	WRITE_ONCE(*(unsigned long *)handle, obj);
-#endif
 }
 
 /* zpool driver */
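The surviving comment states the invariant the whole handle scheme rests on: lock bit and object location share one word, so an update must be a single word-sized store. WRITE_ONCE() guarantees that, and a concurrent reader such as handle_to_obj() or trypin_tag() can never observe a torn value. A hedged sketch of the pairing, hypothetical names:

	/* Hypothetical illustration: writer and reader each touch the
	 * shared word exactly once, at full width. */
	static void example_record(unsigned long *slot, unsigned long obj)
	{
		WRITE_ONCE(*slot, obj);		/* no store tearing */
	}

	static unsigned long example_read(unsigned long *slot)
	{
		return READ_ONCE(*slot);	/* no load tearing */
	}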
@@ ... @@
 #endif /* CONFIG_ZPOOL */
 
 /* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
-static DEFINE_PER_CPU(struct mapping_area, zs_map_area) = {
-	/* XXX remove this and use a spin_lock_t in pin_tag() */
-	.lock = INIT_LOCAL_LOCK(lock),
-};
+static DEFINE_PER_CPU(struct mapping_area, zs_map_area);
 
 static bool is_zspage_isolated(struct zspage *zspage)
 {
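With the lock member gone from struct mapping_area, the initializer (and its XXX note) has nothing left to set up, so a bare DEFINE_PER_CPU() suffices: each possible CPU gets its own zero-initialized instance. Exclusion moves to get_cpu_var()/put_cpu_var() in zs_map_object()/zs_unmap_object(), sketched after the final hunk. A hedged illustration of the per-CPU pattern, hypothetical names:

	/* Hypothetical illustration: per-CPU data needs no lock as long as
	 * the task cannot migrate (or be preempted) while using it. */
	static DEFINE_PER_CPU(struct mapping_area, example_area);

	static char *example_vm_buf(void)
	{
		/* this_cpu_ptr() assumes preemption is already disabled */
		return this_cpu_ptr(&example_area)->vm_buf;
	}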
@@ ... @@
 
 static unsigned long handle_to_obj(unsigned long handle)
 {
-#ifdef CONFIG_PREEMPT_RT
-	struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
-
-	return zh->addr;
-#else
 	return *(unsigned long *)handle;
-#endif
 }
 
 static unsigned long obj_to_head(struct page *page, void *obj)
@@ ... @@
 
 static inline int testpin_tag(unsigned long handle)
 {
-#ifdef CONFIG_PREEMPT_RT
-	struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
-
-	return spin_is_locked(&zh->lock);
-#else
 	return bit_spin_is_locked(HANDLE_PIN_BIT, (unsigned long *)handle);
-#endif
 }
 
 static inline int trypin_tag(unsigned long handle)
 {
-#ifdef CONFIG_PREEMPT_RT
-	struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
-
-	return spin_trylock(&zh->lock);
-#else
 	return bit_spin_trylock(HANDLE_PIN_BIT, (unsigned long *)handle);
-#endif
 }
 
 static void pin_tag(unsigned long handle) __acquires(bitlock)
 {
-#ifdef CONFIG_PREEMPT_RT
-	struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
-
-	return spin_lock(&zh->lock);
-#else
 	bit_spin_lock(HANDLE_PIN_BIT, (unsigned long *)handle);
-#endif
 }
 
 static void unpin_tag(unsigned long handle) __releases(bitlock)
 {
-#ifdef CONFIG_PREEMPT_RT
-	struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
-
-	return spin_unlock(&zh->lock);
-#else
 	bit_spin_unlock(HANDLE_PIN_BIT, (unsigned long *)handle);
-#endif
 }
 
 static void reset_page(struct page *page)
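All four helpers fall back to the bit-spinlock API: HANDLE_PIN_BIT inside the handle word is the lock itself, costing no extra storage. bit_spin_lock() spins with preemption disabled, which is precisely what the removed RT branch worked around with a sleeping-capable spinlock_t. A hedged sketch of the pattern, hypothetical names:

	#include <linux/bit_spinlock.h>

	/* Hypothetical illustration: bit 0 of a word used as a spinlock.
	 * While it is held, the object the word encodes stays pinned. */
	static void example_pin(unsigned long *slot)
	{
		bit_spin_lock(0, slot);		/* spin; preemption disabled */
	}

	static void example_unpin(unsigned long *slot)
	{
		bit_spin_unlock(0, slot);	/* release; preemption enabled */
	}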
@@ ... @@
 	class = pool->size_class[class_idx];
 	off = (class->size * obj_idx) & ~PAGE_MASK;
 
-	local_lock(&zs_map_area.lock);
-	area = this_cpu_ptr(&zs_map_area);
+	area = &get_cpu_var(zs_map_area);
 	area->vm_mm = mm;
 	if (off + class->size <= PAGE_SIZE) {
 		/* this object is contained entirely within a page */
@@ ... @@
 
 		__zs_unmap_object(area, pages, off, class->size);
 	}
-	local_unlock(&zs_map_area.lock);
+	put_cpu_var(zs_map_area);
 
 	migrate_read_unlock(zspage);
 	unpin_tag(handle);
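Together with the zs_map_object() hunk above, the map/unmap pair now brackets the per-CPU area with get_cpu_var()/put_cpu_var(): the former disables preemption and hands back this CPU's instance, the latter re-enables it, so the copy buffer can neither be re-entered on one CPU nor abandoned by a migrating task mid-copy. A hedged sketch of the bracket, hypothetical body:

	/* Hypothetical illustration of the locking bracket around the
	 * per-CPU mapping area. */
	static void example_map_unmap(void)
	{
		struct mapping_area *area;

		area = &get_cpu_var(zs_map_area); /* preempt_disable() + this CPU */
		/* ... copy the object through area->vm_buf ... */
		put_cpu_var(zs_map_area);	  /* preempt_enable() */
	}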