```diff
@@ -56,6 +56,7 @@
 #include <linux/wait.h>
 #include <linux/pagemap.h>
 #include <linux/fs.h>
+#include <linux/locallock.h>
 
 #define ZSPAGE_MAGIC	0x58
 
```
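`<linux/locallock.h>` is a header carried by the RT patchset, not mainline; it provides the `DEFINE_LOCAL_IRQ_LOCK()` / `get_locked_var()` primitives used further down to replace the preemption-disabling per-CPU accessors.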
```diff
@@ -73,8 +74,21 @@
  */
 #define ZS_MAX_ZSPAGE_ORDER 2
 #define ZS_MAX_PAGES_PER_ZSPAGE (_AC(1, UL) << ZS_MAX_ZSPAGE_ORDER)
-
 #define ZS_HANDLE_SIZE (sizeof(unsigned long))
+
+#ifdef CONFIG_PREEMPT_RT_FULL
+
+struct zsmalloc_handle {
+	unsigned long addr;
+	struct mutex lock;
+};
+
+#define ZS_HANDLE_ALLOC_SIZE (sizeof(struct zsmalloc_handle))
+
+#else
+
+#define ZS_HANDLE_ALLOC_SIZE (sizeof(unsigned long))
+#endif
 
 /*
  * Object location (<PFN>, <obj_idx>) is encoded as
```
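On PREEMPT_RT_FULL the handle can no longer be a bare `unsigned long` whose low bit doubles as a bit spinlock: bit spinlocks busy-wait with preemption disabled, which the RT kernel does not tolerate. The handle is therefore widened into a small struct that pairs the object address with a sleeping `struct mutex`, and the new `ZS_HANDLE_ALLOC_SIZE` sizes the slab cache for whichever layout is in effect, while the existing `ZS_HANDLE_SIZE` stays available for sites that only care about the embedded address word.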
```diff
@@ -325,7 +339,7 @@
 
 static int create_cache(struct zs_pool *pool)
 {
-	pool->handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_SIZE,
+	pool->handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_ALLOC_SIZE,
 					0, 0, NULL);
 	if (!pool->handle_cachep)
 		return 1;
```
```diff
@@ -349,9 +363,26 @@
 
 static unsigned long cache_alloc_handle(struct zs_pool *pool, gfp_t gfp)
 {
-	return (unsigned long)kmem_cache_alloc(pool->handle_cachep,
-			gfp & ~(__GFP_HIGHMEM|__GFP_MOVABLE));
+	void *p;
+
+	p = kmem_cache_alloc(pool->handle_cachep,
+			gfp & ~(__GFP_HIGHMEM|__GFP_MOVABLE));
+#ifdef CONFIG_PREEMPT_RT_FULL
+	if (p) {
+		struct zsmalloc_handle *zh = p;
+
+		mutex_init(&zh->lock);
+	}
+#endif
+	return (unsigned long)p;
 }
+
+#ifdef CONFIG_PREEMPT_RT_FULL
+static struct zsmalloc_handle *zs_get_pure_handle(unsigned long handle)
+{
+	return (void *)(handle & ~((1 << OBJ_TAG_BITS) - 1));
+}
+#endif
 
 static void cache_free_handle(struct zs_pool *pool, unsigned long handle)
 {
```
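Both paths still return the allocation cast to `unsigned long`, so existing callers keep passing handles around as plain integers. `zs_get_pure_handle()` recovers the struct pointer by masking off the low `OBJ_TAG_BITS`: slab objects are at least word aligned, so those bits of the real pointer are guaranteed zero and are free to carry tags (mainline zsmalloc has `OBJ_TAG_BITS == 1`, the pin bit). A standalone sketch of that arithmetic, with a made-up pointer value:

```c
#include <stdio.h>
#include <stdint.h>

#define OBJ_TAG_BITS 1	/* as in mainline zsmalloc: one low tag bit */

int main(void)
{
	uint64_t ptr = 0x55aa00f0ULL;	/* hypothetical aligned slab object */
	uint64_t handle = ptr | 1ULL;	/* tag (pin) bit set in the lsb */
	uint64_t pure = handle & ~((1ULL << OBJ_TAG_BITS) - 1);

	/* masking the tag bits recovers the original pointer */
	printf("recovered == original: %d\n", pure == ptr);	/* prints 1 */
	return 0;
}
```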
```diff
@@ -371,12 +402,18 @@
 
 static void record_obj(unsigned long handle, unsigned long obj)
 {
+#ifdef CONFIG_PREEMPT_RT_FULL
+	struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
+
+	WRITE_ONCE(zh->addr, obj);
+#else
 	/*
 	 * lsb of @obj represents handle lock while other bits
 	 * represent object value the handle is pointing so
 	 * updating shouldn't do store tearing.
 	 */
 	WRITE_ONCE(*(unsigned long *)handle, obj);
+#endif
 }
 
 /* zpool driver */
```
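On RT the pin state lives in `zh->lock`, so `zh->addr` holds the raw, untagged object value; the lsb-packing rationale in the retained comment only applies to the non-RT branch. `WRITE_ONCE()` is kept on both paths so lockless readers such as `handle_to_obj()` see either the old or the new value, never a torn store.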
```diff
@@ -458,6 +495,7 @@
 
 /* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
 static DEFINE_PER_CPU(struct mapping_area, zs_map_area);
+static DEFINE_LOCAL_IRQ_LOCK(zs_map_area_lock);
 
 static bool is_zspage_isolated(struct zspage *zspage)
 {
```
```diff
@@ -887,7 +925,13 @@
 
 static unsigned long handle_to_obj(unsigned long handle)
 {
+#ifdef CONFIG_PREEMPT_RT_FULL
+	struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
+
+	return zh->addr;
+#else
 	return *(unsigned long *)handle;
+#endif
 }
 
 static unsigned long obj_to_head(struct page *page, void *obj)
```
```diff
@@ -901,22 +945,46 @@
 
 static inline int testpin_tag(unsigned long handle)
 {
+#ifdef CONFIG_PREEMPT_RT_FULL
+	struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
+
+	return mutex_is_locked(&zh->lock);
+#else
 	return bit_spin_is_locked(HANDLE_PIN_BIT, (unsigned long *)handle);
+#endif
 }
 
 static inline int trypin_tag(unsigned long handle)
 {
+#ifdef CONFIG_PREEMPT_RT_FULL
+	struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
+
+	return mutex_trylock(&zh->lock);
+#else
 	return bit_spin_trylock(HANDLE_PIN_BIT, (unsigned long *)handle);
+#endif
 }
 
 static void pin_tag(unsigned long handle)
 {
+#ifdef CONFIG_PREEMPT_RT_FULL
+	struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
+
+	mutex_lock(&zh->lock);
+#else
 	bit_spin_lock(HANDLE_PIN_BIT, (unsigned long *)handle);
+#endif
 }
 
 static void unpin_tag(unsigned long handle)
 {
+#ifdef CONFIG_PREEMPT_RT_FULL
+	struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
+
+	mutex_unlock(&zh->lock);
+#else
 	bit_spin_unlock(HANDLE_PIN_BIT, (unsigned long *)handle);
+#endif
 }
 
 static void reset_page(struct page *page)
```
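The four tag helpers translate one-for-one: `mutex_is_locked()`, `mutex_trylock()`, `mutex_lock()` and `mutex_unlock()` keep the same return conventions as their `bit_spin` counterparts (`mutex_trylock()` returns 1 on success, matching `bit_spin_trylock()`), so no caller needs to change. The real semantic shift is that `pin_tag()` may now sleep, which is the point on RT: a preempted pin holder no longer forces other contexts to spin with preemption disabled.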
```diff
@@ -1342,7 +1410,7 @@
 	class = pool->size_class[class_idx];
 	off = (class->size * obj_idx) & ~PAGE_MASK;
 
-	area = &get_cpu_var(zs_map_area);
+	area = &get_locked_var(zs_map_area_lock, zs_map_area);
 	area->vm_mm = mm;
 	if (off + class->size <= PAGE_SIZE) {
 		/* this object is contained entirely within a page */
```
```diff
@@ -1396,7 +1464,7 @@
 
 		__zs_unmap_object(area, pages, off, class->size);
 	}
-	put_cpu_var(zs_map_area);
+	put_locked_var(zs_map_area_lock, zs_map_area);
 
 	migrate_read_unlock(zspage);
 	unpin_tag(handle);
```
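`get_cpu_var()` disables preemption for as long as the mapping area is in use, which RT cannot accept across a copy of up to `class->size` bytes. `get_locked_var()` instead takes the per-CPU `zs_map_area_lock` declared earlier: on RT this is a sleeping lock that keeps the section preemptible while still serializing access to this CPU's `zs_map_area`; on !RT it reduces to the old `get_cpu_var()`/`put_cpu_var()` behaviour. A schematic kernel-style sketch of the pattern, with hypothetical names (`scratch_area`, `scratch_lock`) and assuming the RT tree's locallock API:

```c
#include <linux/locallock.h>
#include <linux/percpu.h>

/* Hypothetical per-CPU scratch data, for illustration only. */
struct scratch {
	char buf[64];
};

static DEFINE_PER_CPU(struct scratch, scratch_area);
static DEFINE_LOCAL_IRQ_LOCK(scratch_lock);

static void scratch_use(void)
{
	/*
	 * RT:  takes a per-CPU sleeping lock, section stays preemptible.
	 * !RT: equivalent to get_cpu_var(), i.e. preempt_disable().
	 */
	struct scratch *s = &get_locked_var(scratch_lock, scratch_area);

	s->buf[0] = 0;	/* exclusive use of this CPU's scratch area */

	put_locked_var(scratch_lock, scratch_area);
}
```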
|---|