hc
2023-12-11 6778948f9de86c3cfaf36725a7c87dcff9ba247f
kernel/mm/zsmalloc.c
@@ -57,7 +57,6 @@
 #include <linux/wait.h>
 #include <linux/pagemap.h>
 #include <linux/fs.h>
-#include <linux/local_lock.h>
 
 #define ZSPAGE_MAGIC	0x58
 
@@ -77,20 +76,6 @@
 #define ZS_MAX_PAGES_PER_ZSPAGE (_AC(1, UL) << ZS_MAX_ZSPAGE_ORDER)
 
 #define ZS_HANDLE_SIZE (sizeof(unsigned long))
-
-#ifdef CONFIG_PREEMPT_RT
-
-struct zsmalloc_handle {
-	unsigned long addr;
-	spinlock_t lock;
-};
-
-#define ZS_HANDLE_ALLOC_SIZE (sizeof(struct zsmalloc_handle))
-
-#else
-
-#define ZS_HANDLE_ALLOC_SIZE (sizeof(unsigned long))
-#endif
 
 /*
  * Object location (<PFN>, <obj_idx>) is encoded as
@@ -308,7 +293,6 @@
 };
 
 struct mapping_area {
-	local_lock_t lock;
 	char *vm_buf; /* copy buffer for objects that span pages */
 	char *vm_addr; /* address of kmap_atomic()'ed pages */
 	enum zs_mapmode vm_mm; /* mapping mode */
@@ -338,7 +322,7 @@
 
 static int create_cache(struct zs_pool *pool)
 {
-	pool->handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_ALLOC_SIZE,
+	pool->handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_SIZE,
 					0, 0, NULL);
 	if (!pool->handle_cachep)
 		return 1;
@@ -362,26 +346,9 @@
 
 static unsigned long cache_alloc_handle(struct zs_pool *pool, gfp_t gfp)
 {
-	void *p;
-
-	p = kmem_cache_alloc(pool->handle_cachep,
-		     gfp & ~(__GFP_HIGHMEM|__GFP_MOVABLE));
-#ifdef CONFIG_PREEMPT_RT
-	if (p) {
-		struct zsmalloc_handle *zh = p;
-
-		spin_lock_init(&zh->lock);
-	}
-#endif
-	return (unsigned long)p;
+	return (unsigned long)kmem_cache_alloc(pool->handle_cachep,
+			gfp & ~(__GFP_HIGHMEM|__GFP_MOVABLE|__GFP_CMA));
 }
-
-#ifdef CONFIG_PREEMPT_RT
-static struct zsmalloc_handle *zs_get_pure_handle(unsigned long handle)
-{
-	return (void *)(handle &~((1 << OBJ_TAG_BITS) - 1));
-}
-#endif
 
 static void cache_free_handle(struct zs_pool *pool, unsigned long handle)
 {
@@ -401,18 +368,12 @@
 
 static void record_obj(unsigned long handle, unsigned long obj)
 {
-#ifdef CONFIG_PREEMPT_RT
-	struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
-
-	WRITE_ONCE(zh->addr, obj);
-#else
 	/*
 	 * lsb of @obj represents handle lock while other bits
 	 * represent object value the handle is pointing so
 	 * updating shouldn't do store tearing.
 	 */
 	WRITE_ONCE(*(unsigned long *)handle, obj);
-#endif
 }
 
 /* zpool driver */
@@ -494,10 +455,7 @@
 #endif /* CONFIG_ZPOOL */
 
 /* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
-static DEFINE_PER_CPU(struct mapping_area, zs_map_area) = {
-	/* XXX remove this and use a spin_lock_t in pin_tag() */
-	.lock	= INIT_LOCAL_LOCK(lock),
-};
+static DEFINE_PER_CPU(struct mapping_area, zs_map_area);
 
 static bool is_zspage_isolated(struct zspage *zspage)
 {
@@ -907,13 +865,7 @@
 
 static unsigned long handle_to_obj(unsigned long handle)
 {
-#ifdef CONFIG_PREEMPT_RT
-	struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
-
-	return zh->addr;
-#else
 	return *(unsigned long *)handle;
-#endif
 }
 
 static unsigned long obj_to_head(struct page *page, void *obj)
@@ -927,46 +879,22 @@
 
 static inline int testpin_tag(unsigned long handle)
 {
-#ifdef CONFIG_PREEMPT_RT
-	struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
-
-	return spin_is_locked(&zh->lock);
-#else
 	return bit_spin_is_locked(HANDLE_PIN_BIT, (unsigned long *)handle);
-#endif
 }
 
 static inline int trypin_tag(unsigned long handle)
 {
-#ifdef CONFIG_PREEMPT_RT
-	struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
-
-	return spin_trylock(&zh->lock);
-#else
 	return bit_spin_trylock(HANDLE_PIN_BIT, (unsigned long *)handle);
-#endif
 }
 
 static void pin_tag(unsigned long handle) __acquires(bitlock)
 {
-#ifdef CONFIG_PREEMPT_RT
-	struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
-
-	return spin_lock(&zh->lock);
-#else
 	bit_spin_lock(HANDLE_PIN_BIT, (unsigned long *)handle);
-#endif
 }
 
 static void unpin_tag(unsigned long handle) __releases(bitlock)
 {
-#ifdef CONFIG_PREEMPT_RT
-	struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
-
-	return spin_unlock(&zh->lock);
-#else
 	bit_spin_unlock(HANDLE_PIN_BIT, (unsigned long *)handle);
-#endif
 }
 
 static void reset_page(struct page *page)
@@ -1350,8 +1278,7 @@
 	class = pool->size_class[class_idx];
 	off = (class->size * obj_idx) & ~PAGE_MASK;
 
-	local_lock(&zs_map_area.lock);
-	area = this_cpu_ptr(&zs_map_area);
+	area = &get_cpu_var(zs_map_area);
 	area->vm_mm = mm;
 	if (off + class->size <= PAGE_SIZE) {
 		/* this object is contained entirely within a page */
@@ -1405,7 +1332,7 @@
 
 		__zs_unmap_object(area, pages, off, class->size);
 	}
-	local_unlock(&zs_map_area.lock);
+	put_cpu_var(zs_map_area);
 
 	migrate_read_unlock(zspage);
 	unpin_tag(handle);
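
The hunks above drop the PREEMPT_RT-specific handle (a kmem-cache object carrying its own spinlock_t) and the local_lock in struct mapping_area, falling back to the mainline scheme: a handle is a bare unsigned long whose pointee stores the object location, bit 0 (HANDLE_PIN_BIT) doubles as a bit spinlock for pinning, and the per-CPU zs_map_area is claimed with get_cpu_var()/put_cpu_var(), which disables preemption until the matching put. The sketch below is illustrative only and not part of the patch; it mirrors zsmalloc.c conventions but uses locally defined example_* names.

/*
 * Minimal sketch of the non-RT conventions this change restores.
 * EXAMPLE_HANDLE_PIN_BIT mirrors HANDLE_PIN_BIT (bit 0) in zsmalloc.c;
 * the example_* identifiers exist only in this sketch.
 */
#include <linux/bit_spinlock.h>
#include <linux/compiler.h>
#include <linux/percpu.h>

#define EXAMPLE_HANDLE_PIN_BIT	0

struct example_mapping_area {
	char *vm_buf;
};

static DEFINE_PER_CPU(struct example_mapping_area, example_map_area);

/* Pin the object a handle refers to, update its location, then unpin. */
static void example_update_obj(unsigned long handle, unsigned long obj)
{
	bit_spin_lock(EXAMPLE_HANDLE_PIN_BIT, (unsigned long *)handle);
	/* bit 0 carries the lock, so a single store cannot tear the value */
	WRITE_ONCE(*(unsigned long *)handle, obj);
	bit_spin_unlock(EXAMPLE_HANDLE_PIN_BIT, (unsigned long *)handle);
}

/* Claim this CPU's mapping area; preemption stays off until the unmap. */
static char *example_map(void)
{
	struct example_mapping_area *area = &get_cpu_var(example_map_area);

	return area->vm_buf;
}

static void example_unmap(void)
{
	put_cpu_var(example_map_area);
}

Compared with the removed RT variant, this keeps the handle at sizeof(unsigned long) and needs no per-handle spinlock_t, at the cost of spinning and holding off preemption while a tag is pinned or a mapping area is in use, which is the behaviour the RT tree's spinlock-based handle was originally added to avoid.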