2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/fs/xfs/kmem.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Copyright (c) 2000-2005 Silicon Graphics, Inc.
  * All Rights Reserved.
@@ -16,11 +16,10 @@
  */
 
 typedef unsigned __bitwise xfs_km_flags_t;
-#define KM_SLEEP	((__force xfs_km_flags_t)0x0001u)
-#define KM_NOSLEEP	((__force xfs_km_flags_t)0x0002u)
 #define KM_NOFS		((__force xfs_km_flags_t)0x0004u)
 #define KM_MAYFAIL	((__force xfs_km_flags_t)0x0008u)
 #define KM_ZERO		((__force xfs_km_flags_t)0x0010u)
+#define KM_NOLOCKDEP	((__force xfs_km_flags_t)0x0020u)
 
 /*
  * We use a special process flag to avoid recursive callbacks into
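
Annotation (illustrative, not part of the patch): with KM_SLEEP and KM_NOSLEEP removed, blocking becomes the only allocation mode, so callers pass 0 where they previously passed KM_SLEEP, and atomic callers have to move off these wrappers entirely. A sketch of the implied conversion, using hypothetical call sites:

/* Hypothetical call sites, illustrative only. */
static void flag_conversion_example(size_t size)
{
	void *a, *b;

	/* before: a = kmem_zalloc(size, KM_SLEEP);  blocking was opt-in   */
	/*         b = kmem_alloc(size, KM_NOSLEEP); atomic via GFP_ATOMIC */

	a = kmem_zalloc(size, 0);	/* after: 0 means a normal blocking allocation */
	b = kmalloc(size, GFP_ATOMIC);	/* atomic contexts bypass the XFS wrappers */

	kmem_free(a);
	kfree(b);
}
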
@@ -32,15 +31,11 @@
 {
 	gfp_t lflags;
 
-	BUG_ON(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL|KM_ZERO));
+	BUG_ON(flags & ~(KM_NOFS | KM_MAYFAIL | KM_ZERO | KM_NOLOCKDEP));
 
-	if (flags & KM_NOSLEEP) {
-		lflags = GFP_ATOMIC | __GFP_NOWARN;
-	} else {
-		lflags = GFP_KERNEL | __GFP_NOWARN;
-		if (flags & KM_NOFS)
-			lflags &= ~__GFP_FS;
-	}
+	lflags = GFP_KERNEL | __GFP_NOWARN;
+	if (flags & KM_NOFS)
+		lflags &= ~__GFP_FS;
 
 	/*
 	 * Default page/slab allocator behavior is to retry for ever
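
Annotation (illustrative): the rewrite collapses the two-branch conversion into a single GFP_KERNEL base path. A minimal sketch of the resulting mapping, assuming the code elided between these hunks only adjusts retry behaviour for KM_MAYFAIL:

/* Illustrative only; ignores the elided KM_MAYFAIL/retry handling. */
static void gfp_mapping_example(void)
{
	gfp_t def  = kmem_flags_convert(0);		/* GFP_KERNEL | __GFP_NOWARN */
	gfp_t nofs = kmem_flags_convert(KM_NOFS);	/* same, with __GFP_FS cleared so
							 * reclaim cannot recurse into the fs */
	(void)def;
	(void)nofs;
}
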
@@ -55,12 +50,15 @@
 	if (flags & KM_ZERO)
 		lflags |= __GFP_ZERO;
 
+	if (flags & KM_NOLOCKDEP)
+		lflags |= __GFP_NOLOCKDEP;
+
 	return lflags;
 }
 
 extern void *kmem_alloc(size_t, xfs_km_flags_t);
+extern void *kmem_alloc_io(size_t size, int align_mask, xfs_km_flags_t flags);
 extern void *kmem_alloc_large(size_t size, xfs_km_flags_t);
-extern void *kmem_realloc(const void *, size_t, xfs_km_flags_t);
 static inline void kmem_free(const void *ptr)
 {
 	kvfree(ptr);
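
Annotation (illustrative): kmem_alloc_io() is new in this header; it is meant for I/O buffers that must respect a block device alignment mask. A hypothetical caller, assuming align_mask comes from the target device (e.g. 511 for 512-byte alignment):

/* Hypothetical caller, illustrative only. */
static void *alloc_io_buffer_example(size_t nbytes, int align_mask)
{
	void *buf = kmem_alloc_io(nbytes, align_mask, KM_ZERO | KM_MAYFAIL);

	/* the low align_mask bits of the address are expected to be clear */
	if (buf)
		ASSERT(!((unsigned long)buf & align_mask));
	return buf;
}
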
@@ -73,55 +71,19 @@
 	return kmem_alloc(size, flags | KM_ZERO);
 }
 
-static inline void *
-kmem_zalloc_large(size_t size, xfs_km_flags_t flags)
-{
-	return kmem_alloc_large(size, flags | KM_ZERO);
-}
-
 /*
  * Zone interfaces
  */
 
-#define KM_ZONE_HWALIGN	SLAB_HWCACHE_ALIGN
-#define KM_ZONE_RECLAIM	SLAB_RECLAIM_ACCOUNT
-#define KM_ZONE_SPREAD	SLAB_MEM_SPREAD
-#define KM_ZONE_ACCOUNT	SLAB_ACCOUNT
-
 #define kmem_zone	kmem_cache
 #define kmem_zone_t	struct kmem_cache
 
-static inline kmem_zone_t *
-kmem_zone_init(int size, char *zone_name)
+static inline struct page *
+kmem_to_page(void *addr)
 {
-	return kmem_cache_create(zone_name, size, 0, 0, NULL);
-}
-
-static inline kmem_zone_t *
-kmem_zone_init_flags(int size, char *zone_name, slab_flags_t flags,
-		void (*construct)(void *))
-{
-	return kmem_cache_create(zone_name, size, 0, flags, construct);
-}
-
-static inline void
-kmem_zone_free(kmem_zone_t *zone, void *ptr)
-{
-	kmem_cache_free(zone, ptr);
-}
-
-static inline void
-kmem_zone_destroy(kmem_zone_t *zone)
-{
-	kmem_cache_destroy(zone);
-}
-
-extern void *kmem_zone_alloc(kmem_zone_t *, xfs_km_flags_t);
-
-static inline void *
-kmem_zone_zalloc(kmem_zone_t *zone, xfs_km_flags_t flags)
-{
-	return kmem_zone_alloc(zone, flags | KM_ZERO);
+	if (is_vmalloc_addr(addr))
+		return vmalloc_to_page(addr);
+	return virt_to_page(addr);
 }
 
 #endif /* __XFS_SUPPORT_KMEM_H__ */
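
Annotation (illustrative): the removed zone wrappers were mostly one-line aliases over the kmem_cache API, so their callers can use kmem_cache_create(), kmem_cache_free() and kmem_cache_destroy() directly. The new kmem_to_page() works for both kmalloc-backed kmem_alloc() and vmalloc-backed kmem_alloc_large() memory; a hypothetical use when attaching a buffer fragment to a bio (len must stay within one page):

/* Hypothetical caller, illustrative only. */
static void map_to_bio_example(struct bio *bio, void *data, unsigned int len)
{
	/* resolves the backing page whether data is kmalloc or vmalloc memory */
	bio_add_page(bio, kmem_to_page(data), len, offset_in_page(data));
}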