2024-05-11 297b60346df8beafee954a0fd7c2d64f33f3b9bc
diff --git a/kernel/fs/xfs/kmem.c b/kernel/fs/xfs/kmem.c
--- a/kernel/fs/xfs/kmem.c
+++ b/kernel/fs/xfs/kmem.c
@@ -3,15 +3,10 @@
  * Copyright (c) 2000-2005 Silicon Graphics, Inc.
  * All Rights Reserved.
  */
-#include <linux/mm.h>
-#include <linux/sched/mm.h>
-#include <linux/highmem.h>
-#include <linux/slab.h>
-#include <linux/swap.h>
-#include <linux/blkdev.h>
+#include "xfs.h"
 #include <linux/backing-dev.h>
-#include "kmem.h"
 #include "xfs_message.h"
+#include "xfs_trace.h"
 
 void *
 kmem_alloc(size_t size, xfs_km_flags_t flags)
@@ -20,9 +15,11 @@
         gfp_t   lflags = kmem_flags_convert(flags);
         void    *ptr;
 
+        trace_kmem_alloc(size, flags, _RET_IP_);
+
         do {
                 ptr = kmalloc(size, lflags);
-                if (ptr || (flags & (KM_MAYFAIL|KM_NOSLEEP)))
+                if (ptr || (flags & KM_MAYFAIL))
                         return ptr;
                 if (!(++retries % 100))
                         xfs_err(NULL,
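For context: kmem_flags_convert(), defined in fs/xfs/kmem.h, translates the XFS-private KM_* flags into gfp_t bits for the page/slab allocator. A sketch of the mapping as it stands once KM_NOSLEEP is gone (details vary between kernel versions, so treat this as an approximation rather than the exact in-tree code):

    static inline gfp_t
    kmem_flags_convert(xfs_km_flags_t flags)
    {
            gfp_t   lflags;

            BUG_ON(flags & ~(KM_NOFS | KM_MAYFAIL | KM_ZERO));

            lflags = GFP_KERNEL | __GFP_NOWARN;
            if (flags & KM_NOFS)
                    lflags &= ~__GFP_FS;            /* no fs reentry from reclaim */
            if (flags & KM_MAYFAIL)
                    lflags |= __GFP_RETRY_MAYFAIL;  /* allow the allocation to fail */
            if (flags & KM_ZERO)
                    lflags |= __GFP_ZERO;           /* return zeroed memory */
            return lflags;
    }

This is why the retry loop above only needs to check KM_MAYFAIL: with KM_NOSLEEP removed, every caller either tolerates failure or loops until kmalloc() succeeds.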
@@ -33,29 +30,25 @@
         } while (1);
 }
 
-void *
-kmem_alloc_large(size_t size, xfs_km_flags_t flags)
+
+/*
+ * __vmalloc() will allocate data pages and auxiliary structures (e.g.
+ * pagetables) with GFP_KERNEL, yet we may be under GFP_NOFS context here. Hence
+ * we need to tell memory reclaim that we are in such a context via
+ * PF_MEMALLOC_NOFS to prevent memory reclaim re-entering the filesystem here
+ * and potentially deadlocking.
+ */
+static void *
+__kmem_vmalloc(size_t size, xfs_km_flags_t flags)
 {
         unsigned nofs_flag = 0;
         void    *ptr;
-        gfp_t   lflags;
+        gfp_t   lflags = kmem_flags_convert(flags);
 
-        ptr = kmem_alloc(size, flags | KM_MAYFAIL);
-        if (ptr)
-                return ptr;
-
-        /*
-         * __vmalloc() will allocate data pages and auxillary structures (e.g.
-         * pagetables) with GFP_KERNEL, yet we may be under GFP_NOFS context
-         * here. Hence we need to tell memory reclaim that we are in such a
-         * context via PF_MEMALLOC_NOFS to prevent memory reclaim re-entering
-         * the filesystem here and potentially deadlocking.
-         */
         if (flags & KM_NOFS)
                 nofs_flag = memalloc_nofs_save();
 
-        lflags = kmem_flags_convert(flags);
-        ptr = __vmalloc(size, lflags, PAGE_KERNEL);
+        ptr = __vmalloc(size, lflags);
 
         if (flags & KM_NOFS)
                 memalloc_nofs_restore(nofs_flag);
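The comment block added above refers to the scoped allocation-context API from <linux/sched/mm.h>. The save/restore pair sets and clears PF_MEMALLOC_NOFS on the current task, making every allocation in between behave as GFP_NOFS regardless of the gfp_t actually passed, including the GFP_KERNEL allocations __vmalloc() performs internally for page tables. A minimal sketch of the pattern (hypothetical_nofs_vmalloc() is an illustrative name, not in-tree code):

    #include <linux/sched/mm.h>
    #include <linux/vmalloc.h>

    static void *hypothetical_nofs_vmalloc(size_t size)
    {
            unsigned int nofs_flag;
            void *ptr;

            nofs_flag = memalloc_nofs_save();   /* set PF_MEMALLOC_NOFS */
            ptr = __vmalloc(size, GFP_KERNEL);  /* reclaim cannot re-enter the fs */
            memalloc_nofs_restore(nofs_flag);   /* restore previous state */
            return ptr;
    }

Saving and restoring the old flag value, rather than clearing it unconditionally, is what lets these sections nest; __kmem_vmalloc() accordingly enters the section only when the caller actually asked for KM_NOFS.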
@@ -63,42 +56,40 @@
         return ptr;
 }
 
+/*
+ * Same as kmem_alloc_large, except we guarantee the buffer returned is aligned
+ * to the @align_mask. We only guarantee alignment up to page size, we'll clamp
+ * alignment at page size if it is larger. vmalloc always returns a PAGE_SIZE
+ * aligned region.
+ */
 void *
-kmem_realloc(const void *old, size_t newsize, xfs_km_flags_t flags)
+kmem_alloc_io(size_t size, int align_mask, xfs_km_flags_t flags)
 {
-        int     retries = 0;
-        gfp_t   lflags = kmem_flags_convert(flags);
         void    *ptr;
 
-        do {
-                ptr = krealloc(old, newsize, lflags);
-                if (ptr || (flags & (KM_MAYFAIL|KM_NOSLEEP)))
+        trace_kmem_alloc_io(size, flags, _RET_IP_);
+
+        if (WARN_ON_ONCE(align_mask >= PAGE_SIZE))
+                align_mask = PAGE_SIZE - 1;
+
+        ptr = kmem_alloc(size, flags | KM_MAYFAIL);
+        if (ptr) {
+                if (!((uintptr_t)ptr & align_mask))
                         return ptr;
-                if (!(++retries % 100))
-                        xfs_err(NULL,
-        "%s(%u) possible memory allocation deadlock size %zu in %s (mode:0x%x)",
-                                current->comm, current->pid,
-                                newsize, __func__, lflags);
-                congestion_wait(BLK_RW_ASYNC, HZ/50);
-        } while (1);
+                kfree(ptr);
+        }
+        return __kmem_vmalloc(size, flags);
 }
 
 void *
-kmem_zone_alloc(kmem_zone_t *zone, xfs_km_flags_t flags)
+kmem_alloc_large(size_t size, xfs_km_flags_t flags)
 {
-        int     retries = 0;
-        gfp_t   lflags = kmem_flags_convert(flags);
         void    *ptr;
 
-        do {
-                ptr = kmem_cache_alloc(zone, lflags);
-                if (ptr || (flags & (KM_MAYFAIL|KM_NOSLEEP)))
-                        return ptr;
-                if (!(++retries % 100))
-                        xfs_err(NULL,
-        "%s(%u) possible memory allocation deadlock in %s (mode:0x%x)",
-                                current->comm, current->pid,
-                                __func__, lflags);
-                congestion_wait(BLK_RW_ASYNC, HZ/50);
-        } while (1);
+        trace_kmem_alloc_large(size, flags, _RET_IP_);
+
+        ptr = kmem_alloc(size, flags | KM_MAYFAIL);
+        if (ptr)
+                return ptr;
+        return __kmem_vmalloc(size, flags);
 }
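The alignment check in kmem_alloc_io() works because an address is N-byte aligned exactly when its low log2(N) bits are zero, so the mask for N-byte alignment is N - 1. A small user-space illustration of the same arithmetic (the addresses are made up for the example; in-tree callers derive @align_mask from the block device's DMA alignment):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uintptr_t align_mask = 511;    /* 512-byte alignment -> mask 0x1ff */
            uintptr_t a = 0x12345000;      /* low 9 bits clear: aligned */
            uintptr_t b = 0x12345060;      /* 0x60 & 0x1ff != 0: unaligned */

            printf("%#lx ok? %d\n", (unsigned long)a, !(a & align_mask)); /* 1 */
            printf("%#lx ok? %d\n", (unsigned long)b, !(b & align_mask)); /* 0 */
            return 0;
    }

When the kmalloc() result fails this test, kmem_alloc_io() frees it and falls back to __kmem_vmalloc(), relying on vmalloc returning PAGE_SIZE-aligned regions, which is why align_mask is clamped to PAGE_SIZE - 1.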