.. | .. |
---|
19 | 19 | #include <linux/memory.h> |
---|
20 | 20 | #include <linux/memory_hotplug.h> |
---|
21 | 21 | #include <linux/mm.h> |
---|
22 | | -#include <linux/mutex.h> |
---|
23 | 22 | #include <linux/stat.h> |
---|
24 | 23 | #include <linux/slab.h> |
---|
| 24 | +#include <linux/xarray.h> |
---|
25 | 25 | |
---|
26 | 26 | #include <linux/atomic.h> |
---|
27 | 27 | #include <linux/uaccess.h> |
---|
28 | 28 | |
---|
29 | | -static DEFINE_MUTEX(mem_sysfs_mutex); |
---|
30 | | - |
---|
31 | 29 | #define MEMORY_CLASS_NAME "memory" |
---|
| 30 | + |
---|
| 31 | +static const char *const online_type_to_str[] = { |
---|
| 32 | + [MMOP_OFFLINE] = "offline", |
---|
| 33 | + [MMOP_ONLINE] = "online", |
---|
| 34 | + [MMOP_ONLINE_KERNEL] = "online_kernel", |
---|
| 35 | + [MMOP_ONLINE_MOVABLE] = "online_movable", |
---|
| 36 | +}; |
---|
| 37 | + |
---|
| 38 | +int memhp_online_type_from_str(const char *str) |
---|
| 39 | +{ |
---|
| 40 | + int i; |
---|
| 41 | + |
---|
| 42 | + for (i = 0; i < ARRAY_SIZE(online_type_to_str); i++) { |
---|
| 43 | + if (sysfs_streq(str, online_type_to_str[i])) |
---|
| 44 | + return i; |
---|
| 45 | + } |
---|
| 46 | + return -EINVAL; |
---|
| 47 | +} |
---|
32 | 48 | |
---|
33 | 49 | #define to_memory_block(dev) container_of(dev, struct memory_block, dev) |
---|
34 | 50 | |
---|
35 | 51 | static int sections_per_block; |
---|
36 | 52 | |
---|
37 | | -static inline int base_memory_block_id(int section_nr) |
---|
| 53 | +static inline unsigned long memory_block_id(unsigned long section_nr) |
---|
38 | 54 | { |
---|
39 | 55 | return section_nr / sections_per_block; |
---|
40 | 56 | } |
---|
41 | 57 | |
---|
42 | | -static inline int pfn_to_block_id(unsigned long pfn) |
---|
| 58 | +static inline unsigned long pfn_to_block_id(unsigned long pfn) |
---|
43 | 59 | { |
---|
44 | | - return base_memory_block_id(pfn_to_section_nr(pfn)); |
---|
| 60 | + return memory_block_id(pfn_to_section_nr(pfn)); |
---|
| 61 | +} |
---|
| 62 | + |
---|
| 63 | +static inline unsigned long phys_to_block_id(unsigned long phys) |
---|
| 64 | +{ |
---|
| 65 | + return pfn_to_block_id(PFN_DOWN(phys)); |
---|
45 | 66 | } |
---|
46 | 67 | |
---|
47 | 68 | static int memory_subsys_online(struct device *dev); |
---|
.. | .. |
---|
53 | 74 | .online = memory_subsys_online, |
---|
54 | 75 | .offline = memory_subsys_offline, |
---|
55 | 76 | }; |
---|
| 77 | + |
---|
| 78 | +/* |
---|
| 79 | + * Memory blocks are cached in a local radix tree to avoid |
---|
| 80 | + * a costly linear search for the corresponding device on |
---|
| 81 | + * the subsystem bus. |
---|
| 82 | + */ |
---|
| 83 | +static DEFINE_XARRAY(memory_blocks); |
---|
56 | 84 | |
---|
57 | 85 | static BLOCKING_NOTIFIER_HEAD(memory_chain); |
---|
58 | 86 | |
---|
.. | .. |
---|
68 | 96 | } |
---|
69 | 97 | EXPORT_SYMBOL(unregister_memory_notifier); |
---|
70 | 98 | |
---|
71 | | -static ATOMIC_NOTIFIER_HEAD(memory_isolate_chain); |
---|
72 | | - |
---|
73 | | -int register_memory_isolate_notifier(struct notifier_block *nb) |
---|
74 | | -{ |
---|
75 | | - return atomic_notifier_chain_register(&memory_isolate_chain, nb); |
---|
76 | | -} |
---|
77 | | -EXPORT_SYMBOL(register_memory_isolate_notifier); |
---|
78 | | - |
---|
79 | | -void unregister_memory_isolate_notifier(struct notifier_block *nb) |
---|
80 | | -{ |
---|
81 | | - atomic_notifier_chain_unregister(&memory_isolate_chain, nb); |
---|
82 | | -} |
---|
83 | | -EXPORT_SYMBOL(unregister_memory_isolate_notifier); |
---|
84 | | - |
---|
85 | 99 | static void memory_block_release(struct device *dev) |
---|
86 | 100 | { |
---|
87 | 101 | struct memory_block *mem = to_memory_block(dev); |
---|
.. | .. |
---|
93 | 107 | { |
---|
94 | 108 | return MIN_MEMORY_BLOCK_SIZE; |
---|
95 | 109 | } |
---|
96 | | - |
---|
97 | | -static unsigned long get_memory_block_size(void) |
---|
98 | | -{ |
---|
99 | | - unsigned long block_sz; |
---|
100 | | - |
---|
101 | | - block_sz = memory_block_size_bytes(); |
---|
102 | | - |
---|
103 | | - /* Validate blk_sz is a power of 2 and not less than section size */ |
---|
104 | | - if ((block_sz & (block_sz - 1)) || (block_sz < MIN_MEMORY_BLOCK_SIZE)) { |
---|
105 | | - WARN_ON(1); |
---|
106 | | - block_sz = MIN_MEMORY_BLOCK_SIZE; |
---|
107 | | - } |
---|
108 | | - |
---|
109 | | - return block_sz; |
---|
110 | | -} |
---|
| 110 | +EXPORT_SYMBOL_GPL(memory_block_size_bytes); |
---|
111 | 111 | |
---|
112 | 112 | /* |
---|
113 | | - * use this as the physical section index that this memsection |
---|
114 | | - * uses. |
---|
| 113 | + * Show the first physical section index (number) of this memory block. |
---|
115 | 114 | */ |
---|
116 | | - |
---|
117 | | -static ssize_t show_mem_start_phys_index(struct device *dev, |
---|
118 | | - struct device_attribute *attr, char *buf) |
---|
| 115 | +static ssize_t phys_index_show(struct device *dev, |
---|
| 116 | + struct device_attribute *attr, char *buf) |
---|
119 | 117 | { |
---|
120 | 118 | struct memory_block *mem = to_memory_block(dev); |
---|
121 | 119 | unsigned long phys_index; |
---|
122 | 120 | |
---|
123 | 121 | phys_index = mem->start_section_nr / sections_per_block; |
---|
124 | | - return sprintf(buf, "%08lx\n", phys_index); |
---|
| 122 | + |
---|
| 123 | + return sysfs_emit(buf, "%08lx\n", phys_index); |
---|
125 | 124 | } |
---|
126 | 125 | |
---|
127 | 126 | /* |
---|
128 | | - * Show whether the section of memory is likely to be hot-removable |
---|
| 127 | + * Legacy interface that we cannot remove. Always indicate "removable" |
---|
| 128 | + * with CONFIG_MEMORY_HOTREMOVE - bad heuristic. |
---|
129 | 129 | */ |
---|
130 | | -static ssize_t show_mem_removable(struct device *dev, |
---|
131 | | - struct device_attribute *attr, char *buf) |
---|
| 130 | +static ssize_t removable_show(struct device *dev, struct device_attribute *attr, |
---|
| 131 | + char *buf) |
---|
132 | 132 | { |
---|
133 | | - unsigned long i, pfn; |
---|
134 | | - int ret = 1; |
---|
135 | | - struct memory_block *mem = to_memory_block(dev); |
---|
136 | | - |
---|
137 | | - if (mem->state != MEM_ONLINE) |
---|
138 | | - goto out; |
---|
139 | | - |
---|
140 | | - for (i = 0; i < sections_per_block; i++) { |
---|
141 | | - if (!present_section_nr(mem->start_section_nr + i)) |
---|
142 | | - continue; |
---|
143 | | - pfn = section_nr_to_pfn(mem->start_section_nr + i); |
---|
144 | | - ret &= is_mem_section_removable(pfn, PAGES_PER_SECTION); |
---|
145 | | - } |
---|
146 | | - |
---|
147 | | -out: |
---|
148 | | - return sprintf(buf, "%d\n", ret); |
---|
| 133 | + return sysfs_emit(buf, "%d\n", (int)IS_ENABLED(CONFIG_MEMORY_HOTREMOVE)); |
---|
149 | 134 | } |
---|
150 | 135 | |
---|
151 | 136 | /* |
---|
152 | 137 | * online, offline, going offline, etc. |
---|
153 | 138 | */ |
---|
154 | | -static ssize_t show_mem_state(struct device *dev, |
---|
155 | | - struct device_attribute *attr, char *buf) |
---|
| 139 | +static ssize_t state_show(struct device *dev, struct device_attribute *attr, |
---|
| 140 | + char *buf) |
---|
156 | 141 | { |
---|
157 | 142 | struct memory_block *mem = to_memory_block(dev); |
---|
158 | | - ssize_t len = 0; |
---|
| 143 | + const char *output; |
---|
159 | 144 | |
---|
160 | 145 | /* |
---|
161 | 146 | * We can probably put these states in a nice little array |
---|
.. | .. |
---|
163 | 148 | */ |
---|
164 | 149 | switch (mem->state) { |
---|
165 | 150 | case MEM_ONLINE: |
---|
166 | | - len = sprintf(buf, "online\n"); |
---|
| 151 | + output = "online"; |
---|
167 | 152 | break; |
---|
168 | 153 | case MEM_OFFLINE: |
---|
169 | | - len = sprintf(buf, "offline\n"); |
---|
| 154 | + output = "offline"; |
---|
170 | 155 | break; |
---|
171 | 156 | case MEM_GOING_OFFLINE: |
---|
172 | | - len = sprintf(buf, "going-offline\n"); |
---|
| 157 | + output = "going-offline"; |
---|
173 | 158 | break; |
---|
174 | 159 | default: |
---|
175 | | - len = sprintf(buf, "ERROR-UNKNOWN-%ld\n", |
---|
176 | | - mem->state); |
---|
177 | 160 | WARN_ON(1); |
---|
178 | | - break; |
---|
| 161 | + return sysfs_emit(buf, "ERROR-UNKNOWN-%ld\n", mem->state); |
---|
179 | 162 | } |
---|
180 | 163 | |
---|
181 | | - return len; |
---|
| 164 | + return sysfs_emit(buf, "%s\n", output); |
---|
182 | 165 | } |
---|
183 | 166 | |
---|
184 | 167 | int memory_notify(unsigned long val, void *v) |
---|
185 | 168 | { |
---|
186 | 169 | return blocking_notifier_call_chain(&memory_chain, val, v); |
---|
187 | | -} |
---|
188 | | - |
---|
189 | | -int memory_isolate_notify(unsigned long val, void *v) |
---|
190 | | -{ |
---|
191 | | - return atomic_notifier_call_chain(&memory_isolate_chain, val, v); |
---|
192 | | -} |
---|
193 | | - |
---|
194 | | -/* |
---|
195 | | - * The probe routines leave the pages uninitialized, just as the bootmem code |
---|
196 | | - * does. Make sure we do not access them, but instead use only information from |
---|
197 | | - * within sections. |
---|
198 | | - */ |
---|
199 | | -static bool pages_correctly_probed(unsigned long start_pfn) |
---|
200 | | -{ |
---|
201 | | - unsigned long section_nr = pfn_to_section_nr(start_pfn); |
---|
202 | | - unsigned long section_nr_end = section_nr + sections_per_block; |
---|
203 | | - unsigned long pfn = start_pfn; |
---|
204 | | - |
---|
205 | | - /* |
---|
206 | | - * memmap between sections is not contiguous except with |
---|
207 | | - * SPARSEMEM_VMEMMAP. We lookup the page once per section |
---|
208 | | - * and assume memmap is contiguous within each section |
---|
209 | | - */ |
---|
210 | | - for (; section_nr < section_nr_end; section_nr++) { |
---|
211 | | - if (WARN_ON_ONCE(!pfn_valid(pfn))) |
---|
212 | | - return false; |
---|
213 | | - |
---|
214 | | - if (!present_section_nr(section_nr)) { |
---|
215 | | - pr_warn("section %ld pfn[%lx, %lx) not present", |
---|
216 | | - section_nr, pfn, pfn + PAGES_PER_SECTION); |
---|
217 | | - return false; |
---|
218 | | - } else if (!valid_section_nr(section_nr)) { |
---|
219 | | - pr_warn("section %ld pfn[%lx, %lx) no valid memmap", |
---|
220 | | - section_nr, pfn, pfn + PAGES_PER_SECTION); |
---|
221 | | - return false; |
---|
222 | | - } else if (online_section_nr(section_nr)) { |
---|
223 | | - pr_warn("section %ld pfn[%lx, %lx) is already online", |
---|
224 | | - section_nr, pfn, pfn + PAGES_PER_SECTION); |
---|
225 | | - return false; |
---|
226 | | - } |
---|
227 | | - pfn += PAGES_PER_SECTION; |
---|
228 | | - } |
---|
229 | | - |
---|
230 | | - return true; |
---|
231 | 170 | } |
---|
232 | 171 | |
---|
233 | 172 | /* |
---|
.. | .. |
---|
236 | 175 | */ |
---|
237 | 176 | static int |
---|
238 | 177 | memory_block_action(unsigned long start_section_nr, unsigned long action, |
---|
239 | | - int online_type) |
---|
| 178 | + int online_type, int nid) |
---|
240 | 179 | { |
---|
241 | 180 | unsigned long start_pfn; |
---|
242 | 181 | unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block; |
---|
.. | .. |
---|
246 | 185 | |
---|
247 | 186 | switch (action) { |
---|
248 | 187 | case MEM_ONLINE: |
---|
249 | | - if (!pages_correctly_probed(start_pfn)) |
---|
250 | | - return -EBUSY; |
---|
251 | | - |
---|
252 | | - ret = online_pages(start_pfn, nr_pages, online_type); |
---|
| 188 | + ret = online_pages(start_pfn, nr_pages, online_type, nid); |
---|
253 | 189 | break; |
---|
254 | 190 | case MEM_OFFLINE: |
---|
255 | 191 | ret = offline_pages(start_pfn, nr_pages); |
---|
.. | .. |
---|
275 | 211 | mem->state = MEM_GOING_OFFLINE; |
---|
276 | 212 | |
---|
277 | 213 | ret = memory_block_action(mem->start_section_nr, to_state, |
---|
278 | | - mem->online_type); |
---|
| 214 | + mem->online_type, mem->nid); |
---|
279 | 215 | |
---|
280 | 216 | mem->state = ret ? from_state_req : to_state; |
---|
281 | 217 | |
---|
.. | .. |
---|
292 | 228 | return 0; |
---|
293 | 229 | |
---|
294 | 230 | /* |
---|
295 | | - * If we are called from store_mem_state(), online_type will be |
---|
296 | | - * set >= 0 Otherwise we were called from the device online |
---|
297 | | - * attribute and need to set the online_type. |
---|
| 231 | + * When called via device_online() without configuring the online_type, |
---|
| 232 | + * we want to default to MMOP_ONLINE. |
---|
298 | 233 | */ |
---|
299 | | - if (mem->online_type < 0) |
---|
300 | | - mem->online_type = MMOP_ONLINE_KEEP; |
---|
| 234 | + if (mem->online_type == MMOP_OFFLINE) |
---|
| 235 | + mem->online_type = MMOP_ONLINE; |
---|
301 | 236 | |
---|
302 | 237 | ret = memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE); |
---|
303 | | - |
---|
304 | | - /* clear online_type */ |
---|
305 | | - mem->online_type = -1; |
---|
| 238 | + mem->online_type = MMOP_OFFLINE; |
---|
306 | 239 | |
---|
307 | 240 | return ret; |
---|
308 | 241 | } |
---|
.. | .. |
---|
314 | 247 | if (mem->state == MEM_OFFLINE) |
---|
315 | 248 | return 0; |
---|
316 | 249 | |
---|
317 | | - /* Can't offline block with non-present sections */ |
---|
318 | | - if (mem->section_count != sections_per_block) |
---|
319 | | - return -EINVAL; |
---|
320 | | - |
---|
321 | 250 | return memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE); |
---|
322 | 251 | } |
---|
323 | 252 | |
---|
324 | | -static ssize_t |
---|
325 | | -store_mem_state(struct device *dev, |
---|
326 | | - struct device_attribute *attr, const char *buf, size_t count) |
---|
| 253 | +static ssize_t state_store(struct device *dev, struct device_attribute *attr, |
---|
| 254 | + const char *buf, size_t count) |
---|
327 | 255 | { |
---|
| 256 | + const int online_type = memhp_online_type_from_str(buf); |
---|
328 | 257 | struct memory_block *mem = to_memory_block(dev); |
---|
329 | | - int ret, online_type; |
---|
| 258 | + int ret; |
---|
| 259 | + |
---|
| 260 | + if (online_type < 0) |
---|
| 261 | + return -EINVAL; |
---|
330 | 262 | |
---|
331 | 263 | ret = lock_device_hotplug_sysfs(); |
---|
332 | 264 | if (ret) |
---|
333 | 265 | return ret; |
---|
334 | 266 | |
---|
335 | | - if (sysfs_streq(buf, "online_kernel")) |
---|
336 | | - online_type = MMOP_ONLINE_KERNEL; |
---|
337 | | - else if (sysfs_streq(buf, "online_movable")) |
---|
338 | | - online_type = MMOP_ONLINE_MOVABLE; |
---|
339 | | - else if (sysfs_streq(buf, "online")) |
---|
340 | | - online_type = MMOP_ONLINE_KEEP; |
---|
341 | | - else if (sysfs_streq(buf, "offline")) |
---|
342 | | - online_type = MMOP_OFFLINE; |
---|
343 | | - else { |
---|
344 | | - ret = -EINVAL; |
---|
345 | | - goto err; |
---|
346 | | - } |
---|
347 | | - |
---|
348 | 267 | switch (online_type) { |
---|
349 | 268 | case MMOP_ONLINE_KERNEL: |
---|
350 | 269 | case MMOP_ONLINE_MOVABLE: |
---|
351 | | - case MMOP_ONLINE_KEEP: |
---|
| 270 | + case MMOP_ONLINE: |
---|
352 | 271 | /* mem->online_type is protected by device_hotplug_lock */ |
---|
353 | 272 | mem->online_type = online_type; |
---|
354 | 273 | ret = device_online(&mem->dev); |
---|
.. | .. |
---|
360 | 279 | ret = -EINVAL; /* should never happen */ |
---|
361 | 280 | } |
---|
362 | 281 | |
---|
363 | | -err: |
---|
364 | 282 | unlock_device_hotplug(); |
---|
365 | 283 | |
---|
366 | 284 | if (ret < 0) |
---|
.. | .. |
---|
372 | 290 | } |
---|
373 | 291 | |
---|
374 | 292 | /* |
---|
375 | | - * phys_device is a bad name for this. What I really want |
---|
376 | | - * is a way to differentiate between memory ranges that |
---|
377 | | - * are part of physical devices that constitute |
---|
378 | | - * a complete removable unit or fru. |
---|
379 | | - * i.e. do these ranges belong to the same physical device, |
---|
380 | | - * s.t. if I offline all of these sections I can then |
---|
381 | | - * remove the physical device? |
---|
| 293 | + * Legacy interface that we cannot remove: s390x exposes the storage increment |
---|
| 294 | + * covered by a memory block, allowing for identifying which memory blocks |
---|
| 295 | + * comprise a storage increment. Since a memory block spans complete |
---|
| 296 | + * storage increments nowadays, this interface is basically unused. Other |
---|
| 297 | + * archs never exposed != 0. |
---|
382 | 298 | */ |
---|
383 | | -static ssize_t show_phys_device(struct device *dev, |
---|
| 299 | +static ssize_t phys_device_show(struct device *dev, |
---|
384 | 300 | struct device_attribute *attr, char *buf) |
---|
385 | 301 | { |
---|
386 | 302 | struct memory_block *mem = to_memory_block(dev); |
---|
387 | | - return sprintf(buf, "%d\n", mem->phys_device); |
---|
| 303 | + unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr); |
---|
| 304 | + |
---|
| 305 | + return sysfs_emit(buf, "%d\n", |
---|
| 306 | + arch_get_memory_phys_device(start_pfn)); |
---|
388 | 307 | } |
---|
389 | 308 | |
---|
390 | 309 | #ifdef CONFIG_MEMORY_HOTREMOVE |
---|
391 | | -static void print_allowed_zone(char *buf, int nid, unsigned long start_pfn, |
---|
392 | | - unsigned long nr_pages, int online_type, |
---|
393 | | - struct zone *default_zone) |
---|
| 310 | +static int print_allowed_zone(char *buf, int len, int nid, |
---|
| 311 | + unsigned long start_pfn, unsigned long nr_pages, |
---|
| 312 | + int online_type, struct zone *default_zone) |
---|
394 | 313 | { |
---|
395 | 314 | struct zone *zone; |
---|
396 | 315 | |
---|
397 | 316 | zone = zone_for_pfn_range(online_type, nid, start_pfn, nr_pages); |
---|
398 | | - if (zone != default_zone) { |
---|
399 | | - strcat(buf, " "); |
---|
400 | | - strcat(buf, zone->name); |
---|
401 | | - } |
---|
| 317 | + if (zone == default_zone) |
---|
| 318 | + return 0; |
---|
| 319 | + |
---|
| 320 | + return sysfs_emit_at(buf, len, " %s", zone->name); |
---|
402 | 321 | } |
---|
403 | 322 | |
---|
404 | | -static ssize_t show_valid_zones(struct device *dev, |
---|
| 323 | +static ssize_t valid_zones_show(struct device *dev, |
---|
405 | 324 | struct device_attribute *attr, char *buf) |
---|
406 | 325 | { |
---|
407 | 326 | struct memory_block *mem = to_memory_block(dev); |
---|
408 | 327 | unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr); |
---|
409 | 328 | unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block; |
---|
410 | | - unsigned long valid_start_pfn, valid_end_pfn; |
---|
411 | 329 | struct zone *default_zone; |
---|
| 330 | + int len = 0; |
---|
412 | 331 | int nid; |
---|
413 | 332 | |
---|
414 | 333 | /* |
---|
.. | .. |
---|
420 | 339 | * The block contains more than one zone can not be offlined. |
---|
421 | 340 | * This can happen e.g. for ZONE_DMA and ZONE_DMA32 |
---|
422 | 341 | */ |
---|
423 | | - if (!test_pages_in_a_zone(start_pfn, start_pfn + nr_pages, |
---|
424 | | - &valid_start_pfn, &valid_end_pfn)) |
---|
425 | | - return sprintf(buf, "none\n"); |
---|
426 | | - start_pfn = valid_start_pfn; |
---|
427 | | - strcat(buf, page_zone(pfn_to_page(start_pfn))->name); |
---|
| 342 | + default_zone = test_pages_in_a_zone(start_pfn, |
---|
| 343 | + start_pfn + nr_pages); |
---|
| 344 | + if (!default_zone) |
---|
| 345 | + return sysfs_emit(buf, "%s\n", "none"); |
---|
| 346 | + len += sysfs_emit_at(buf, len, "%s", default_zone->name); |
---|
428 | 347 | goto out; |
---|
429 | 348 | } |
---|
430 | 349 | |
---|
431 | 350 | nid = mem->nid; |
---|
432 | | - default_zone = zone_for_pfn_range(MMOP_ONLINE_KEEP, nid, start_pfn, nr_pages); |
---|
433 | | - strcat(buf, default_zone->name); |
---|
| 351 | + default_zone = zone_for_pfn_range(MMOP_ONLINE, nid, start_pfn, |
---|
| 352 | + nr_pages); |
---|
434 | 353 | |
---|
435 | | - print_allowed_zone(buf, nid, start_pfn, nr_pages, MMOP_ONLINE_KERNEL, |
---|
436 | | - default_zone); |
---|
437 | | - print_allowed_zone(buf, nid, start_pfn, nr_pages, MMOP_ONLINE_MOVABLE, |
---|
438 | | - default_zone); |
---|
| 354 | + len += sysfs_emit_at(buf, len, "%s", default_zone->name); |
---|
| 355 | + len += print_allowed_zone(buf, len, nid, start_pfn, nr_pages, |
---|
| 356 | + MMOP_ONLINE_KERNEL, default_zone); |
---|
| 357 | + len += print_allowed_zone(buf, len, nid, start_pfn, nr_pages, |
---|
| 358 | + MMOP_ONLINE_MOVABLE, default_zone); |
---|
439 | 359 | out: |
---|
440 | | - strcat(buf, "\n"); |
---|
441 | | - |
---|
442 | | - return strlen(buf); |
---|
| 360 | + len += sysfs_emit_at(buf, len, "\n"); |
---|
| 361 | + return len; |
---|
443 | 362 | } |
---|
444 | | -static DEVICE_ATTR(valid_zones, 0444, show_valid_zones, NULL); |
---|
| 363 | +static DEVICE_ATTR_RO(valid_zones); |
---|
445 | 364 | #endif |
---|
446 | 365 | |
---|
447 | | -static DEVICE_ATTR(phys_index, 0444, show_mem_start_phys_index, NULL); |
---|
448 | | -static DEVICE_ATTR(state, 0644, show_mem_state, store_mem_state); |
---|
449 | | -static DEVICE_ATTR(phys_device, 0444, show_phys_device, NULL); |
---|
450 | | -static DEVICE_ATTR(removable, 0444, show_mem_removable, NULL); |
---|
| 366 | +static DEVICE_ATTR_RO(phys_index); |
---|
| 367 | +static DEVICE_ATTR_RW(state); |
---|
| 368 | +static DEVICE_ATTR_RO(phys_device); |
---|
| 369 | +static DEVICE_ATTR_RO(removable); |
---|
451 | 370 | |
---|
452 | 371 | /* |
---|
453 | | - * Block size attribute stuff |
---|
| 372 | + * Show the memory block size (shared by all memory blocks). |
---|
454 | 373 | */ |
---|
455 | | -static ssize_t |
---|
456 | | -print_block_size(struct device *dev, struct device_attribute *attr, |
---|
457 | | - char *buf) |
---|
| 374 | +static ssize_t block_size_bytes_show(struct device *dev, |
---|
| 375 | + struct device_attribute *attr, char *buf) |
---|
458 | 376 | { |
---|
459 | | - return sprintf(buf, "%lx\n", get_memory_block_size()); |
---|
| 377 | + return sysfs_emit(buf, "%lx\n", memory_block_size_bytes()); |
---|
460 | 378 | } |
---|
461 | 379 | |
---|
462 | | -static DEVICE_ATTR(block_size_bytes, 0444, print_block_size, NULL); |
---|
| 380 | +static DEVICE_ATTR_RO(block_size_bytes); |
---|
463 | 381 | |
---|
464 | 382 | /* |
---|
465 | 383 | * Memory auto online policy. |
---|
466 | 384 | */ |
---|
467 | 385 | |
---|
468 | | -static ssize_t |
---|
469 | | -show_auto_online_blocks(struct device *dev, struct device_attribute *attr, |
---|
470 | | - char *buf) |
---|
| 386 | +static ssize_t auto_online_blocks_show(struct device *dev, |
---|
| 387 | + struct device_attribute *attr, char *buf) |
---|
471 | 388 | { |
---|
472 | | - if (memhp_auto_online) |
---|
473 | | - return sprintf(buf, "online\n"); |
---|
474 | | - else |
---|
475 | | - return sprintf(buf, "offline\n"); |
---|
| 389 | + return sysfs_emit(buf, "%s\n", |
---|
| 390 | + online_type_to_str[memhp_default_online_type]); |
---|
476 | 391 | } |
---|
477 | 392 | |
---|
478 | | -static ssize_t |
---|
479 | | -store_auto_online_blocks(struct device *dev, struct device_attribute *attr, |
---|
480 | | - const char *buf, size_t count) |
---|
| 393 | +static ssize_t auto_online_blocks_store(struct device *dev, |
---|
| 394 | + struct device_attribute *attr, |
---|
| 395 | + const char *buf, size_t count) |
---|
481 | 396 | { |
---|
482 | | - if (sysfs_streq(buf, "online")) |
---|
483 | | - memhp_auto_online = true; |
---|
484 | | - else if (sysfs_streq(buf, "offline")) |
---|
485 | | - memhp_auto_online = false; |
---|
486 | | - else |
---|
| 397 | + const int online_type = memhp_online_type_from_str(buf); |
---|
| 398 | + |
---|
| 399 | + if (online_type < 0) |
---|
487 | 400 | return -EINVAL; |
---|
488 | 401 | |
---|
| 402 | + memhp_default_online_type = online_type; |
---|
489 | 403 | return count; |
---|
490 | 404 | } |
---|
491 | 405 | |
---|
492 | | -static DEVICE_ATTR(auto_online_blocks, 0644, show_auto_online_blocks, |
---|
493 | | - store_auto_online_blocks); |
---|
| 406 | +static DEVICE_ATTR_RW(auto_online_blocks); |
---|
494 | 407 | |
---|
495 | 408 | /* |
---|
496 | 409 | * Some architectures will have custom drivers to do this, and |
---|
.. | .. |
---|
499 | 412 | * and will require this interface. |
---|
500 | 413 | */ |
---|
501 | 414 | #ifdef CONFIG_ARCH_MEMORY_PROBE |
---|
502 | | -static ssize_t |
---|
503 | | -memory_probe_store(struct device *dev, struct device_attribute *attr, |
---|
504 | | - const char *buf, size_t count) |
---|
| 415 | +static ssize_t probe_store(struct device *dev, struct device_attribute *attr, |
---|
| 416 | + const char *buf, size_t count) |
---|
505 | 417 | { |
---|
506 | 418 | u64 phys_addr; |
---|
507 | 419 | int nid, ret; |
---|
.. | .. |
---|
520 | 432 | |
---|
521 | 433 | nid = memory_add_physaddr_to_nid(phys_addr); |
---|
522 | 434 | ret = __add_memory(nid, phys_addr, |
---|
523 | | - MIN_MEMORY_BLOCK_SIZE * sections_per_block); |
---|
| 435 | + MIN_MEMORY_BLOCK_SIZE * sections_per_block, |
---|
| 436 | + MHP_NONE); |
---|
524 | 437 | |
---|
525 | 438 | if (ret) |
---|
526 | 439 | goto out; |
---|
.. | .. |
---|
531 | 444 | return ret; |
---|
532 | 445 | } |
---|
533 | 446 | |
---|
534 | | -static DEVICE_ATTR(probe, S_IWUSR, NULL, memory_probe_store); |
---|
| 447 | +static DEVICE_ATTR_WO(probe); |
---|
535 | 448 | #endif |
---|
536 | 449 | |
---|
537 | 450 | #ifdef CONFIG_MEMORY_FAILURE |
---|
.. | .. |
---|
540 | 453 | */ |
---|
541 | 454 | |
---|
542 | 455 | /* Soft offline a page */ |
---|
543 | | -static ssize_t |
---|
544 | | -store_soft_offline_page(struct device *dev, |
---|
545 | | - struct device_attribute *attr, |
---|
546 | | - const char *buf, size_t count) |
---|
| 456 | +static ssize_t soft_offline_page_store(struct device *dev, |
---|
| 457 | + struct device_attribute *attr, |
---|
| 458 | + const char *buf, size_t count) |
---|
547 | 459 | { |
---|
548 | 460 | int ret; |
---|
549 | 461 | u64 pfn; |
---|
.. | .. |
---|
552 | 464 | if (kstrtoull(buf, 0, &pfn) < 0) |
---|
553 | 465 | return -EINVAL; |
---|
554 | 466 | pfn >>= PAGE_SHIFT; |
---|
555 | | - if (!pfn_valid(pfn)) |
---|
556 | | - return -ENXIO; |
---|
557 | | - /* Only online pages can be soft-offlined (esp., not ZONE_DEVICE). */ |
---|
558 | | - if (!pfn_to_online_page(pfn)) |
---|
559 | | - return -EIO; |
---|
560 | | - ret = soft_offline_page(pfn_to_page(pfn), 0); |
---|
| 467 | + ret = soft_offline_page(pfn, 0); |
---|
561 | 468 | return ret == 0 ? count : ret; |
---|
562 | 469 | } |
---|
563 | 470 | |
---|
564 | 471 | /* Forcibly offline a page, including killing processes. */ |
---|
565 | | -static ssize_t |
---|
566 | | -store_hard_offline_page(struct device *dev, |
---|
567 | | - struct device_attribute *attr, |
---|
568 | | - const char *buf, size_t count) |
---|
| 472 | +static ssize_t hard_offline_page_store(struct device *dev, |
---|
| 473 | + struct device_attribute *attr, |
---|
| 474 | + const char *buf, size_t count) |
---|
569 | 475 | { |
---|
570 | 476 | int ret; |
---|
571 | 477 | u64 pfn; |
---|
.. | .. |
---|
578 | 484 | return ret ? ret : count; |
---|
579 | 485 | } |
---|
580 | 486 | |
---|
581 | | -static DEVICE_ATTR(soft_offline_page, S_IWUSR, NULL, store_soft_offline_page); |
---|
582 | | -static DEVICE_ATTR(hard_offline_page, S_IWUSR, NULL, store_hard_offline_page); |
---|
| 487 | +static DEVICE_ATTR_WO(soft_offline_page); |
---|
| 488 | +static DEVICE_ATTR_WO(hard_offline_page); |
---|
583 | 489 | #endif |
---|
584 | 490 | |
---|
585 | | -/* |
---|
586 | | - * Note that phys_device is optional. It is here to allow for |
---|
587 | | - * differentiation between which *physical* devices each |
---|
588 | | - * section belongs to... |
---|
589 | | - */ |
---|
| 491 | +/* See phys_device_show(). */ |
---|
590 | 492 | int __weak arch_get_memory_phys_device(unsigned long start_pfn) |
---|
591 | 493 | { |
---|
592 | 494 | return 0; |
---|
593 | 495 | } |
---|
594 | 496 | |
---|
595 | 497 | /* |
---|
596 | | - * A reference for the returned object is held and the reference for the |
---|
597 | | - * hinted object is released. |
---|
| 498 | + * A reference for the returned memory block device is acquired. |
---|
| 499 | + * |
---|
| 500 | + * Called under device_hotplug_lock. |
---|
598 | 501 | */ |
---|
599 | | -static struct memory_block *find_memory_block_by_id(int block_id, |
---|
600 | | - struct memory_block *hint) |
---|
| 502 | +static struct memory_block *find_memory_block_by_id(unsigned long block_id) |
---|
601 | 503 | { |
---|
602 | | - struct device *hintdev = hint ? &hint->dev : NULL; |
---|
603 | | - struct device *dev; |
---|
| 504 | + struct memory_block *mem; |
---|
604 | 505 | |
---|
605 | | - dev = subsys_find_device_by_id(&memory_subsys, block_id, hintdev); |
---|
606 | | - if (hint) |
---|
607 | | - put_device(&hint->dev); |
---|
608 | | - if (!dev) |
---|
609 | | - return NULL; |
---|
610 | | - return to_memory_block(dev); |
---|
611 | | -} |
---|
612 | | - |
---|
613 | | -struct memory_block *find_memory_block_hinted(struct mem_section *section, |
---|
614 | | - struct memory_block *hint) |
---|
615 | | -{ |
---|
616 | | - int block_id = base_memory_block_id(__section_nr(section)); |
---|
617 | | - |
---|
618 | | - return find_memory_block_by_id(block_id, hint); |
---|
| 506 | + mem = xa_load(&memory_blocks, block_id); |
---|
| 507 | + if (mem) |
---|
| 508 | + get_device(&mem->dev); |
---|
| 509 | + return mem; |
---|
619 | 510 | } |
---|
620 | 511 | |
---|
621 | 512 | /* |
---|
622 | | - * For now, we have a linear search to go find the appropriate |
---|
623 | | - * memory_block corresponding to a particular phys_index. If |
---|
624 | | - * this gets to be a real problem, we can always use a radix |
---|
625 | | - * tree or something here. |
---|
626 | | - * |
---|
627 | | - * This could be made generic for all device subsystems. |
---|
| 513 | + * Called under device_hotplug_lock. |
---|
628 | 514 | */ |
---|
629 | 515 | struct memory_block *find_memory_block(struct mem_section *section) |
---|
630 | 516 | { |
---|
631 | | - return find_memory_block_hinted(section, NULL); |
---|
| 517 | + unsigned long block_id = memory_block_id(__section_nr(section)); |
---|
| 518 | + |
---|
| 519 | + return find_memory_block_by_id(block_id); |
---|
632 | 520 | } |
---|
633 | 521 | |
---|
634 | 522 | static struct attribute *memory_memblk_attrs[] = { |
---|
.. | .. |
---|
666 | 554 | memory->dev.offline = memory->state == MEM_OFFLINE; |
---|
667 | 555 | |
---|
668 | 556 | ret = device_register(&memory->dev); |
---|
669 | | - if (ret) |
---|
| 557 | + if (ret) { |
---|
670 | 558 | put_device(&memory->dev); |
---|
| 559 | + return ret; |
---|
| 560 | + } |
---|
| 561 | + ret = xa_err(xa_store(&memory_blocks, memory->dev.id, memory, |
---|
| 562 | + GFP_KERNEL)); |
---|
| 563 | + if (ret) |
---|
| 564 | + device_unregister(&memory->dev); |
---|
671 | 565 | |
---|
672 | 566 | return ret; |
---|
673 | 567 | } |
---|
674 | 568 | |
---|
675 | | -static int init_memory_block(struct memory_block **memory, int block_id, |
---|
676 | | - unsigned long state) |
---|
| 569 | +static int init_memory_block(unsigned long block_id, unsigned long state) |
---|
677 | 570 | { |
---|
678 | 571 | struct memory_block *mem; |
---|
679 | | - unsigned long start_pfn; |
---|
680 | 572 | int ret = 0; |
---|
681 | 573 | |
---|
682 | | - mem = find_memory_block_by_id(block_id, NULL); |
---|
| 574 | + mem = find_memory_block_by_id(block_id); |
---|
683 | 575 | if (mem) { |
---|
684 | 576 | put_device(&mem->dev); |
---|
685 | 577 | return -EEXIST; |
---|
.. | .. |
---|
689 | 581 | return -ENOMEM; |
---|
690 | 582 | |
---|
691 | 583 | mem->start_section_nr = block_id * sections_per_block; |
---|
692 | | - mem->end_section_nr = mem->start_section_nr + sections_per_block - 1; |
---|
693 | 584 | mem->state = state; |
---|
694 | | - start_pfn = section_nr_to_pfn(mem->start_section_nr); |
---|
695 | | - mem->phys_device = arch_get_memory_phys_device(start_pfn); |
---|
696 | 585 | mem->nid = NUMA_NO_NODE; |
---|
697 | 586 | |
---|
698 | 587 | ret = register_memory(mem); |
---|
699 | 588 | |
---|
700 | | - *memory = mem; |
---|
701 | 589 | return ret; |
---|
702 | 590 | } |
---|
703 | 591 | |
---|
704 | | -static int add_memory_block(int base_section_nr) |
---|
| 592 | +static int add_memory_block(unsigned long base_section_nr) |
---|
705 | 593 | { |
---|
706 | | - struct memory_block *mem; |
---|
707 | | - int i, ret, section_count = 0; |
---|
| 594 | + int section_count = 0; |
---|
| 595 | + unsigned long nr; |
---|
708 | 596 | |
---|
709 | | - for (i = base_section_nr; |
---|
710 | | - i < base_section_nr + sections_per_block; |
---|
711 | | - i++) |
---|
712 | | - if (present_section_nr(i)) |
---|
| 597 | + for (nr = base_section_nr; nr < base_section_nr + sections_per_block; |
---|
| 598 | + nr++) |
---|
| 599 | + if (present_section_nr(nr)) |
---|
713 | 600 | section_count++; |
---|
714 | 601 | |
---|
715 | 602 | if (section_count == 0) |
---|
716 | 603 | return 0; |
---|
717 | | - ret = init_memory_block(&mem, base_memory_block_id(base_section_nr), |
---|
718 | | - MEM_ONLINE); |
---|
719 | | - if (ret) |
---|
720 | | - return ret; |
---|
721 | | - mem->section_count = section_count; |
---|
722 | | - return 0; |
---|
| 604 | + return init_memory_block(memory_block_id(base_section_nr), |
---|
| 605 | + MEM_ONLINE); |
---|
723 | 606 | } |
---|
724 | 607 | |
---|
725 | 608 | static void unregister_memory(struct memory_block *memory) |
---|
726 | 609 | { |
---|
727 | 610 | if (WARN_ON_ONCE(memory->dev.bus != &memory_subsys)) |
---|
728 | 611 | return; |
---|
| 612 | + |
---|
| 613 | + WARN_ON(xa_erase(&memory_blocks, memory->dev.id) == NULL); |
---|
729 | 614 | |
---|
730 | 615 | /* drop the ref. we got via find_memory_block() */ |
---|
731 | 616 | put_device(&memory->dev); |
---|
.. | .. |
---|
736 | 621 | * Create memory block devices for the given memory area. Start and size |
---|
737 | 622 | * have to be aligned to memory block granularity. Memory block devices |
---|
738 | 623 | * will be initialized as offline. |
---|
| 624 | + * |
---|
| 625 | + * Called under device_hotplug_lock. |
---|
739 | 626 | */ |
---|
740 | 627 | int create_memory_block_devices(unsigned long start, unsigned long size) |
---|
741 | 628 | { |
---|
742 | | - const int start_block_id = pfn_to_block_id(PFN_DOWN(start)); |
---|
743 | | - int end_block_id = pfn_to_block_id(PFN_DOWN(start + size)); |
---|
| 629 | + const unsigned long start_block_id = pfn_to_block_id(PFN_DOWN(start)); |
---|
| 630 | + unsigned long end_block_id = pfn_to_block_id(PFN_DOWN(start + size)); |
---|
744 | 631 | struct memory_block *mem; |
---|
745 | 632 | unsigned long block_id; |
---|
746 | 633 | int ret = 0; |
---|
.. | .. |
---|
749 | 636 | !IS_ALIGNED(size, memory_block_size_bytes()))) |
---|
750 | 637 | return -EINVAL; |
---|
751 | 638 | |
---|
752 | | - mutex_lock(&mem_sysfs_mutex); |
---|
753 | 639 | for (block_id = start_block_id; block_id != end_block_id; block_id++) { |
---|
754 | | - ret = init_memory_block(&mem, block_id, MEM_OFFLINE); |
---|
| 640 | + ret = init_memory_block(block_id, MEM_OFFLINE); |
---|
755 | 641 | if (ret) |
---|
756 | 642 | break; |
---|
757 | | - mem->section_count = sections_per_block; |
---|
758 | 643 | } |
---|
759 | 644 | if (ret) { |
---|
760 | 645 | end_block_id = block_id; |
---|
761 | 646 | for (block_id = start_block_id; block_id != end_block_id; |
---|
762 | 647 | block_id++) { |
---|
763 | | - mem = find_memory_block_by_id(block_id, NULL); |
---|
764 | | - mem->section_count = 0; |
---|
| 648 | + mem = find_memory_block_by_id(block_id); |
---|
| 649 | + if (WARN_ON_ONCE(!mem)) |
---|
| 650 | + continue; |
---|
765 | 651 | unregister_memory(mem); |
---|
766 | 652 | } |
---|
767 | 653 | } |
---|
768 | | - mutex_unlock(&mem_sysfs_mutex); |
---|
769 | 654 | return ret; |
---|
770 | 655 | } |
---|
771 | 656 | |
---|
.. | .. |
---|
773 | 658 | * Remove memory block devices for the given memory area. Start and size |
---|
774 | 659 | * have to be aligned to memory block granularity. Memory block devices |
---|
775 | 660 | * have to be offline. |
---|
| 661 | + * |
---|
| 662 | + * Called under device_hotplug_lock. |
---|
776 | 663 | */ |
---|
void remove_memory_block_devices(unsigned long start, unsigned long size)
{
	const unsigned long start_block_id = pfn_to_block_id(PFN_DOWN(start));
	const unsigned long end_block_id = pfn_to_block_id(PFN_DOWN(start + size));
	struct memory_block *mem;
	unsigned long block_id;

	/* Both the range start and its size must be block-granularity aligned. */
	if (WARN_ON_ONCE(!IS_ALIGNED(start, memory_block_size_bytes()) ||
			 !IS_ALIGNED(size, memory_block_size_bytes())))
		return;

	for (block_id = start_block_id; block_id != end_block_id; block_id++) {
		/* Takes a reference on the device; dropped by unregister_memory(). */
		mem = find_memory_block_by_id(block_id);
		if (WARN_ON_ONCE(!mem))
			continue;
		/* Remove the node links first, while the device still exists. */
		unregister_memory_block_under_nodes(mem);
		unregister_memory(mem);
	}
}
---|
799 | 683 | |
---|
800 | 684 | /* return true if the memory block is offlined, otherwise, return false */ |
---|
.. | .. |
---|
828 | 712 | }; |
---|
829 | 713 | |
---|
/*
 * Initialize the sysfs support for memory devices. At the time this function
 * is called, we cannot have concurrent creation/deletion of memory block
 * devices, the device_hotplug_lock is not needed.
 */
void __init memory_dev_init(void)
{
	int ret;
	unsigned long block_sz, nr;

	/* Validate the configured memory block size */
	block_sz = memory_block_size_bytes();
	if (!is_power_of_2(block_sz) || block_sz < MIN_MEMORY_BLOCK_SIZE)
		panic("Memory block size not suitable: 0x%lx\n", block_sz);
	sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;

	/* A failure here during early boot is fatal: panic instead of limping on. */
	ret = subsys_system_register(&memory_subsys, memory_root_attr_groups);
	if (ret)
		panic("%s() failed to register subsystem: %d\n", __func__, ret);

	/*
	 * Create entries for memory sections that were found
	 * during boot and have been initialized
	 */
	for (nr = 0; nr <= __highest_present_section_nr;
	     nr += sections_per_block) {
		ret = add_memory_block(nr);
		if (ret)
			panic("%s() failed to add memory block: %d\n", __func__,
			      ret);
	}
}
---|
/**
 * walk_memory_blocks - walk through all present memory blocks overlapped
 *			by the range [start, start + size)
 *
 * @start: start address of the memory range
 * @size: size of the memory range
 * @arg: argument passed to func
 * @func: callback for each memory section walked
 *
 * This function walks through all present memory blocks overlapped by the
 * range [start, start + size), calling func on each memory block.
 *
 * In case func() returns an error, walking is aborted and the error is
 * returned.
 *
 * Called under device_hotplug_lock.
 */
int walk_memory_blocks(unsigned long start, unsigned long size,
		       void *arg, walk_memory_blocks_func_t func)
{
	const unsigned long start_block_id = phys_to_block_id(start);
	const unsigned long end_block_id = phys_to_block_id(start + size - 1);
	struct memory_block *mem;
	unsigned long block_id;
	int ret = 0;

	/* An empty range overlaps no blocks (and end_block_id would underflow). */
	if (!size)
		return 0;

	for (block_id = start_block_id; block_id <= end_block_id; block_id++) {
		mem = find_memory_block_by_id(block_id);
		/* Blocks without a device (not present) are simply skipped. */
		if (!mem)
			continue;

		ret = func(mem, arg);
		/* drop the reference obtained via find_memory_block_by_id() */
		put_device(&mem->dev);
		if (ret)
			break;
	}
	return ret;
}
---|
865 | 788 | |
---|