hc
2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/mm/nommu.c
@@ -1,10 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * linux/mm/nommu.c
  *
  * Replacement code for mm functions to support CPU's that don't
  * have any form of memory management unit (thus no virtual memory).
  *
- * See Documentation/nommu-mmap.txt
+ * See Documentation/admin-guide/mm/nommu-mmap.rst
  *
  * Copyright (c) 2004-2008 David Howells <dhowells@redhat.com>
  * Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com>
@@ -107,96 +108,8 @@
 	 * The ksize() function is only guaranteed to work for pointers
 	 * returned by kmalloc(). So handle arbitrary pointers here.
 	 */
-	return PAGE_SIZE << compound_order(page);
+	return page_size(page);
 }
-
-static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-			unsigned long start, unsigned long nr_pages,
-			unsigned int foll_flags, struct page **pages,
-			struct vm_area_struct **vmas, int *nonblocking)
-{
-	struct vm_area_struct *vma;
-	unsigned long vm_flags;
-	int i;
-
-	/* calculate required read or write permissions.
-	 * If FOLL_FORCE is set, we only require the "MAY" flags.
-	 */
-	vm_flags = (foll_flags & FOLL_WRITE) ?
-			(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
-	vm_flags &= (foll_flags & FOLL_FORCE) ?
-			(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
-
-	for (i = 0; i < nr_pages; i++) {
-		vma = find_vma(mm, start);
-		if (!vma)
-			goto finish_or_fault;
-
-		/* protect what we can, including chardevs */
-		if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
-		    !(vm_flags & vma->vm_flags))
-			goto finish_or_fault;
-
-		if (pages) {
-			pages[i] = virt_to_page(start);
-			if (pages[i])
-				get_page(pages[i]);
-		}
-		if (vmas)
-			vmas[i] = vma;
-		start = (start + PAGE_SIZE) & PAGE_MASK;
-	}
-
-	return i;
-
-finish_or_fault:
-	return i ? : -EFAULT;
-}
-
-/*
- * get a list of pages in an address range belonging to the specified process
- * and indicate the VMA that covers each page
- * - this is potentially dodgy as we may end incrementing the page count of a
- *   slab page or a secondary page from a compound page
- * - don't permit access to VMAs that don't support it, such as I/O mappings
- */
-long get_user_pages(unsigned long start, unsigned long nr_pages,
-		    unsigned int gup_flags, struct page **pages,
-		    struct vm_area_struct **vmas)
-{
-	return __get_user_pages(current, current->mm, start, nr_pages,
-				gup_flags, pages, vmas, NULL);
-}
-EXPORT_SYMBOL(get_user_pages);
-
-long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
-			unsigned int gup_flags, struct page **pages,
-			int *locked)
-{
-	return get_user_pages(start, nr_pages, gup_flags, pages, NULL);
-}
-EXPORT_SYMBOL(get_user_pages_locked);
-
-static long __get_user_pages_unlocked(struct task_struct *tsk,
-			struct mm_struct *mm, unsigned long start,
-			unsigned long nr_pages, struct page **pages,
-			unsigned int gup_flags)
-{
-	long ret;
-	down_read(&mm->mmap_sem);
-	ret = __get_user_pages(tsk, mm, start, nr_pages, gup_flags, pages,
-				NULL, NULL);
-	up_read(&mm->mmap_sem);
-	return ret;
-}
-
-long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
-			     struct page **pages, unsigned int gup_flags)
-{
-	return __get_user_pages_unlocked(current, current->mm, start, nr_pages,
-					 pages, gup_flags);
-}
-EXPORT_SYMBOL(get_user_pages_unlocked);
 
 /**
  * follow_pfn - look up PFN at a user virtual address
@@ -227,7 +140,7 @@
 }
 EXPORT_SYMBOL(vfree);
 
-void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
+void *__vmalloc(unsigned long size, gfp_t gfp_mask)
 {
 	/*
 	 * You can't specify __GFP_HIGHMEM with kmalloc() since kmalloc()
@@ -237,27 +150,41 @@
 }
 EXPORT_SYMBOL(__vmalloc);
 
-void *__vmalloc_node_flags(unsigned long size, int node, gfp_t flags)
+void *__vmalloc_node_range(unsigned long size, unsigned long align,
+		unsigned long start, unsigned long end, gfp_t gfp_mask,
+		pgprot_t prot, unsigned long vm_flags, int node,
+		const void *caller)
 {
-	return __vmalloc(size, flags, PAGE_KERNEL);
+	return __vmalloc(size, gfp_mask);
+}
+
+void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask,
+		int node, const void *caller)
+{
+	return __vmalloc(size, gfp_mask);
+}
+
+static void *__vmalloc_user_flags(unsigned long size, gfp_t flags)
+{
+	void *ret;
+
+	ret = __vmalloc(size, flags);
+	if (ret) {
+		struct vm_area_struct *vma;
+
+		mmap_write_lock(current->mm);
+		vma = find_vma(current->mm, (unsigned long)ret);
+		if (vma)
+			vma->vm_flags |= VM_USERMAP;
+		mmap_write_unlock(current->mm);
+	}
+
+	return ret;
 }
 
 void *vmalloc_user(unsigned long size)
 {
-	void *ret;
-
-	ret = __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
-	if (ret) {
-		struct vm_area_struct *vma;
-
-		down_write(&current->mm->mmap_sem);
-		vma = find_vma(current->mm, (unsigned long)ret);
-		if (vma)
-			vma->vm_flags |= VM_USERMAP;
-		up_write(&current->mm->mmap_sem);
-	}
-
-	return ret;
+	return __vmalloc_user_flags(size, GFP_KERNEL | __GFP_ZERO);
 }
 EXPORT_SYMBOL(vmalloc_user);
 
@@ -306,7 +233,7 @@
  */
 void *vmalloc(unsigned long size)
 {
-	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
+	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM);
 }
 EXPORT_SYMBOL(vmalloc);
 
@@ -324,8 +251,7 @@
  */
 void *vzalloc(unsigned long size)
 {
-	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
-			PAGE_KERNEL);
+	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
 }
 EXPORT_SYMBOL(vzalloc);
 
@@ -365,23 +291,6 @@
 EXPORT_SYMBOL(vzalloc_node);
 
 /**
- * vmalloc_exec - allocate virtually contiguous, executable memory
- * @size: allocation size
- *
- * Kernel-internal function to allocate enough pages to cover @size
- * the page level allocator and map them into contiguous and
- * executable kernel virtual space.
- *
- * For tight control over page level allocator and protection flags
- * use __vmalloc() instead.
- */
-
-void *vmalloc_exec(unsigned long size)
-{
-	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
-}
-
-/**
  * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
  * @size: allocation size
  *
@@ -390,7 +299,7 @@
  */
 void *vmalloc_32(unsigned long size)
 {
-	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
+	return __vmalloc(size, GFP_KERNEL);
 }
 EXPORT_SYMBOL(vmalloc_32);
 
@@ -427,7 +336,7 @@
 }
 EXPORT_SYMBOL(vunmap);
 
-void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
+void *vm_map_ram(struct page **pages, unsigned int count, int node)
 {
 	BUG();
 	return NULL;
@@ -445,25 +354,6 @@
 }
 EXPORT_SYMBOL_GPL(vm_unmap_aliases);
 
-/*
- * Implement a stub for vmalloc_sync_[un]mapping() if the architecture
- * chose not to have one.
- */
-void __weak vmalloc_sync_mappings(void)
-{
-}
-
-void __weak vmalloc_sync_unmappings(void)
-{
-}
-
-struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
-{
-	BUG();
-	return NULL;
-}
-EXPORT_SYMBOL_GPL(alloc_vm_area);
-
 void free_vm_area(struct vm_struct *area)
 {
 	BUG();
@@ -476,6 +366,20 @@
 	return -EINVAL;
 }
 EXPORT_SYMBOL(vm_insert_page);
+
+int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
+			unsigned long num)
+{
+	return -EINVAL;
+}
+EXPORT_SYMBOL(vm_map_pages);
+
+int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
+			unsigned long num)
+{
+	return -EINVAL;
+}
+EXPORT_SYMBOL(vm_map_pages_zero);
 
 /*
  * sys_brk() for the most part doesn't need the global kernel
@@ -505,7 +409,7 @@
 	/*
 	 * Ok, looks good - let it rip.
 	 */
-	flush_icache_range(mm->brk, brk);
+	flush_icache_user_range(mm->brk, brk);
 	return mm->brk = brk;
 }
 
@@ -654,7 +558,7 @@
  * add a VMA into a process's mm_struct in the appropriate place in the list
  * and tree and add to the address space's page tree also if not an anonymous
  * page
- * - should be called with mm->mmap_sem held writelocked
+ * - should be called with mm->mmap_lock held writelocked
  */
 static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
 {
@@ -714,7 +618,7 @@
 	if (rb_prev)
 		prev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
 
-	__vma_link_list(mm, vma, prev, parent);
+	__vma_link_list(mm, vma, prev);
 }
 
 /*
@@ -750,13 +654,7 @@
 	/* remove from the MM's tree and list */
 	rb_erase(&vma->vm_rb, &mm->mm_rb);
 
-	if (vma->vm_prev)
-		vma->vm_prev->vm_next = vma->vm_next;
-	else
-		mm->mmap = vma->vm_next;
-
-	if (vma->vm_next)
-		vma->vm_next->vm_prev = vma->vm_prev;
+	__vma_unlink_list(mm, vma);
 }
 
 /*
@@ -774,7 +672,7 @@
 
 /*
  * look up the first VMA in which addr resides, NULL if none
- * - should be called with mm->mmap_sem at least held readlocked
+ * - should be called with mm->mmap_lock at least held readlocked
  */
 struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
 {
@@ -820,7 +718,7 @@
 
 /*
  * look up the first VMA exactly that exactly matches addr
- * - should be called with mm->mmap_sem at least held readlocked
+ * - should be called with mm->mmap_lock at least held readlocked
  */
 static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
 					unsigned long addr,
@@ -1173,7 +1071,6 @@
 			unsigned long len,
 			unsigned long prot,
 			unsigned long flags,
-			vm_flags_t vm_flags,
 			unsigned long pgoff,
 			unsigned long *populate,
 			struct list_head *uf)
@@ -1181,6 +1078,7 @@
 	struct vm_area_struct *vma;
 	struct vm_region *region;
 	struct rb_node *rb;
+	vm_flags_t vm_flags;
 	unsigned long capabilities, result;
 	int ret;
 
@@ -1199,7 +1097,7 @@
 
 	/* we've determined that we can make the mapping, now translate what we
 	 * now know into VMA flags */
-	vm_flags |= determine_vm_flags(file, prot, flags, capabilities);
+	vm_flags = determine_vm_flags(file, prot, flags, capabilities);
 
 	/* we're going to need to record the mapping */
 	region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL);
@@ -1338,7 +1236,9 @@
 	add_nommu_region(region);
 
 	/* clear anonymous mappings that don't ask for uninitialized data */
-	if (!vma->vm_file && !(flags & MAP_UNINITIALIZED))
+	if (!vma->vm_file &&
+	    (!IS_ENABLED(CONFIG_MMAP_ALLOW_UNINITIALIZED) ||
+	     !(flags & MAP_UNINITIALIZED)))
 		memset((void *)region->vm_start, 0,
 			region->vm_end - region->vm_start);
 
@@ -1353,7 +1253,7 @@
 	/* we flush the region from the icache only when the first executable
 	 * mapping of it is made */
 	if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) {
-		flush_icache_range(region->vm_start, region->vm_end);
+		flush_icache_user_range(region->vm_start, region->vm_end);
 		region->vm_icache_flushed = true;
 	}
 
@@ -1618,9 +1518,9 @@
 	struct mm_struct *mm = current->mm;
 	int ret;
 
-	down_write(&mm->mmap_sem);
+	mmap_write_lock(mm);
 	ret = do_munmap(mm, addr, len, NULL);
-	up_write(&mm->mmap_sem);
+	mmap_write_unlock(mm);
 	return ret;
 }
 EXPORT_SYMBOL(vm_munmap);
@@ -1707,17 +1607,15 @@
 {
 	unsigned long ret;
 
-	down_write(&current->mm->mmap_sem);
+	mmap_write_lock(current->mm);
 	ret = do_mremap(addr, old_len, new_len, flags, new_addr);
-	up_write(&current->mm->mmap_sem);
+	mmap_write_unlock(current->mm);
 	return ret;
 }
 
-struct page *follow_page_mask(struct vm_area_struct *vma,
-			unsigned long address, unsigned int flags,
-			unsigned int *page_mask)
+struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
+			 unsigned int foll_flags)
 {
-	*page_mask = 0;
 	return NULL;
 }
 
@@ -1770,12 +1668,21 @@
 }
 EXPORT_SYMBOL(filemap_fault);
 
-void filemap_map_pages(struct vm_fault *vmf,
+vm_fault_t filemap_map_pages(struct vm_fault *vmf,
 		pgoff_t start_pgoff, pgoff_t end_pgoff)
 {
 	BUG();
+	return 0;
 }
 EXPORT_SYMBOL(filemap_map_pages);
+
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
+bool filemap_allow_speculation(void)
+{
+	BUG();
+	return false;
+}
+#endif
 
 int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
 		unsigned long addr, void *buf, int len, unsigned int gup_flags)
@@ -1783,7 +1690,7 @@
 	struct vm_area_struct *vma;
 	int write = gup_flags & FOLL_WRITE;
 
-	if (down_read_killable(&mm->mmap_sem))
+	if (mmap_read_lock_killable(mm))
 		return 0;
 
 	/* the access must start within one of the target process's mappings */
@@ -1806,7 +1713,7 @@
 		len = 0;
 	}
 
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 
 	return len;
 }
@@ -1857,8 +1764,8 @@
  * @newsize: The proposed filesize of the inode
  *
  * Check the shared mappings on an inode on behalf of a shrinking truncate to
- * make sure that that any outstanding VMAs aren't broken and then shrink the
- * vm_regions that extend that beyond so that do_mmap_pgoff() doesn't
+ * make sure that any outstanding VMAs aren't broken and then shrink the
+ * vm_regions that extend beyond so that do_mmap() doesn't
  * automatically grant mappings that are too large.
  */
 int nommu_shrink_inode_mappings(struct inode *inode, size_t size,