2024-01-03 2f7c68cb55ecb7331f2381deb497c27155f32faf
kernel/drivers/infiniband/hw/hfi1/user_pages.c
@@ -91,9 +91,7 @@
 	/* Convert to number of pages */
 	size = DIV_ROUND_UP(size, PAGE_SIZE);
 
-	down_read(&mm->mmap_sem);
-	pinned = mm->pinned_vm;
-	up_read(&mm->mmap_sem);
+	pinned = atomic64_read(&mm->pinned_vm);
 
 	/* First, check the absolute limit against all pinned pages. */
 	if (pinned + npages >= ulimit && !can_lock)
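
Note (not part of the patch): since mm->pinned_vm became an atomic64_t, the
counter can be read without taking mmap_sem. Below is a minimal sketch of the
kind of limit check this hunk feeds into; the helper name and the ulimit_pages
parameter are hypothetical, and the real hfi1 check above also honours the
can_lock override.

#include <linux/atomic.h>
#include <linux/mm_types.h>

/* Illustrative only: compare a prospective pin against a page limit. */
static bool would_exceed_pin_limit(struct mm_struct *mm, size_t npages,
				   unsigned long ulimit_pages)
{
	u64 pinned = atomic64_read(&mm->pinned_vm);	/* lock-free read */

	return pinned + npages >= ulimit_pages;
}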
@@ -106,14 +104,13 @@
 			    bool writable, struct page **pages)
 {
 	int ret;
+	unsigned int gup_flags = FOLL_LONGTERM | (writable ? FOLL_WRITE : 0);
 
-	ret = get_user_pages_fast(vaddr, npages, writable, pages);
+	ret = pin_user_pages_fast(vaddr, npages, gup_flags, pages);
 	if (ret < 0)
 		return ret;
 
-	down_write(&mm->mmap_sem);
-	mm->pinned_vm += ret;
-	up_write(&mm->mmap_sem);
+	atomic64_add(ret, &mm->pinned_vm);
 
 	return ret;
 }
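
Note (not part of the patch): pin_user_pages_fast() takes a gup_flags word
instead of the old write boolean, and FOLL_LONGTERM tells the core MM that the
pages back a long-lived DMA registration. A sketch of a caller that insists on
pinning the whole range follows; the wrapper name is hypothetical, and hfi1
itself simply returns the partial count as shown above.

#include <linux/mm.h>

/* Hypothetical all-or-nothing wrapper around pin_user_pages_fast(). */
static int pin_all_or_nothing(unsigned long vaddr, size_t npages,
			      bool writable, struct page **pages)
{
	unsigned int gup_flags = FOLL_LONGTERM | (writable ? FOLL_WRITE : 0);
	int ret;

	ret = pin_user_pages_fast(vaddr, npages, gup_flags, pages);
	if (ret < 0)
		return ret;
	if (ret != npages) {
		/* Partial pin: release what we got and report failure. */
		unpin_user_pages(pages, ret);
		return -EFAULT;
	}
	return 0;
}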
@@ -121,17 +118,9 @@
 void hfi1_release_user_pages(struct mm_struct *mm, struct page **p,
 			     size_t npages, bool dirty)
 {
-	size_t i;
-
-	for (i = 0; i < npages; i++) {
-		if (dirty)
-			set_page_dirty_lock(p[i]);
-		put_page(p[i]);
-	}
+	unpin_user_pages_dirty_lock(p, npages, dirty);
 
 	if (mm) { /* during close after signal, mm can be NULL */
-		down_write(&mm->mmap_sem);
-		mm->pinned_vm -= npages;
-		up_write(&mm->mmap_sem);
+		atomic64_sub(npages, &mm->pinned_vm);
 	}
 }
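
Note (not part of the patch): pages obtained through pin_user_pages_fast()
must be released with unpin_user_pages()/unpin_user_pages_dirty_lock(), never
put_page(), which is what the last hunk switches to. A hedged round-trip
sketch follows; the function name is hypothetical and the pin step is
open-coded rather than naming the driver's acquire helper, while
hfi1_release_user_pages() is the driver function shown above (its prototype
comes from the driver's own headers).

#include <linux/mm.h>
#include <linux/slab.h>

/* Hypothetical caller: pin, account, use, then unpin and un-account. */
static int pin_use_release_example(struct mm_struct *mm, unsigned long vaddr,
				   size_t npages)
{
	struct page **pages;
	int pinned;

	pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	pinned = pin_user_pages_fast(vaddr, npages,
				     FOLL_LONGTERM | FOLL_WRITE, pages);
	if (pinned < 0) {
		kfree(pages);
		return pinned;
	}
	atomic64_add(pinned, &mm->pinned_vm);

	/* ... map the pages for DMA and use them ... */

	/* Release path: unpin (marking dirty) and drop the accounting. */
	hfi1_release_user_pages(mm, pages, pinned, true);
	kfree(pages);
	return 0;
}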