2023-12-09  commit b22da3d8526a935aa31e086e63f60ff3246cb61c
--- a/kernel/drivers/infiniband/hw/qib/qib_user_pages.c
+++ b/kernel/drivers/infiniband/hw/qib/qib_user_pages.c
@@ -40,50 +40,7 @@
 static void __qib_release_user_pages(struct page **p, size_t num_pages,
 				     int dirty)
 {
-	size_t i;
-
-	for (i = 0; i < num_pages; i++) {
-		if (dirty)
-			set_page_dirty_lock(p[i]);
-		put_page(p[i]);
-	}
-}
-
-/*
- * Call with current->mm->mmap_sem held.
- */
-static int __qib_get_user_pages(unsigned long start_page, size_t num_pages,
-				struct page **p)
-{
-	unsigned long lock_limit;
-	size_t got;
-	int ret;
-
-	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
-
-	if (num_pages > lock_limit && !capable(CAP_IPC_LOCK)) {
-		ret = -ENOMEM;
-		goto bail;
-	}
-
-	for (got = 0; got < num_pages; got += ret) {
-		ret = get_user_pages(start_page + got * PAGE_SIZE,
-				     num_pages - got,
-				     FOLL_WRITE | FOLL_FORCE,
-				     p + got, NULL);
-		if (ret < 0)
-			goto bail_release;
-	}
-
-	current->mm->pinned_vm += num_pages;
-
-	ret = 0;
-	goto bail;
-
-bail_release:
-	__qib_release_user_pages(p, got, 0);
-bail:
-	return ret;
+	unpin_user_pages_dirty_lock(p, num_pages, dirty);
 }
 
 /**
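
Review note: this hunk collapses the open-coded dirty-and-release loop into unpin_user_pages_dirty_lock(), the unpin-side counterpart of the pin_user_pages() call added in the next hunk. As a rough sketch only (not the real mm/gup.c body), the helper behaves like the removed loop with put_page() swapped for unpin_user_page(), so the FOLL_PIN reference taken at pin time is dropped correctly:

	/*
	 * Illustrative sketch, assuming only what this patch shows; the
	 * real helper also avoids redundant dirtying.  The key change vs.
	 * the removed loop is unpin_user_page() instead of put_page().
	 */
	static void sketch_unpin_dirty(struct page **pages, size_t npages,
				       int dirty)
	{
		size_t i;

		for (i = 0; i < npages; i++) {
			if (dirty)
				set_page_dirty_lock(pages[i]);
			/* drop the FOLL_PIN ref taken by pin_user_pages() */
			unpin_user_page(pages[i]);
		}
	}
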
@@ -137,26 +94,44 @@
 int qib_get_user_pages(unsigned long start_page, size_t num_pages,
 		       struct page **p)
 {
+	unsigned long locked, lock_limit;
+	size_t got;
 	int ret;
 
-	down_write(&current->mm->mmap_sem);
+	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+	locked = atomic64_add_return(num_pages, &current->mm->pinned_vm);
 
-	ret = __qib_get_user_pages(start_page, num_pages, p);
+	if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
+		ret = -ENOMEM;
+		goto bail;
+	}
 
-	up_write(&current->mm->mmap_sem);
+	mmap_read_lock(current->mm);
+	for (got = 0; got < num_pages; got += ret) {
+		ret = pin_user_pages(start_page + got * PAGE_SIZE,
+				     num_pages - got,
+				     FOLL_LONGTERM | FOLL_WRITE | FOLL_FORCE,
+				     p + got, NULL);
+		if (ret < 0) {
+			mmap_read_unlock(current->mm);
+			goto bail_release;
+		}
+	}
+	mmap_read_unlock(current->mm);
 
+	return 0;
+bail_release:
+	__qib_release_user_pages(p, got, 0);
+bail:
+	atomic64_sub(num_pages, &current->mm->pinned_vm);
 	return ret;
 }
 
 void qib_release_user_pages(struct page **p, size_t num_pages)
 {
-	if (current->mm) /* during close after signal, mm can be NULL */
-		down_write(&current->mm->mmap_sem);
-
 	__qib_release_user_pages(p, num_pages, 1);
 
-	if (current->mm) {
-		current->mm->pinned_vm -= num_pages;
-		up_write(&current->mm->mmap_sem);
-	}
+	/* during close after signal, mm can be NULL */
+	if (current->mm)
+		atomic64_sub(num_pages, &current->mm->pinned_vm);
 }
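
Review note: the RLIMIT_MEMLOCK accounting becomes speculative here. atomic64_add_return() charges mm->pinned_vm up front, the result is checked against the limit, and every failure path backs the charge out with atomic64_sub() at the bail label, so the old write-side mmap_sem lock is no longer needed just to protect a counter. The mmap lock is then held only for read, and only around the pin_user_pages() loop; FOLL_LONGTERM marks the pages as pinned indefinitely, as expected for userspace buffers under DMA. A hypothetical caller-side sketch of the resulting lifecycle (the qib_example_dma name and surrounding flow are invented for illustration; only the two qib_*_user_pages calls come from this patch):

	/*
	 * Hypothetical usage sketch, not from this patch: pin a
	 * page-aligned userspace buffer for DMA, then release it once
	 * the hardware is done with it.
	 */
	static int qib_example_dma(unsigned long uaddr, size_t npages,
				   struct page **pages)
	{
		int ret;

		ret = qib_get_user_pages(uaddr, npages, pages);
		if (ret)
			return ret;	/* e.g. -ENOMEM over RLIMIT_MEMLOCK */

		/* ... set up DMA mappings, hand the buffer to the HCA ... */

		/* on completion: unpins, and marks the pages dirty */
		qib_release_user_pages(pages, npages);
		return 0;
	}
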