@@ -40,50 +40,7 @@
 static void __qib_release_user_pages(struct page **p, size_t num_pages,
 			int dirty)
 {
-	size_t i;
-
-	for (i = 0; i < num_pages; i++) {
-		if (dirty)
-			set_page_dirty_lock(p[i]);
-		put_page(p[i]);
-	}
-}
-
-/*
- * Call with current->mm->mmap_sem held.
- */
-static int __qib_get_user_pages(unsigned long start_page, size_t num_pages,
-			struct page **p)
-{
-	unsigned long lock_limit;
-	size_t got;
-	int ret;
-
-	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
-
-	if (num_pages > lock_limit && !capable(CAP_IPC_LOCK)) {
-		ret = -ENOMEM;
-		goto bail;
-	}
-
-	for (got = 0; got < num_pages; got += ret) {
-		ret = get_user_pages(start_page + got * PAGE_SIZE,
-				     num_pages - got,
-				     FOLL_WRITE | FOLL_FORCE,
-				     p + got, NULL);
-		if (ret < 0)
-			goto bail_release;
-	}
-
-	current->mm->pinned_vm += num_pages;
-
-	ret = 0;
-	goto bail;
-
-bail_release:
-	__qib_release_user_pages(p, got, 0);
-bail:
-	return ret;
+	unpin_user_pages_dirty_lock(p, num_pages, dirty);
 }
 
 /**
@@ -137,26 +94,44 @@
 int qib_get_user_pages(unsigned long start_page, size_t num_pages,
 		       struct page **p)
 {
+	unsigned long locked, lock_limit;
+	size_t got;
 	int ret;
 
-	down_write(&current->mm->mmap_sem);
+	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+	locked = atomic64_add_return(num_pages, &current->mm->pinned_vm);
 
-	ret = __qib_get_user_pages(start_page, num_pages, p);
+	if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
+		ret = -ENOMEM;
+		goto bail;
+	}
 
-	up_write(&current->mm->mmap_sem);
+	mmap_read_lock(current->mm);
+	for (got = 0; got < num_pages; got += ret) {
+		ret = pin_user_pages(start_page + got * PAGE_SIZE,
+				     num_pages - got,
+				     FOLL_LONGTERM | FOLL_WRITE | FOLL_FORCE,
+				     p + got, NULL);
+		if (ret < 0) {
+			mmap_read_unlock(current->mm);
+			goto bail_release;
+		}
+	}
+	mmap_read_unlock(current->mm);
 
+	return 0;
+bail_release:
+	__qib_release_user_pages(p, got, 0);
+bail:
+	atomic64_sub(num_pages, &current->mm->pinned_vm);
 	return ret;
 }
 
 void qib_release_user_pages(struct page **p, size_t num_pages)
 {
-	if (current->mm) /* during close after signal, mm can be NULL */
-		down_write(&current->mm->mmap_sem);
-
 	__qib_release_user_pages(p, num_pages, 1);
 
-	if (current->mm) {
-		current->mm->pinned_vm -= num_pages;
-		up_write(&current->mm->mmap_sem);
-	}
+	/* during close after signal, mm can be NULL */
+	if (current->mm)
+		atomic64_sub(num_pages, &current->mm->pinned_vm);
 }
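
For context, a minimal self-contained sketch of the pattern this conversion adopts: FOLL_LONGTERM pins taken with pin_user_pages() under mmap_read_lock(), pinned_vm accounted with atomic64 operations instead of under a writer-held mmap_sem, and release via unpin_user_pages_dirty_lock() rather than set_page_dirty_lock()/put_page(). The demo_pin()/demo_unpin() names below are illustrative, not part of the patch, and the five-argument pin_user_pages() signature matches the kernel generation this diff targets (the vmas argument was dropped in later kernels).

#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/capability.h>

static int demo_pin(unsigned long start, size_t npages, struct page **pages)
{
	unsigned long locked, lock_limit;
	size_t got;
	long ret;

	/* Charge pinned_vm up front; the atomic counter needs no mmap lock. */
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	locked = atomic64_add_return(npages, &current->mm->pinned_vm);
	if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
		atomic64_sub(npages, &current->mm->pinned_vm);
		return -ENOMEM;
	}

	/* pin_user_pages() may pin fewer pages than asked; loop until done. */
	mmap_read_lock(current->mm);
	for (got = 0; got < npages; got += ret) {
		ret = pin_user_pages(start + got * PAGE_SIZE, npages - got,
				     FOLL_LONGTERM | FOLL_WRITE,
				     pages + got, NULL);
		if (ret < 0) {
			mmap_read_unlock(current->mm);
			/* Drop the pins we did take, then back out the charge. */
			unpin_user_pages(pages, got);
			atomic64_sub(npages, &current->mm->pinned_vm);
			return ret;
		}
	}
	mmap_read_unlock(current->mm);
	return 0;
}

static void demo_unpin(struct page **pages, size_t npages, bool dirty)
{
	/* FOLL_PIN pages must be unpinned, never released with put_page(). */
	unpin_user_pages_dirty_lock(pages, npages, dirty);
	atomic64_sub(npages, &current->mm->pinned_vm);
}

Charging pinned_vm before pinning, and backing the charge out on any failure path, is what lets the RLIMIT_MEMLOCK check run without taking the mmap lock for write at all; the lock is then only needed in read mode, around the pin itself.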