 .. |  .. |
 98 |  98 | 	struct task_struct *tsk = current;
 99 |  99 | 	struct vm86plus_struct __user *user;
100 | 100 | 	struct vm86 *vm86 = current->thread.vm86;
101 |     | -	long err = 0;
102 | 101 | 
103 | 102 | 	/*
104 | 103 | 	 * This gets called from entry.S with interrupts disabled, but
 .. |  .. |
114 | 113 | 	set_flags(regs->pt.flags, VEFLAGS, X86_EFLAGS_VIF | vm86->veflags_mask);
115 | 114 | 	user = vm86->user_vm86;
116 | 115 | 
117 |     | -	if (!access_ok(VERIFY_WRITE, user, vm86->vm86plus.is_vm86pus ?
    | 116 | +	if (!user_access_begin(user, vm86->vm86plus.is_vm86pus ?
118 | 117 | 		sizeof(struct vm86plus_struct) :
119 |     | -		sizeof(struct vm86_struct))) {
120 |     | -		pr_alert("could not access userspace vm86 info\n");
121 |     | -		do_exit(SIGSEGV);
122 |     | -	}
    | 118 | +		sizeof(struct vm86_struct)))
    | 119 | +		goto Efault;
123 | 120 | 
124 |     | -	put_user_try {
125 |     | -		put_user_ex(regs->pt.bx, &user->regs.ebx);
126 |     | -		put_user_ex(regs->pt.cx, &user->regs.ecx);
127 |     | -		put_user_ex(regs->pt.dx, &user->regs.edx);
128 |     | -		put_user_ex(regs->pt.si, &user->regs.esi);
129 |     | -		put_user_ex(regs->pt.di, &user->regs.edi);
130 |     | -		put_user_ex(regs->pt.bp, &user->regs.ebp);
131 |     | -		put_user_ex(regs->pt.ax, &user->regs.eax);
132 |     | -		put_user_ex(regs->pt.ip, &user->regs.eip);
133 |     | -		put_user_ex(regs->pt.cs, &user->regs.cs);
134 |     | -		put_user_ex(regs->pt.flags, &user->regs.eflags);
135 |     | -		put_user_ex(regs->pt.sp, &user->regs.esp);
136 |     | -		put_user_ex(regs->pt.ss, &user->regs.ss);
137 |     | -		put_user_ex(regs->es, &user->regs.es);
138 |     | -		put_user_ex(regs->ds, &user->regs.ds);
139 |     | -		put_user_ex(regs->fs, &user->regs.fs);
140 |     | -		put_user_ex(regs->gs, &user->regs.gs);
    | 121 | +	unsafe_put_user(regs->pt.bx, &user->regs.ebx, Efault_end);
    | 122 | +	unsafe_put_user(regs->pt.cx, &user->regs.ecx, Efault_end);
    | 123 | +	unsafe_put_user(regs->pt.dx, &user->regs.edx, Efault_end);
    | 124 | +	unsafe_put_user(regs->pt.si, &user->regs.esi, Efault_end);
    | 125 | +	unsafe_put_user(regs->pt.di, &user->regs.edi, Efault_end);
    | 126 | +	unsafe_put_user(regs->pt.bp, &user->regs.ebp, Efault_end);
    | 127 | +	unsafe_put_user(regs->pt.ax, &user->regs.eax, Efault_end);
    | 128 | +	unsafe_put_user(regs->pt.ip, &user->regs.eip, Efault_end);
    | 129 | +	unsafe_put_user(regs->pt.cs, &user->regs.cs, Efault_end);
    | 130 | +	unsafe_put_user(regs->pt.flags, &user->regs.eflags, Efault_end);
    | 131 | +	unsafe_put_user(regs->pt.sp, &user->regs.esp, Efault_end);
    | 132 | +	unsafe_put_user(regs->pt.ss, &user->regs.ss, Efault_end);
    | 133 | +	unsafe_put_user(regs->es, &user->regs.es, Efault_end);
    | 134 | +	unsafe_put_user(regs->ds, &user->regs.ds, Efault_end);
    | 135 | +	unsafe_put_user(regs->fs, &user->regs.fs, Efault_end);
    | 136 | +	unsafe_put_user(regs->gs, &user->regs.gs, Efault_end);
    | 137 | +	unsafe_put_user(vm86->screen_bitmap, &user->screen_bitmap, Efault_end);
141 | 138 | 
142 |     | -		put_user_ex(vm86->screen_bitmap, &user->screen_bitmap);
143 |     | -	} put_user_catch(err);
144 |     | -	if (err) {
145 |     | -		pr_alert("could not access userspace vm86 info\n");
146 |     | -		do_exit(SIGSEGV);
147 |     | -	}
    | 139 | +	user_access_end();
148 | 140 | 
149 | 141 | 	preempt_disable();
150 | 142 | 	tsk->thread.sp0 = vm86->saved_sp0;
 .. |  .. |
159 | 151 | 	lazy_load_gs(vm86->regs32.gs);
160 | 152 | 
161 | 153 | 	regs->pt.ax = retval;
    | 154 | +	return;
    | 155 | +
    | 156 | +Efault_end:
    | 157 | +	user_access_end();
    | 158 | +Efault:
    | 159 | +	pr_alert("could not access userspace vm86 info\n");
    | 160 | +	do_exit(SIGSEGV);
162 | 161 | }
163 | 162 | 
164 | 163 | static void mark_screen_rdonly(struct mm_struct *mm)
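
The hunks above replace the exception-table based put_user_try/put_user_ex helpers with the user_access_begin()/unsafe_put_user()/user_access_end() pattern. A minimal sketch of that pattern, not taken from this patch (struct foo, put_foo() and their fields are made up for illustration):

struct foo {
	int a;
	int b;
};

static int put_foo(struct foo __user *ufoo, const struct foo *foo)
{
	/* Validate the range and open the user-access window. */
	if (!user_access_begin(ufoo, sizeof(*ufoo)))
		return -EFAULT;

	/* Each store jumps to the given label if the write faults. */
	unsafe_put_user(foo->a, &ufoo->a, Efault);
	unsafe_put_user(foo->b, &ufoo->b, Efault);

	user_access_end();
	return 0;

Efault:
	/* The access window must be closed on the failure path too. */
	user_access_end();
	return -EFAULT;
}
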
 .. |  .. |
172 | 171 | 	pte_t *pte;
173 | 172 | 	int i;
174 | 173 | 
175 |     | -	down_write(&mm->mmap_sem);
    | 174 | +	mmap_write_lock(mm);
176 | 175 | 	pgd = pgd_offset(mm, 0xA0000);
177 | 176 | 	if (pgd_none_or_clear_bad(pgd))
178 | 177 | 		goto out;
 .. |  .. |
198 | 197 | 	}
199 | 198 | 	pte_unmap_unlock(pte, ptl);
200 | 199 | out:
201 |     | -	up_write(&mm->mmap_sem);
202 |     | -	flush_tlb_mm_range(mm, 0xA0000, 0xA0000 + 32*PAGE_SIZE, 0UL);
    | 200 | +	mmap_write_unlock(mm);
    | 201 | +	flush_tlb_mm_range(mm, 0xA0000, 0xA0000 + 32*PAGE_SIZE, PAGE_SHIFT, false);
203 | 202 | }
204 | 203 | 
205 | 204 | 
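
Two API updates meet in this hunk: the raw mmap_sem rwsem calls become the mmap_write_lock()/mmap_write_unlock() wrappers, and flush_tlb_mm_range() now also takes a stride shift (PAGE_SHIFT for ordinary 4 KiB pages) plus a final bool saying whether page tables were freed. A minimal sketch of the locking wrappers (update_vmas_locked() and its body are illustrative, not part of the patch):

#include <linux/mmap_lock.h>

static void update_vmas_locked(struct mm_struct *mm)
{
	mmap_write_lock(mm);		/* was: down_write(&mm->mmap_sem) */
	/* ... walk or modify this mm's VMAs here ... */
	mmap_write_unlock(mm);		/* was: up_write(&mm->mmap_sem) */
}
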
 .. |  .. |
243 | 242 | 	struct kernel_vm86_regs vm86regs;
244 | 243 | 	struct pt_regs *regs = current_pt_regs();
245 | 244 | 	unsigned long err = 0;
    | 245 | +	struct vm86_struct v;
246 | 246 | 
247 | 247 | 	err = security_mmap_addr(0);
248 | 248 | 	if (err) {
 .. |  .. |
278 | 278 | 	if (vm86->saved_sp0)
279 | 279 | 		return -EPERM;
280 | 280 | 
281 |     | -	if (!access_ok(VERIFY_READ, user_vm86, plus ?
282 |     | -		sizeof(struct vm86_struct) :
283 |     | -		sizeof(struct vm86plus_struct)))
    | 281 | +	if (copy_from_user(&v, user_vm86,
    | 282 | +			offsetof(struct vm86_struct, int_revectored)))
284 | 283 | 		return -EFAULT;
285 | 284 | 
286 | 285 | 	memset(&vm86regs, 0, sizeof(vm86regs));
287 |     | -	get_user_try {
288 |     | -		unsigned short seg;
289 |     | -		get_user_ex(vm86regs.pt.bx, &user_vm86->regs.ebx);
290 |     | -		get_user_ex(vm86regs.pt.cx, &user_vm86->regs.ecx);
291 |     | -		get_user_ex(vm86regs.pt.dx, &user_vm86->regs.edx);
292 |     | -		get_user_ex(vm86regs.pt.si, &user_vm86->regs.esi);
293 |     | -		get_user_ex(vm86regs.pt.di, &user_vm86->regs.edi);
294 |     | -		get_user_ex(vm86regs.pt.bp, &user_vm86->regs.ebp);
295 |     | -		get_user_ex(vm86regs.pt.ax, &user_vm86->regs.eax);
296 |     | -		get_user_ex(vm86regs.pt.ip, &user_vm86->regs.eip);
297 |     | -		get_user_ex(seg, &user_vm86->regs.cs);
298 |     | -		vm86regs.pt.cs = seg;
299 |     | -		get_user_ex(vm86regs.pt.flags, &user_vm86->regs.eflags);
300 |     | -		get_user_ex(vm86regs.pt.sp, &user_vm86->regs.esp);
301 |     | -		get_user_ex(seg, &user_vm86->regs.ss);
302 |     | -		vm86regs.pt.ss = seg;
303 |     | -		get_user_ex(vm86regs.es, &user_vm86->regs.es);
304 |     | -		get_user_ex(vm86regs.ds, &user_vm86->regs.ds);
305 |     | -		get_user_ex(vm86regs.fs, &user_vm86->regs.fs);
306 |     | -		get_user_ex(vm86regs.gs, &user_vm86->regs.gs);
307 | 286 | 
308 |     | -		get_user_ex(vm86->flags, &user_vm86->flags);
309 |     | -		get_user_ex(vm86->screen_bitmap, &user_vm86->screen_bitmap);
310 |     | -		get_user_ex(vm86->cpu_type, &user_vm86->cpu_type);
311 |     | -	} get_user_catch(err);
312 |     | -	if (err)
313 |     | -		return err;
    | 287 | +	vm86regs.pt.bx = v.regs.ebx;
    | 288 | +	vm86regs.pt.cx = v.regs.ecx;
    | 289 | +	vm86regs.pt.dx = v.regs.edx;
    | 290 | +	vm86regs.pt.si = v.regs.esi;
    | 291 | +	vm86regs.pt.di = v.regs.edi;
    | 292 | +	vm86regs.pt.bp = v.regs.ebp;
    | 293 | +	vm86regs.pt.ax = v.regs.eax;
    | 294 | +	vm86regs.pt.ip = v.regs.eip;
    | 295 | +	vm86regs.pt.cs = v.regs.cs;
    | 296 | +	vm86regs.pt.flags = v.regs.eflags;
    | 297 | +	vm86regs.pt.sp = v.regs.esp;
    | 298 | +	vm86regs.pt.ss = v.regs.ss;
    | 299 | +	vm86regs.es = v.regs.es;
    | 300 | +	vm86regs.ds = v.regs.ds;
    | 301 | +	vm86regs.fs = v.regs.fs;
    | 302 | +	vm86regs.gs = v.regs.gs;
    | 303 | +
    | 304 | +	vm86->flags = v.flags;
    | 305 | +	vm86->screen_bitmap = v.screen_bitmap;
    | 306 | +	vm86->cpu_type = v.cpu_type;
314 | 307 | 
315 | 308 | 	if (copy_from_user(&vm86->int_revectored,
316 | 309 | 			&user_vm86->int_revectored,
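
The conversion above trades one get_user_ex() per field for a single bounded copy_from_user() of everything up to int_revectored, followed by plain assignments from the kernel-side copy. A minimal sketch of that pattern, not taken from this patch (struct settings, its fields and read_settings_head() are illustrative only):

struct settings {
	unsigned int mode;
	unsigned int flags;
	unsigned long table[8];	/* everything from here on is copied separately */
};

static int read_settings_head(struct settings *dst,
			      const struct settings __user *src)
{
	struct settings tmp;

	/* One fault-checked copy of the leading fields. */
	if (copy_from_user(&tmp, src, offsetof(struct settings, table)))
		return -EFAULT;

	/* Plain assignments; no user access happens past this point. */
	dst->mode  = tmp.mode;
	dst->flags = tmp.flags;
	return 0;
}
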
 .. |  .. |
369 | 362 | 	preempt_disable();
370 | 363 | 	tsk->thread.sp0 += 16;
371 | 364 | 
372 |     | -	if (static_cpu_has(X86_FEATURE_SEP)) {
    | 365 | +	if (boot_cpu_has(X86_FEATURE_SEP)) {
373 | 366 | 		tsk->thread.sysenter_cs = 0;
374 | 367 | 		refresh_sysenter_cs(&tsk->thread);
375 | 368 | 	}
 .. |  .. |
381 | 374 | 		mark_screen_rdonly(tsk->mm);
382 | 375 | 
383 | 376 | 	memcpy((struct kernel_vm86_regs *)regs, &vm86regs, sizeof(vm86regs));
384 |     | -	force_iret();
385 | 377 | 	return regs->ax;
386 | 378 | }
387 | 379 | 
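
For reference, static_cpu_has() is the alternatives-patched fast variant, while boot_cpu_has() is an ordinary runtime test of the boot CPU's feature bit; on a slow path like this the plain test suffices. A minimal, illustrative use (report_sep_support() is not part of the patch):

static void report_sep_support(void)
{
	/* Plain runtime check of the feature bit, no code patching involved. */
	if (boot_cpu_has(X86_FEATURE_SEP))
		pr_info("SYSENTER/SYSEXIT supported\n");
}
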
 .. |  .. |
583 | 575 | 		return 1; /* we let this handle by the calling routine */
584 | 576 | 	current->thread.trap_nr = trapno;
585 | 577 | 	current->thread.error_code = error_code;
586 |     | -	force_sig(SIGTRAP, current);
    | 578 | +	force_sig(SIGTRAP);
587 | 579 | 	return 0;
588 | 580 | }
589 | 581 | 
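
force_sig() lost its task argument because it now always signals the current task. A minimal sketch of the updated call pattern (report_trap() is illustrative, not part of the patch):

static void report_trap(int trapnr, long error_code)
{
	current->thread.trap_nr = trapnr;
	current->thread.error_code = error_code;
	force_sig(SIGTRAP);	/* was: force_sig(SIGTRAP, current); */
}
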