```diff
@@ -18,7 +18,7 @@
  * hardened usercopy checks by added "unconst" to all the const copies,
  * and making sure "cache_size" isn't optimized into a const.
  */
-static volatile size_t unconst = 0;
+static volatile size_t unconst;
 static volatile size_t cache_size = 1024;
 static struct kmem_cache *whitelist_cache;
 
```
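The dropped `= 0` is redundant: a `static` object is zero-initialized by the C standard, so the explicit initializer at most affects section placement on older toolchains. The load-bearing keyword is `volatile`, which forces every read of `unconst` to be a real memory load, so any size expression involving it can no longer be folded into a compile-time constant that hardened usercopy would never get to check. A minimal userspace sketch of the idea (`copy_fixed` and `copy_unconst` are illustrative names, not lkdtm code):

```c
#include <stddef.h>
#include <string.h>

/* Zero without an initializer: static storage is zero-initialized. */
static volatile size_t unconst;

/* The length is a literal: the compiler can prove it constant, so a
 * check keyed on runtime sizes never sees a live length. */
void copy_fixed(char *dst, const char *src)
{
	memcpy(dst, src, 16);
}

/* Adding the volatile zero makes the length a runtime value, so the
 * copy is forced down the non-constant checking path. */
void copy_unconst(char *dst, const char *src)
{
	memcpy(dst, src, 16 + unconst);
}
```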
```diff
@@ -30,12 +30,12 @@
  */
 static noinline unsigned char *trick_compiler(unsigned char *stack)
 {
-	return stack + 0;
+	return stack + unconst;
 }
 
 static noinline unsigned char *do_usercopy_stack_callee(int value)
 {
-	unsigned char buf[32];
+	unsigned char buf[128];
 	int i;
 
 	/* Exercise stack to avoid everything living in registers. */
```
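`trick_compiler()` launders the buffer pointer so the caller cannot prove the returned value still points into the callee's frame. With `stack + 0` the offset is trivially foldable, and interprocedural or link-time optimization could in principle see through the `noinline` wrapper; `stack + unconst` hides the offset behind a volatile load that no analysis can constant-fold. A standalone sketch of the pattern (`launder` is an illustrative name):

```c
#include <stddef.h>

static volatile size_t unconst;

/* noinline alone hides the body from the caller, but a literal offset
 * of 0 could still be folded away by whole-program optimization.
 * Reading a volatile makes the returned pointer's offset opaque: it is
 * always p at runtime, but never provably so at compile time. */
static __attribute__((noinline)) unsigned char *launder(unsigned char *p)
{
	return p + unconst;
}
```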
```diff
@@ -43,7 +43,12 @@
 		buf[i] = value & 0xff;
 	}
 
-	return trick_compiler(buf);
+	/*
+	 * Put the target buffer in the middle of stack allocation
+	 * so that we don't step on future stack users regardless
+	 * of stack growth direction.
+	 */
+	return trick_compiler(&buf[(128/2)-32]);
 }
 
 static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
```
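The index arithmetic centers the copy window in the enlarged buffer: `(128/2) - 32 == 32`, so the returned pointer is `&buf[32]`, and a 32-byte access spans `buf[32]` through `buf[63]`, leaving at least 32 bytes of the now-dead frame on either side whichever direction the stack grows. A worked check of the bounds (assuming the 32-byte window implied by the old `buf[32]` declaration):

```c
#include <assert.h>

int main(void)
{
	unsigned char buf[128];
	/* Same expression as the patch: (128/2) - 32 == 32. */
	unsigned char *win = &buf[(128 / 2) - 32];

	assert(win - buf == 32);                /* 32 bytes of margin below */
	assert(&buf[128] - (win + 32) == 64);   /* 64 bytes of margin above */
	return 0;
}
```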
```diff
@@ -65,6 +70,12 @@
 		bad_stack = task_stack_page(current) + THREAD_SIZE;
 		bad_stack -= sizeof(unsigned long);
 	}
+
+#ifdef ARCH_HAS_CURRENT_STACK_POINTER
+	pr_info("stack     : %px\n", (void *)current_stack_pointer);
+#endif
+	pr_info("good_stack: %px-%px\n", good_stack, good_stack + sizeof(good_stack));
+	pr_info("bad_stack : %px-%px\n", bad_stack, bad_stack + sizeof(good_stack));
 
 	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
 			    PROT_READ | PROT_WRITE | PROT_EXEC,
```
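Note the `%px` format specifier: plain `%p` hashes pointer values to avoid leaking kernel addresses, which would make these lines useless for comparing a faulting copy address against the printed frame bounds. `%px` emits the raw address and is meant for exactly this kind of debug-only code; lkdtm is a deliberate crash-test module. A small sketch of the contrast (kernel context assumed, `show_frame` is an illustrative name):

```c
#include <linux/printk.h>
#include <linux/types.h>

static void show_frame(void *frame, size_t len)
{
	/* %p: hashed per boot; safe to expose, but not comparable. */
	pr_info("hashed frame: %p\n", frame);
	/* %px: raw address; acceptable only in debug/test code. */
	pr_info("raw frame   : %px-%px\n", frame, (char *)frame + len);
}
```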
```diff
@@ -304,19 +315,22 @@
 		return;
 	}
 
-	pr_info("attempting good copy_to_user from kernel rodata\n");
+	pr_info("attempting good copy_to_user from kernel rodata: %px\n",
+		test_text);
 	if (copy_to_user((void __user *)user_addr, test_text,
 			 unconst + sizeof(test_text))) {
 		pr_warn("copy_to_user failed unexpectedly?!\n");
 		goto free_user;
 	}
 
-	pr_info("attempting bad copy_to_user from kernel text\n");
-	if (copy_to_user((void __user *)user_addr, vm_mmap,
+	pr_info("attempting bad copy_to_user from kernel text: %px\n",
+		vm_mmap);
+	if (copy_to_user((void __user *)user_addr, __va_function(vm_mmap),
 			 unconst + PAGE_SIZE)) {
 		pr_warn("copy_to_user failed, but lacked Oops\n");
 		goto free_user;
 	}
+	pr_err("FAIL: survived bad copy_to_user()\n");
 
 free_user:
 	vm_munmap(user_addr, PAGE_SIZE);
```
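Two details change in the bad-copy path. First, the source operand becomes `__va_function(vm_mmap)`: under Clang CFI as deployed around this time, taking a function's address could yield a jump-table entry rather than the function body, so the "bad" copy would not actually read kernel text. `__va_function()` (later renamed `function_nocfi()`, if I recall correctly) recovers the real text address. My recollection of the generic fallback, shown as an illustration only:

```c
/* Illustrative sketch from memory, not authoritative: without CFI
 * jump tables the macro is an identity, while CFI-enabled arm64
 * overrode it to translate a jump-table entry back to the function
 * body, so that "vm_mmap" really points at kernel .text. */
#ifndef __va_function
#define __va_function(x) (x)
#endif
```

Second, the added `pr_err("FAIL: ...")` follows the usual lkdtm convention: the bad copy is expected to Oops or be rejected before returning, so reaching that line means the hardened usercopy check never fired.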