.. | .. |
---|
11 | 11 | #include <linux/sched/signal.h> |
---|
12 | 12 | #include <linux/sched/task_stack.h> |
---|
13 | 13 | #include <linux/uaccess.h> |
---|
| 14 | +#include <linux/slab.h> |
---|
| 15 | + |
---|
| 16 | +#if IS_ENABLED(CONFIG_X86_32) && !IS_ENABLED(CONFIG_UML) |
---|
| 17 | +#include <asm/desc.h> |
---|
| 18 | +#endif |
---|
14 | 19 | |
---|
15 | 20 | struct lkdtm_list { |
---|
16 | 21 | struct list_head node; |
---|
.. | .. |
---|
22 | 27 | * recurse past the end of THREAD_SIZE by default. |
---|
23 | 28 | */ |
---|
/*
 * When the compiler's frame-size warning threshold is configured, use
 * half of it per recursion frame so each frame stays under the limit.
 * _AC(..., UL) makes the constant unsigned long so callers can print it
 * with %lu (see lkdtm_EXHAUST_STACK). Otherwise fall back to a fraction
 * of the total kernel stack size.
 */
#if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN > 0)
#define REC_STACK_SIZE (_AC(CONFIG_FRAME_WARN, UL) / 2)
#else
#define REC_STACK_SIZE (THREAD_SIZE / 8)
#endif
---|
.. | .. |
---|
32 | 37 | |
---|
33 | 38 | static DEFINE_SPINLOCK(lock_me_up); |
---|
34 | 39 | |
---|
35 | | -static int recursive_loop(int remaining) |
---|
| 40 | +/* |
---|
| 41 | + * Make sure compiler does not optimize this function or stack frame away: |
---|
| 42 | + * - function marked noinline |
---|
| 43 | + * - stack variables are marked volatile |
---|
| 44 | + * - stack variables are written (memset()) and read (pr_info()) |
---|
| 45 | + * - function has external effects (pr_info()) |
---|
| 46 | + */ |
---|
| 47 | +static int noinline recursive_loop(int remaining) |
---|
36 | 48 | { |
---|
37 | | - char buf[REC_STACK_SIZE]; |
---|
| 49 | + volatile char buf[REC_STACK_SIZE]; |
---|
38 | 50 | |
---|
39 | | - /* Make sure compiler does not optimize this away. */ |
---|
40 | | - memset(buf, (remaining & 0xff) | 0x1, REC_STACK_SIZE); |
---|
| 51 | + memset((void *)buf, remaining & 0xFF, sizeof(buf)); |
---|
| 52 | + pr_info("loop %d/%d ...\n", (int)buf[remaining % sizeof(buf)], |
---|
| 53 | + recur_count); |
---|
41 | 54 | if (!remaining) |
---|
42 | 55 | return 0; |
---|
43 | 56 | else |
---|
.. | .. |
---|
67 | 80 | |
---|
/* Trigger a plain WARN_ON() splat (no message). */
void lkdtm_WARNING(void)
{
	/* Pre-increment yields a nonzero count (barring wraparound), so this fires. */
	WARN_ON(++warn_counter);
}
---|
| 85 | + |
---|
/* Trigger a WARN() splat that carries a formatted message. */
void lkdtm_WARNING_MESSAGE(void)
{
	WARN(1, "Warning message trigger count: %d\n", ++warn_counter);
}
---|
72 | 90 | |
---|
73 | 91 | void lkdtm_EXCEPTION(void) |
---|
.. | .. |
---|
81 | 99 | ; |
---|
82 | 100 | } |
---|
83 | 101 | |
---|
/*
 * Exhaust the kernel stack by recursing recur_count times with a
 * REC_STACK_SIZE-byte frame per call. Stack overflow protection (if
 * present) is expected to kill the task before the recursion returns;
 * reaching the final pr_info() is the failure case.
 */
void lkdtm_EXHAUST_STACK(void)
{
	pr_info("Calling function with %lu frame size to depth %d ...\n",
		REC_STACK_SIZE, recur_count);
	recursive_loop(recur_count);
	pr_info("FAIL: survived without exhausting stack?!\n");
}
---|
88 | 109 | |
---|
89 | 110 | static noinline void __lkdtm_CORRUPT_STACK(void *stack) |
---|
.. | .. |
---|
97 | 118 | /* Use default char array length that triggers stack protection. */ |
---|
98 | 119 | char data[8] __aligned(sizeof(void *)); |
---|
99 | 120 | |
---|
100 | | - __lkdtm_CORRUPT_STACK(&data); |
---|
101 | | - |
---|
102 | | - pr_info("Corrupted stack containing char array ...\n"); |
---|
| 121 | + pr_info("Corrupting stack containing char array ...\n"); |
---|
| 122 | + __lkdtm_CORRUPT_STACK((void *)&data); |
---|
103 | 123 | } |
---|
104 | 124 | |
---|
105 | 125 | /* Same as above but will only get a canary with -fstack-protector-strong */ |
---|
.. | .. |
---|
110 | 130 | unsigned long *ptr; |
---|
111 | 131 | } data __aligned(sizeof(void *)); |
---|
112 | 132 | |
---|
113 | | - __lkdtm_CORRUPT_STACK(&data); |
---|
114 | | - |
---|
115 | | - pr_info("Corrupted stack containing union ...\n"); |
---|
| 133 | + pr_info("Corrupting stack containing union ...\n"); |
---|
| 134 | + __lkdtm_CORRUPT_STACK((void *)&data); |
---|
116 | 135 | } |
---|
117 | 136 | |
---|
118 | 137 | void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void) |
---|
.. | .. |
---|
125 | 144 | if (*p == 0) |
---|
126 | 145 | val = 0x87654321; |
---|
127 | 146 | *p = val; |
---|
| 147 | + |
---|
| 148 | + if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) |
---|
| 149 | + pr_err("XFAIL: arch has CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS\n"); |
---|
128 | 150 | } |
---|
129 | 151 | |
---|
130 | 152 | void lkdtm_SOFTLOCKUP(void) |
---|
.. | .. |
---|
153 | 175 | { |
---|
154 | 176 | set_current_state(TASK_UNINTERRUPTIBLE); |
---|
155 | 177 | schedule(); |
---|
| 178 | +} |
---|
| 179 | + |
---|
/*
 * Operands for the integer-overflow tests below. "huge" sits just below
 * INT_MAX so a small addition crosses the signed boundary; results are
 * sunk into "ignored" so the arithmetic cannot be optimized away
 * (both are volatile for the same reason).
 *
 * static: these are used only by this file's overflow tests, so keep
 * them out of the kernel-wide namespace (sparse otherwise warns
 * "symbol was not declared. Should it be static?").
 */
static volatile unsigned int huge = INT_MAX - 2;
static volatile unsigned int ignored;
---|
| 182 | + |
---|
/*
 * Perform a signed addition that stays in range (INT_MAX - 2 + 1),
 * then one that overflows (INT_MAX - 1 + 4). The second addition is
 * undefined behavior that signed-overflow instrumentation should
 * report. Loads and stores go through the volatile globals so the
 * compiler cannot fold the arithmetic away.
 */
void lkdtm_OVERFLOW_SIGNED(void)
{
	int value;

	value = huge;
	pr_info("Normal signed addition ...\n");
	value += 1;
	ignored = value;

	pr_info("Overflowing signed addition ...\n");
	value += 4;
	ignored = value;
}
---|
| 196 | + |
---|
| 197 | + |
---|
/*
 * Unsigned counterpart of lkdtm_OVERFLOW_SIGNED: one "normal" addition
 * followed by one the overflow instrumentation is meant to flag.
 * Loads and stores go through the volatile globals so the compiler
 * cannot fold the arithmetic away.
 *
 * NOTE(review): with huge == INT_MAX - 2, "value += 4" crosses INT_MAX
 * but does NOT wrap past UINT_MAX; confirm this is the condition the
 * unsigned-overflow instrumentation is expected to catch.
 */
void lkdtm_OVERFLOW_UNSIGNED(void)
{
	unsigned int value;

	value = huge;
	pr_info("Normal unsigned addition ...\n");
	value += 1;
	ignored = value;

	pr_info("Overflowing unsigned addition ...\n");
	value += 4;
	ignored = value;
}
---|
| 211 | + |
---|
/* Intentionally using old-style flex array definition of 1 byte. */
struct array_bounds_flex_array {
	int one;
	int two;
	/* Treated as a flexible array member, so writes past index 0 are
	 * expected to be uninstrumented (see lkdtm_ARRAY_BOUNDS). */
	char data[1];
};
---|
| 218 | + |
---|
/* Fixed-size array member whose accesses bounds instrumentation can check. */
struct array_bounds {
	int one;
	int two;
	char data[8];
	/* Trailing member: a one-byte overflow of data[] still lands
	 * inside the struct's own storage, not in unrelated memory. */
	int three;
};
---|
| 225 | + |
---|
/*
 * Write within, then one byte beyond, the bounds of a fixed-size array
 * member. The out-of-bounds store on "checked" is expected to trip
 * array-bounds instrumentation and kill the task; reaching the final
 * pr_err() means the overflow went undetected. The flex-array write is
 * a control to verify that case stays uninstrumented.
 */
void lkdtm_ARRAY_BOUNDS(void)
{
	struct array_bounds_flex_array *not_checked;
	struct array_bounds *checked;
	/* volatile index so the loops and stores are not elided */
	volatile int i;

	/* Allocate two of each so the overflow writes land in owned memory. */
	not_checked = kmalloc(sizeof(*not_checked) * 2, GFP_KERNEL);
	checked = kmalloc(sizeof(*checked) * 2, GFP_KERNEL);
	if (!not_checked || !checked) {
		/* kfree(NULL) is a no-op, so partial failure is safe here. */
		kfree(not_checked);
		kfree(checked);
		return;
	}

	pr_info("Array access within bounds ...\n");
	/* For both, touch all bytes in the actual member size. */
	for (i = 0; i < sizeof(checked->data); i++)
		checked->data[i] = 'A';
	/*
	 * For the uninstrumented flex array member, also touch 1 byte
	 * beyond to verify it is correctly uninstrumented.
	 */
	for (i = 0; i < sizeof(not_checked->data) + 1; i++)
		not_checked->data[i] = 'A';

	pr_info("Array access beyond bounds ...\n");
	for (i = 0; i < sizeof(checked->data) + 1; i++)
		checked->data[i] = 'B';

	kfree(not_checked);
	kfree(checked);
	pr_err("FAIL: survived array bounds overflow!\n");
}
---|
156 | 258 | } |
---|
157 | 259 | |
---|
158 | 260 | void lkdtm_CORRUPT_LIST_ADD(void) |
---|
.. | .. |
---|
218 | 320 | pr_err("list_del() corruption not detected!\n"); |
---|
219 | 321 | } |
---|
220 | 322 | |
---|
221 | | -/* Test if unbalanced set_fs(KERNEL_DS)/set_fs(USER_DS) check exists. */ |
---|
222 | | -void lkdtm_CORRUPT_USER_DS(void) |
---|
223 | | -{ |
---|
224 | | - pr_info("setting bad task size limit\n"); |
---|
225 | | - set_fs(KERNEL_DS); |
---|
226 | | - |
---|
227 | | - /* Make sure we do not keep running with a KERNEL_DS! */ |
---|
228 | | - force_sig(SIGKILL, current); |
---|
229 | | -} |
---|
230 | | - |
---|
231 | 323 | /* Test that VMAP_STACK is actually allocating with a leading guard page */ |
---|
232 | 324 | void lkdtm_STACK_GUARD_PAGE_LEADING(void) |
---|
233 | 325 | { |
---|
.. | .. |
---|
239 | 331 | |
---|
240 | 332 | byte = *ptr; |
---|
241 | 333 | |
---|
242 | | - pr_err("FAIL: accessed page before stack!\n"); |
---|
| 334 | + pr_err("FAIL: accessed page before stack! (byte: %x)\n", byte); |
---|
243 | 335 | } |
---|
244 | 336 | |
---|
245 | 337 | /* Test that VMAP_STACK is actually allocating with a trailing guard page */ |
---|
.. | .. |
---|
253 | 345 | |
---|
254 | 346 | byte = *ptr; |
---|
255 | 347 | |
---|
256 | | - pr_err("FAIL: accessed page after stack!\n"); |
---|
| 348 | + pr_err("FAIL: accessed page after stack! (byte: %x)\n", byte); |
---|
| 349 | +} |
---|
| 350 | + |
---|
/*
 * Try to clear the SMEP bit in CR4: first through the normal
 * native_write_cr4() path (expected to be blocked by CR4 bit pinning),
 * then by calling directly at the "mov ... %cr4" instruction inside
 * native_write_cr4() to skip the pre-write pinning logic; the kernel's
 * post-write verification is then expected to restore the bit.
 */
void lkdtm_UNSET_SMEP(void)
{
#if IS_ENABLED(CONFIG_X86_64) && !IS_ENABLED(CONFIG_UML)
#define MOV_CR4_DEPTH 64
	void (*direct_write_cr4)(unsigned long val);
	unsigned char *insn;
	unsigned long cr4;
	int i;

	cr4 = native_read_cr4();

	if ((cr4 & X86_CR4_SMEP) != X86_CR4_SMEP) {
		pr_err("FAIL: SMEP not in use\n");
		return;
	}
	cr4 &= ~(X86_CR4_SMEP);

	pr_info("trying to clear SMEP normally\n");
	native_write_cr4(cr4);
	if (cr4 == native_read_cr4()) {
		/* Write took effect: pinning did not protect SMEP. */
		pr_err("FAIL: pinning SMEP failed!\n");
		cr4 |= X86_CR4_SMEP;
		pr_info("restoring SMEP\n");
		native_write_cr4(cr4);
		return;
	}
	pr_info("ok: SMEP did not get cleared\n");

	/*
	 * To test the post-write pinning verification we need to call
	 * directly into the middle of native_write_cr4() where the
	 * cr4 write happens, skipping any pinning. This searches for
	 * the cr4 writing instruction.
	 *
	 * NOTE(review): the second pattern reads up to insn[i+5]
	 * (i.e. insn[MOV_CR4_DEPTH + 4] worst case) — presumably still
	 * within native_write_cr4()'s body; confirm for new compilers.
	 */
	insn = (unsigned char *)native_write_cr4;
	for (i = 0; i < MOV_CR4_DEPTH; i++) {
		/* mov %rdi, %cr4 */
		if (insn[i] == 0x0f && insn[i+1] == 0x22 && insn[i+2] == 0xe7)
			break;
		/* mov %rdi,%rax; mov %rax, %cr4 */
		if (insn[i] == 0x48 && insn[i+1] == 0x89 &&
		    insn[i+2] == 0xf8 && insn[i+3] == 0x0f &&
		    insn[i+4] == 0x22 && insn[i+5] == 0xe0)
			break;
	}
	if (i >= MOV_CR4_DEPTH) {
		pr_info("ok: cannot locate cr4 writing call gadget\n");
		return;
	}
	direct_write_cr4 = (void *)(insn + i);

	pr_info("trying to clear SMEP with call gadget\n");
	direct_write_cr4(cr4);
	if (native_read_cr4() & X86_CR4_SMEP) {
		pr_info("ok: SMEP removal was reverted\n");
	} else {
		pr_err("FAIL: cleared SMEP not detected!\n");
		cr4 |= X86_CR4_SMEP;
		pr_info("restoring SMEP\n");
		native_write_cr4(cr4);
	}
#else
	pr_err("XFAIL: this test is x86_64-only\n");
#endif
}
---|
| 416 | + |
---|
/* On 32-bit x86 (non-UML), force a double fault via a zero-limit SS segment. */
void lkdtm_DOUBLE_FAULT(void)
{
#if IS_ENABLED(CONFIG_X86_32) && !IS_ENABLED(CONFIG_UML)
	/*
	 * Trigger #DF by setting the stack limit to zero. This clobbers
	 * a GDT TLS slot, which is okay because the current task will die
	 * anyway due to the double fault.
	 */
	struct desc_struct d = {
		.type = 3, /* expand-up, writable, accessed data */
		.p = 1, /* present */
		.d = 1, /* 32-bit */
		.g = 0, /* limit in bytes */
		.s = 1, /* not system */
	};

	local_irq_disable();
	write_gdt_entry(get_cpu_gdt_rw(smp_processor_id()),
			GDT_ENTRY_TLS_MIN, &d, DESCTYPE_S);

	/*
	 * Put our zero-limit segment in SS and then trigger a fault. The
	 * 4-byte access to (%esp) will fault with #SS, and the attempt to
	 * deliver the fault will recursively cause #SS and result in #DF.
	 * This whole process happens while NMIs and MCEs are blocked by the
	 * MOV SS window. This is nice because an NMI with an invalid SS
	 * would also double-fault, resulting in the NMI or MCE being lost.
	 */
	asm volatile ("movw %0, %%ss; addl $0, (%%esp)" ::
		      "r" ((unsigned short)(GDT_ENTRY_TLS_MIN << 3)));

	pr_err("FAIL: tried to double fault but didn't die\n");
#else
	pr_err("XFAIL: this test is ia32-only\n");
#endif
}
---|
| 453 | + |
---|
#ifdef CONFIG_ARM64
/*
 * Re-initialize and switch to fresh kernel pointer-authentication keys
 * for the current task, so return addresses already signed with the old
 * keys will fail authentication. noinline so the caller's own return
 * address is what gets invalidated (see lkdtm_CORRUPT_PAC).
 */
static noinline void change_pac_parameters(void)
{
	if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH)) {
		/* Reset the keys of current task */
		ptrauth_thread_init_kernel(current);
		ptrauth_thread_switch_kernel(current);
	}
}
#endif
---|
| 464 | + |
---|
/*
 * Change the task's pointer-authentication keys mid-function so this
 * function's already-signed return address fails authentication on
 * return. Surviving to the final pr_err() means the corruption was not
 * detected (or a hash collision occurred).
 */
noinline void lkdtm_CORRUPT_PAC(void)
{
#ifdef CONFIG_ARM64
#define CORRUPT_PAC_ITERATE 10
	int i;

	if (!IS_ENABLED(CONFIG_ARM64_PTR_AUTH))
		pr_err("FAIL: kernel not built with CONFIG_ARM64_PTR_AUTH\n");

	if (!system_supports_address_auth()) {
		pr_err("FAIL: CPU lacks pointer authentication feature\n");
		return;
	}

	pr_info("changing PAC parameters to force function return failure...\n");
	/*
	 * PAC is a hash value computed from input keys, return address and
	 * stack pointer. As pac has fewer bits so there is a chance of
	 * collision, so iterate few times to reduce the collision probability.
	 */
	for (i = 0; i < CORRUPT_PAC_ITERATE; i++)
		change_pac_parameters();

	pr_err("FAIL: survived PAC changes! Kernel may be unstable from here\n");
#else
	pr_err("XFAIL: this test is arm64-only\n");
#endif
}
---|