.. | .. |
---|
38 | 38 | #include <linux/jiffies.h> |
---|
39 | 39 | #include <linux/random.h> |
---|
40 | 40 | #include <linux/sched.h> |
---|
| 41 | +#include <linux/bitops.h> |
---|
| 42 | +#include <linux/slab.h> |
---|
| 43 | +#include <linux/notifier.h> |
---|
41 | 44 | #include <asm/unaligned.h> |
---|
42 | 45 | |
---|
43 | 46 | /** |
---|
.. | .. |
---|
336 | 339 | }; |
---|
337 | 340 | |
---|
/*
 * Per-CPU generator state. __latent_entropy lets the GCC latent-entropy
 * plugin (if enabled) scramble the initial contents at build/boot time.
 */
static DEFINE_PER_CPU(struct siprand_state, net_rand_state) __latent_entropy;
/*
 * Per-CPU noise word, exported so hot paths elsewhere in the kernel can
 * cheaply mix timing or other ancillary noise into it; siprand_u32() XORs
 * its current value into the generator state on every output.
 */
DEFINE_PER_CPU(unsigned long, net_rand_noise);
EXPORT_PER_CPU_SYMBOL(net_rand_noise);
---|
340 | 345 | /* |
---|
341 | 346 | * This is the core CPRNG function. As "pseudorandom", this is not used |
---|
.. | .. |
---|
/*
 * Produce one 32-bit pseudorandom value and advance this CPU's state.
 * The accumulated per-CPU noise is injected around the two SipHash-style
 * rounds, SipHash-message fashion: XORed into v3 before the rounds and
 * into v0 after, so the noise perturbs the keystream without being
 * directly recoverable from a single output.
 */
static inline u32 siprand_u32(struct siprand_state *s)
{
	unsigned long v0 = s->v0, v1 = s->v1, v2 = s->v2, v3 = s->v3;
	unsigned long n = raw_cpu_read(net_rand_noise);

	v3 ^= n;
	PRND_SIPROUND(v0, v1, v2, v3);
	PRND_SIPROUND(v0, v1, v2, v3);
	v0 ^= n;
	s->v0 = v0; s->v1 = v1; s->v2 = v2; s->v3 = v3;
	return v1 + v3;
}
---|
.. | .. |
---|
544 | 552 | * To avoid worrying about whether it's safe to delay that interrupt |
---|
545 | 553 | * long enough to seed all CPUs, just schedule an immediate timer event. |
---|
546 | 554 | */ |
---|
/*
 * Random-ready notifier callback: kick the reseed timer immediately so
 * every CPU is reseeded from the strong RNG as soon as it is available.
 *
 * @nb, @action and @data are required by the notifier_block signature but
 * are not used here. Returns 0 (i.e. NOTIFY_DONE in notifier terms).
 */
static int prandom_timer_start(struct notifier_block *nb,
			       unsigned long action, void *data)
{
	mod_timer(&seed_timer, jiffies);
	return 0;
}
---|
| 561 | + |
---|
#ifdef CONFIG_RANDOM32_SELFTEST
/* Principle: True 32-bit random numbers will all have 16 differing bits on
 * average. For each 32-bit number, there are 601M numbers differing by 16
 * bits, and 89% of the numbers differ by at least 12 bits. Note that more
 * than 16 differing bits also implies a correlation with inverted bits. Thus
 * we take 1024 random numbers and compare each of them to the other ones,
 * counting the deviation of correlated bits to 16. Constants report 32,
 * counters 32-log2(TEST_SIZE), and pure randoms, around 6 or lower. With the
 * u32 total, TEST_SIZE may be as large as 4096 samples.
 */
#define TEST_SIZE 1024
static int __init prandom32_state_selftest(void)
{
	unsigned int x, y, bits, samples;
	u32 xor, flip;
	u32 total;
	u32 *data;

	/* kmalloc_array() checks the count * size multiplication for overflow. */
	data = kmalloc_array(TEST_SIZE, sizeof(*data), GFP_KERNEL);
	if (!data)
		return 0;	/* best-effort selftest: skip quietly on OOM */

	for (samples = 0; samples < TEST_SIZE; samples++)
		data[samples] = prandom_u32();

	flip = total = 0;
	for (x = 0; x < samples; x++) {
		for (y = 0; y < samples; y++) {
			if (x == y)
				continue;
			xor = data[x] ^ data[y];
			flip |= xor;	/* accumulate bits seen to change at all */
			bits = hweight32(xor);
			/* squared deviation from the expected 16 differing bits */
			total += (bits - 16) * (bits - 16);
		}
	}

	/* We'll return the average deviation as 2*sqrt(corr/samples), which
	 * is also sqrt(4*corr/samples) which provides a better resolution.
	 * NOTE(review): the division truncates before the multiply by 4, so
	 * part of that extra resolution is lost; kept as-is to preserve the
	 * established pass/fail threshold.
	 */
	bits = int_sqrt(total / (samples * (samples - 1)) * 4);
	if (bits > 6)
		pr_warn("prandom32: self test failed (at least %u bits correlated, fixed_mask=%#x fixed_value=%#x)\n",
			bits, ~flip, data[0] & ~flip);
	else
		pr_info("prandom32: self test passed (less than %u bits correlated)\n",
			bits + 1);
	kfree(data);
	return 0;
}
core_initcall(prandom32_state_selftest);
#endif /* CONFIG_RANDOM32_SELFTEST */
---|
551 | 616 | |
---|
552 | 617 | /* |
---|
553 | 618 | * Start periodic full reseeding as soon as strong |
---|
.. | .. |
---|
555 | 620 | */ |
---|
556 | 621 | static int __init prandom_init_late(void) |
---|
557 | 622 | { |
---|
558 | | - static struct random_ready_callback random_ready = { |
---|
559 | | - .func = prandom_timer_start |
---|
| 623 | + static struct notifier_block random_ready = { |
---|
| 624 | + .notifier_call = prandom_timer_start |
---|
560 | 625 | }; |
---|
561 | | - int ret = add_random_ready_callback(&random_ready); |
---|
| 626 | + int ret = register_random_ready_notifier(&random_ready); |
---|
562 | 627 | |
---|
563 | 628 | if (ret == -EALREADY) { |
---|
564 | | - prandom_timer_start(&random_ready); |
---|
| 629 | + prandom_timer_start(&random_ready, 0, NULL); |
---|
565 | 630 | ret = 0; |
---|
566 | 631 | } |
---|
567 | 632 | return ret; |
---|