```diff
@@ -439,7 +439,11 @@
 		phys_addr_t addr = page_start + i * PAGE_SIZE;
 		pages[i] = pfn_to_page(addr >> PAGE_SHIFT);
 	}
-	vaddr = vmap(pages, page_count, VM_MAP, prot);
+	/*
+	 * VM_IOREMAP used here to bypass this region during vread()
+	 * and kmap_atomic() (i.e. kcore) to avoid __va() failures.
+	 */
+	vaddr = vmap(pages, page_count, VM_MAP | VM_IOREMAP, prot);
 	kfree(pages);
 
 	/*
```
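
The new VM_IOREMAP flag works because vread() in mm/vmalloc.c treats VM_IOREMAP areas as memory holes: a /proc/kcore read zero-fills them instead of copying their pages via kmap_atomic(), so persistent RAM pages that lack a linear-map (__va()) alias are never touched. A minimal sketch of the same mapping pattern, with map_reserved_region() as a hypothetical helper name (not the pstore code itself):

```c
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

static void *map_reserved_region(phys_addr_t start, unsigned int page_count,
				 pgprot_t prot)
{
	struct page **pages;
	void *vaddr;
	unsigned int i;

	pages = kmalloc_array(page_count, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		return NULL;

	for (i = 0; i < page_count; i++)
		pages[i] = pfn_to_page((start >> PAGE_SHIFT) + i);

	/*
	 * VM_IOREMAP marks the area as a hole for vread(), so a
	 * /proc/kcore read zero-fills it instead of calling
	 * kmap_atomic() on pages that may have no __va() alias.
	 */
	vaddr = vmap(pages, page_count, VM_MAP | VM_IOREMAP, prot);
	kfree(pages);
	return vaddr;
}
```
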
```diff
@@ -514,7 +518,7 @@
 	sig ^= PERSISTENT_RAM_SIG;
 
 	if (prz->buffer->sig == sig) {
-		if (buffer_size(prz) == 0) {
+		if (buffer_size(prz) == 0 && buffer_start(prz) == 0) {
 			pr_debug("found existing empty buffer\n");
 			return 0;
 		}
```
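
The old test accepted any zone whose stored size was zero as a clean empty buffer. The stricter test presumably guards against an inconsistent header: a zone claiming size 0 while its start position is nonzero now falls through to reinitialization rather than being trusted. A sketch of the invariant, using hypothetical names in place of the real persistent_ram_zone structures:

```c
#include <linux/types.h>

/* prz_hdr and prz_is_empty() are illustrative stand-ins. */
struct prz_hdr {
	u32 sig;
	u32 start;	/* ring-buffer write position */
	u32 size;	/* bytes currently stored */
};

static bool prz_is_empty(const struct prz_hdr *hdr)
{
	/*
	 * Only size == 0 together with start == 0 is a clean, empty
	 * buffer; size == 0 with a nonzero start is an inconsistent
	 * header, and the zone should be reinitialized instead.
	 */
	return hdr->size == 0 && hdr->start == 0;
}
```
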
```diff
@@ -587,6 +591,8 @@
 	raw_spin_lock_init(&prz->buffer_lock);
 	prz->flags = flags;
 	prz->label = kstrdup(label, GFP_KERNEL);
+	if (!prz->label)
+		goto err;
 
 	ret = persistent_ram_buffer_map(start, size, prz, memtype);
 	if (ret)
```
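
kstrdup() can fail under memory pressure and returns NULL, so the duplicated label must be checked like any other allocation; the new lines jump to the function's existing err unwind path. A minimal sketch of the same allocate-then-unwind pattern, with my_zone and my_zone_new() as hypothetical names rather than pstore API:

```c
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>

struct my_zone {
	char *label;
};

static struct my_zone *my_zone_new(const char *label)
{
	struct my_zone *z;

	z = kzalloc(sizeof(*z), GFP_KERNEL);
	if (!z)
		goto err;

	/* kstrdup() allocates, so a NULL result must unwind too. */
	z->label = kstrdup(label, GFP_KERNEL);
	if (!z->label)
		goto err;

	return z;
err:
	kfree(z);	/* kfree(NULL) is a no-op */
	return ERR_PTR(-ENOMEM);
}
```
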