```
  ..    ..
 255  255  }
 256  256  
 257  257  static bool
 258       -nvkm_vmm_unref_ptes(struct nvkm_vmm_iter *it, u32 ptei, u32 ptes)
      258 +nvkm_vmm_unref_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
 259  259  {
 260  260      const struct nvkm_vmm_desc *desc = it->desc;
 261  261      const int type = desc->type == SPT;
 262  262      struct nvkm_vmm_pt *pgt = it->pt[0];
      263 +    bool dma;
      264 +
      265 +    if (pfn) {
      266 +        /* Need to clear PTE valid bits before we dma_unmap_page(). */
      267 +        dma = desc->func->pfn_clear(it->vmm, pgt->pt[type], ptei, ptes);
      268 +        if (dma) {
      269 +            /* GPU may have cached the PT, flush before unmap. */
      270 +            nvkm_vmm_flush_mark(it);
      271 +            nvkm_vmm_flush(it);
      272 +            desc->func->pfn_unmap(it->vmm, pgt->pt[type], ptei, ptes);
      273 +        }
      274 +    }
 263  275  
 264  276      /* Drop PTE references. */
 265  277      pgt->refs[type] -= ptes;
```
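The new `pfn` path above tears a direct (PFN-based) mapping down in a fixed order: clear the PTE valid bits, flush the GPU's cached translations, and only then release the DMA mapping. A minimal userspace model of that ordering, using hypothetical stand-in types rather than the kernel structures, might look like this:

```c
/* Simplified sketch of the teardown ordering in nvkm_vmm_unref_ptes():
 * the valid bit is cleared and cached translations are flushed before
 * the DMA mapping is released, so the GPU can never walk a PTE that
 * points at an already-unmapped page.  Illustrative only, not driver code.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_pte { bool valid; bool dma_mapped; };

static void flush_translations(void)      /* stands in for nvkm_vmm_flush() */
{
	puts("flush");
}

static void dma_unmap(struct fake_pte *p) /* stands in for pfn_unmap()/dma_unmap_page() */
{
	p->dma_mapped = false;
}

static void unref_pfn_pte(struct fake_pte *p)
{
	if (p->valid) {
		p->valid = false;         /* 1: PTE can no longer be used   */
		flush_translations();     /* 2: drop any cached translation */
	}
	if (p->dma_mapped)
		dma_unmap(p);             /* 3: now safe to release for DMA */
}

int main(void)
{
	struct fake_pte pte = { .valid = true, .dma_mapped = true };
	unref_pfn_pte(&pte);
	return 0;
}
```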
```
  ..    ..
 349  361  }
 350  362  
 351  363  static bool
 352       -nvkm_vmm_ref_ptes(struct nvkm_vmm_iter *it, u32 ptei, u32 ptes)
      364 +nvkm_vmm_ref_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
 353  365  {
 354  366      const struct nvkm_vmm_desc *desc = it->desc;
 355  367      const int type = desc->type == SPT;
  ..    ..
 379  391  }
 380  392  
 381  393  static bool
 382       -nvkm_vmm_sparse_unref_ptes(struct nvkm_vmm_iter *it, u32 ptei, u32 ptes)
      394 +nvkm_vmm_sparse_unref_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
 383  395  {
 384  396      struct nvkm_vmm_pt *pt = it->pt[0];
 385  397      if (it->desc->type == PGD)
  ..    ..
 387  399      else
 388  400      if (it->desc->type == LPT)
 389  401          memset(&pt->pte[ptei], 0x00, sizeof(pt->pte[0]) * ptes);
 390       -    return nvkm_vmm_unref_ptes(it, ptei, ptes);
      402 +    return nvkm_vmm_unref_ptes(it, pfn, ptei, ptes);
 391  403  }
 392  404  
 393  405  static bool
 394       -nvkm_vmm_sparse_ref_ptes(struct nvkm_vmm_iter *it, u32 ptei, u32 ptes)
      406 +nvkm_vmm_sparse_ref_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
 395  407  {
 396  408      nvkm_vmm_sparse_ptes(it->desc, it->pt[0], ptei, ptes);
 397       -    return nvkm_vmm_ref_ptes(it, ptei, ptes);
      409 +    return nvkm_vmm_ref_ptes(it, pfn, ptei, ptes);
 398  410  }
 399  411  
 400  412  static bool
  ..    ..
 487  499  
 488  500  static inline u64
 489  501  nvkm_vmm_iter(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
 490       -              u64 addr, u64 size, const char *name, bool ref,
 491       -              bool (*REF_PTES)(struct nvkm_vmm_iter *, u32, u32),
      502 +              u64 addr, u64 size, const char *name, bool ref, bool pfn,
      503 +              bool (*REF_PTES)(struct nvkm_vmm_iter *, bool pfn, u32, u32),
 492  504                nvkm_vmm_pte_func MAP_PTES, struct nvkm_vmm_map *map,
 493  505                nvkm_vmm_pxe_func CLR_PTES)
 494  506  {
  ..    ..
 548  560          }
 549  561  
 550  562          /* Handle PTE updates. */
 551       -        if (!REF_PTES || REF_PTES(&it, ptei, ptes)) {
      563 +        if (!REF_PTES || REF_PTES(&it, pfn, ptei, ptes)) {
 552  564              struct nvkm_mmu_pt *pt = pgt->pt[type];
 553  565              if (MAP_PTES || CLR_PTES) {
 554  566                  if (MAP_PTES)
  ..    ..
 568  580                  it.pte[it.lvl]++;
 569  581              }
 570  582          }
 571       -    };
      583 +    }
 572  584  
 573  585      nvkm_vmm_flush(&it);
 574  586      return ~0ULL;
  ..    ..
 590  602  nvkm_vmm_ptes_sparse_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
 591  603                           u64 addr, u64 size)
 592  604  {
 593       -    nvkm_vmm_iter(vmm, page, addr, size, "sparse unref", false,
      605 +    nvkm_vmm_iter(vmm, page, addr, size, "sparse unref", false, false,
 594  606                    nvkm_vmm_sparse_unref_ptes, NULL, NULL,
 595  607                    page->desc->func->invalid ?
 596  608                    page->desc->func->invalid : page->desc->func->unmap);
  ..    ..
 602  614  {
 603  615      if ((page->type & NVKM_VMM_PAGE_SPARSE)) {
 604  616          u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "sparse ref",
 605       -                                 true, nvkm_vmm_sparse_ref_ptes, NULL,
 606       -                                 NULL, page->desc->func->sparse);
      617 +                                 true, false, nvkm_vmm_sparse_ref_ptes,
      618 +                                 NULL, NULL, page->desc->func->sparse);
 607  619          if (fail != ~0ULL) {
 608  620              if ((size = fail - addr))
 609  621                  nvkm_vmm_ptes_sparse_put(vmm, page, addr, size);
  ..    ..
 666  678  
 667  679  static void
 668  680  nvkm_vmm_ptes_unmap_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
 669       -                        u64 addr, u64 size, bool sparse)
      681 +                        u64 addr, u64 size, bool sparse, bool pfn)
 670  682  {
 671  683      const struct nvkm_vmm_desc_func *func = page->desc->func;
 672  684      nvkm_vmm_iter(vmm, page, addr, size, "unmap + unref",
 673       -                  false, nvkm_vmm_unref_ptes, NULL, NULL,
      685 +                  false, pfn, nvkm_vmm_unref_ptes, NULL, NULL,
 674  686                    sparse ? func->sparse : func->invalid ? func->invalid :
 675  687                    func->unmap);
 676  688  }
  ..    ..
 681  693                           nvkm_vmm_pte_func func)
 682  694  {
 683  695      u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "ref + map", true,
 684       -                             nvkm_vmm_ref_ptes, func, map, NULL);
      696 +                             false, nvkm_vmm_ref_ptes, func, map, NULL);
 685  697      if (fail != ~0ULL) {
 686  698          if ((size = fail - addr))
 687       -            nvkm_vmm_ptes_unmap_put(vmm, page, addr, size, false);
      699 +            nvkm_vmm_ptes_unmap_put(vmm, page, addr, size, false, false);
 688  700          return -ENOMEM;
 689  701      }
 690  702      return 0;
  ..    ..
 692  704  
 693  705  static void
 694  706  nvkm_vmm_ptes_unmap(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
 695       -                    u64 addr, u64 size, bool sparse)
      707 +                    u64 addr, u64 size, bool sparse, bool pfn)
 696  708  {
 697  709      const struct nvkm_vmm_desc_func *func = page->desc->func;
 698       -    nvkm_vmm_iter(vmm, page, addr, size, "unmap", false, NULL, NULL, NULL,
      710 +    nvkm_vmm_iter(vmm, page, addr, size, "unmap", false, pfn,
      711 +                  NULL, NULL, NULL,
 699  712                    sparse ? func->sparse : func->invalid ? func->invalid :
 700  713                    func->unmap);
 701  714  }
  ..    ..
 705  718                        u64 addr, u64 size, struct nvkm_vmm_map *map,
 706  719                        nvkm_vmm_pte_func func)
 707  720  {
 708       -    nvkm_vmm_iter(vmm, page, addr, size, "map", false,
      721 +    nvkm_vmm_iter(vmm, page, addr, size, "map", false, false,
 709  722                    NULL, func, map, NULL);
 710  723  }
 711  724  
  ..    ..
 713  726  nvkm_vmm_ptes_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
 714  727                    u64 addr, u64 size)
 715  728  {
 716       -    nvkm_vmm_iter(vmm, page, addr, size, "unref", false,
      729 +    nvkm_vmm_iter(vmm, page, addr, size, "unref", false, false,
 717  730                    nvkm_vmm_unref_ptes, NULL, NULL, NULL);
 718  731  }
 719  732  
  ..    ..
 721  734  nvkm_vmm_ptes_get(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
 722  735                    u64 addr, u64 size)
 723  736  {
 724       -    u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "ref", true,
      737 +    u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "ref", true, false,
 725  738                               nvkm_vmm_ref_ptes, NULL, NULL, NULL);
 726  739      if (fail != ~0ULL) {
 727  740          if (fail != addr)
  ..    ..
 763  776      new->part = vma->part;
 764  777      new->user = vma->user;
 765  778      new->busy = vma->busy;
      779 +    new->mapped = vma->mapped;
 766  780      list_add(&new->head, &vma->head);
 767  781      return new;
      782 +}
      783 +
      784 +static inline void
      785 +nvkm_vmm_free_remove(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
      786 +{
      787 +    rb_erase(&vma->tree, &vmm->free);
      788 +}
      789 +
      790 +static inline void
      791 +nvkm_vmm_free_delete(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
      792 +{
      793 +    nvkm_vmm_free_remove(vmm, vma);
      794 +    list_del(&vma->head);
      795 +    kfree(vma);
 768  796  }
 769  797  
 770  798  static void
  ..    ..
 795  823      rb_insert_color(&vma->tree, &vmm->free);
 796  824  }
 797  825  
 798       -void
      826 +static inline void
      827 +nvkm_vmm_node_remove(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
      828 +{
      829 +    rb_erase(&vma->tree, &vmm->root);
      830 +}
      831 +
      832 +static inline void
      833 +nvkm_vmm_node_delete(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
      834 +{
      835 +    nvkm_vmm_node_remove(vmm, vma);
      836 +    list_del(&vma->head);
      837 +    kfree(vma);
      838 +}
      839 +
      840 +static void
 799  841  nvkm_vmm_node_insert(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
 800  842  {
 801  843      struct rb_node **ptr = &vmm->root.rb_node;
  ..    ..
 834  876          return NULL;
 835  877  }
 836  878  
      879 +#define node(root, dir) (((root)->head.dir == &vmm->list) ? NULL :           \
      880 +    list_entry((root)->head.dir, struct nvkm_vma, head))
      881 +
      882 +static struct nvkm_vma *
      883 +nvkm_vmm_node_merge(struct nvkm_vmm *vmm, struct nvkm_vma *prev,
      884 +                    struct nvkm_vma *vma, struct nvkm_vma *next, u64 size)
      885 +{
      886 +    if (next) {
      887 +        if (vma->size == size) {
      888 +            vma->size += next->size;
      889 +            nvkm_vmm_node_delete(vmm, next);
      890 +            if (prev) {
      891 +                prev->size += vma->size;
      892 +                nvkm_vmm_node_delete(vmm, vma);
      893 +                return prev;
      894 +            }
      895 +            return vma;
      896 +        }
      897 +        BUG_ON(prev);
      898 +
      899 +        nvkm_vmm_node_remove(vmm, next);
      900 +        vma->size -= size;
      901 +        next->addr -= size;
      902 +        next->size += size;
      903 +        nvkm_vmm_node_insert(vmm, next);
      904 +        return next;
      905 +    }
      906 +
      907 +    if (prev) {
      908 +        if (vma->size != size) {
      909 +            nvkm_vmm_node_remove(vmm, vma);
      910 +            prev->size += size;
      911 +            vma->addr += size;
      912 +            vma->size -= size;
      913 +            nvkm_vmm_node_insert(vmm, vma);
      914 +        } else {
      915 +            prev->size += vma->size;
      916 +            nvkm_vmm_node_delete(vmm, vma);
      917 +        }
      918 +        return prev;
      919 +    }
      920 +
      921 +    return vma;
      922 +}
      923 +
      924 +struct nvkm_vma *
      925 +nvkm_vmm_node_split(struct nvkm_vmm *vmm,
      926 +                    struct nvkm_vma *vma, u64 addr, u64 size)
      927 +{
      928 +    struct nvkm_vma *prev = NULL;
      929 +
      930 +    if (vma->addr != addr) {
      931 +        prev = vma;
      932 +        if (!(vma = nvkm_vma_tail(vma, vma->size + vma->addr - addr)))
      933 +            return NULL;
      934 +        vma->part = true;
      935 +        nvkm_vmm_node_insert(vmm, vma);
      936 +    }
      937 +
      938 +    if (vma->size != size) {
      939 +        struct nvkm_vma *tmp;
      940 +        if (!(tmp = nvkm_vma_tail(vma, vma->size - size))) {
      941 +            nvkm_vmm_node_merge(vmm, prev, vma, NULL, vma->size);
      942 +            return NULL;
      943 +        }
      944 +        tmp->part = true;
      945 +        nvkm_vmm_node_insert(vmm, tmp);
      946 +    }
      947 +
      948 +    return vma;
      949 +}
      950 +
```
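Taken together, `nvkm_vmm_node_split()` carves a `[addr, addr + size)` window out of an existing allocated-tree node (possibly leaving `part` fragments on either side), and `nvkm_vmm_node_merge()` folds compatible neighbours back together. A minimal userspace model of just the split arithmetic, with hypothetical plain structs rather than the kernel's `struct nvkm_vma`, is:

```c
/* Sketch of the address/size arithmetic behind nvkm_vmm_node_split():
 * carving [addr, addr + size) out of a node may leave a head fragment
 * and a tail fragment.  Illustrative only.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct vma { uint64_t addr, size; };

static void split(const struct vma *v, uint64_t addr, uint64_t size,
		  struct vma *head, struct vma *mid, struct vma *tail)
{
	*head = (struct vma){ v->addr, addr - v->addr };   /* may be empty */
	*mid  = (struct vma){ addr, size };                 /* the window   */
	*tail = (struct vma){ addr + size,
			      v->addr + v->size - (addr + size) };
}

int main(void)
{
	struct vma v = { 0x1000, 0x9000 }, head, mid, tail;

	split(&v, 0x3000, 0x2000, &head, &mid, &tail);
	printf("head %llx+%llx mid %llx+%llx tail %llx+%llx\n",
	       (unsigned long long)head.addr, (unsigned long long)head.size,
	       (unsigned long long)mid.addr,  (unsigned long long)mid.size,
	       (unsigned long long)tail.addr, (unsigned long long)tail.size);
	assert(head.size + mid.size + tail.size == v.size);
	return 0;
}
```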
```
      951 +static void
      952 +nvkm_vma_dump(struct nvkm_vma *vma)
      953 +{
      954 +    printk(KERN_ERR "%016llx %016llx %c%c%c%c%c%c%c%c%c %p\n",
      955 +           vma->addr, (u64)vma->size,
      956 +           vma->used ? '-' : 'F',
      957 +           vma->mapref ? 'R' : '-',
      958 +           vma->sparse ? 'S' : '-',
      959 +           vma->page != NVKM_VMA_PAGE_NONE ? '0' + vma->page : '-',
      960 +           vma->refd != NVKM_VMA_PAGE_NONE ? '0' + vma->refd : '-',
      961 +           vma->part ? 'P' : '-',
      962 +           vma->user ? 'U' : '-',
      963 +           vma->busy ? 'B' : '-',
      964 +           vma->mapped ? 'M' : '-',
      965 +           vma->memory);
      966 +}
      967 +
      968 +static void
      969 +nvkm_vmm_dump(struct nvkm_vmm *vmm)
      970 +{
      971 +    struct nvkm_vma *vma;
      972 +    list_for_each_entry(vma, &vmm->list, head) {
      973 +        nvkm_vma_dump(vma);
      974 +    }
      975 +}
      976 +
 837  977  static void
 838  978  nvkm_vmm_dtor(struct nvkm_vmm *vmm)
 839  979  {
 840  980      struct nvkm_vma *vma;
 841  981      struct rb_node *node;
      982 +
      983 +    if (0)
      984 +        nvkm_vmm_dump(vmm);
 842  985  
 843  986      while ((node = rb_first(&vmm->root))) {
 844  987          struct nvkm_vma *vma = rb_entry(node, typeof(*vma), tree);
  ..    ..
 872 1015      }
 873 1016  }
 874 1017  
 875       -int
     1018 +static int
     1019 +nvkm_vmm_ctor_managed(struct nvkm_vmm *vmm, u64 addr, u64 size)
     1020 +{
     1021 +    struct nvkm_vma *vma;
     1022 +    if (!(vma = nvkm_vma_new(addr, size)))
     1023 +        return -ENOMEM;
     1024 +    vma->mapref = true;
     1025 +    vma->sparse = false;
     1026 +    vma->used = true;
     1027 +    vma->user = true;
     1028 +    nvkm_vmm_node_insert(vmm, vma);
     1029 +    list_add_tail(&vma->head, &vmm->list);
     1030 +    return 0;
     1031 +}
     1032 +
     1033 +static int
 876 1034  nvkm_vmm_ctor(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
 877       -              u32 pd_header, u64 addr, u64 size, struct lock_class_key *key,
 878       -              const char *name, struct nvkm_vmm *vmm)
     1035 +              u32 pd_header, bool managed, u64 addr, u64 size,
     1036 +              struct lock_class_key *key, const char *name,
     1037 +              struct nvkm_vmm *vmm)
 879 1038  {
 880 1039      static struct lock_class_key _key;
 881 1040      const struct nvkm_vmm_page *page = func->page;
 882 1041      const struct nvkm_vmm_desc *desc;
 883 1042      struct nvkm_vma *vma;
 884       -    int levels, bits = 0;
     1043 +    int levels, bits = 0, ret;
 885 1044  
 886 1045      vmm->func = func;
 887 1046      vmm->mmu = mmu;
  ..    ..
 909 1068      if (WARN_ON(levels > NVKM_VMM_LEVELS_MAX))
 910 1069          return -EINVAL;
 911 1070  
 912       -    vmm->start = addr;
 913       -    vmm->limit = size ? (addr + size) : (1ULL << bits);
 914       -    if (vmm->start > vmm->limit || vmm->limit > (1ULL << bits))
 915       -        return -EINVAL;
 916       -
 917 1071      /* Allocate top-level page table. */
 918 1072      vmm->pd = nvkm_vmm_pt_new(desc, false, NULL);
 919 1073      if (!vmm->pd)
  ..    ..
 936 1090      vmm->free = RB_ROOT;
 937 1091      vmm->root = RB_ROOT;
 938 1092  
 939       -    if (!(vma = nvkm_vma_new(vmm->start, vmm->limit - vmm->start)))
 940       -        return -ENOMEM;
     1093 +    if (managed) {
     1094 +        /* Address-space will be managed by the client for the most
     1095 +         * part, except for a specified area where NVKM allocations
     1096 +         * are allowed to be placed.
     1097 +         */
     1098 +        vmm->start = 0;
     1099 +        vmm->limit = 1ULL << bits;
     1100 +        if (addr + size < addr || addr + size > vmm->limit)
     1101 +            return -EINVAL;
 941 1102  
 942       -    nvkm_vmm_free_insert(vmm, vma);
 943       -    list_add(&vma->head, &vmm->list);
     1103 +        /* Client-managed area before the NVKM-managed area. */
     1104 +        if (addr && (ret = nvkm_vmm_ctor_managed(vmm, 0, addr)))
     1105 +            return ret;
     1106 +
     1107 +        /* NVKM-managed area. */
     1108 +        if (size) {
     1109 +            if (!(vma = nvkm_vma_new(addr, size)))
     1110 +                return -ENOMEM;
     1111 +            nvkm_vmm_free_insert(vmm, vma);
     1112 +            list_add_tail(&vma->head, &vmm->list);
     1113 +        }
     1114 +
     1115 +        /* Client-managed area after the NVKM-managed area. */
     1116 +        addr = addr + size;
     1117 +        size = vmm->limit - addr;
     1118 +        if (size && (ret = nvkm_vmm_ctor_managed(vmm, addr, size)))
     1119 +            return ret;
     1120 +    } else {
     1121 +        /* Address-space fully managed by NVKM, requiring calls to
     1122 +         * nvkm_vmm_get()/nvkm_vmm_put() to allocate address-space.
     1123 +         */
     1124 +        vmm->start = addr;
     1125 +        vmm->limit = size ? (addr + size) : (1ULL << bits);
     1126 +        if (vmm->start > vmm->limit || vmm->limit > (1ULL << bits))
     1127 +            return -EINVAL;
     1128 +
     1129 +        if (!(vma = nvkm_vma_new(vmm->start, vmm->limit - vmm->start)))
     1130 +            return -ENOMEM;
     1131 +
     1132 +        nvkm_vmm_free_insert(vmm, vma);
     1133 +        list_add(&vma->head, &vmm->list);
     1134 +    }
     1135 +
 944 1136      return 0;
 945 1137  }
```
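With `managed = true`, the constructor above lays the address space out as up to three regions: a client-managed range below the reserved window, the NVKM-managed window at `[addr, addr + size)`, and a client-managed range above it, up to `1ULL << bits`. A purely illustrative C sketch of that layout calculation follows; the numbers and names here are invented, not taken from the driver:

```c
/* Hedged sketch of the managed-VMM layout set up by nvkm_vmm_ctor():
 * given the NVKM window (addr, size) inside an address space of
 * (1ULL << bits) bytes, up to three regions are created.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t limit = 1ULL << 40;      /* vmm->limit for a 40-bit VMM   */
	const uint64_t addr  = 0x100000000ULL;  /* start of NVKM-managed window  */
	const uint64_t size  = 0x010000000ULL;  /* size of NVKM-managed window   */

	if (addr)                               /* client-managed area below     */
		printf("client: 0x%016llx-0x%016llx\n",
		       0ULL, (unsigned long long)addr - 1);
	if (size)                               /* NVKM allocations live here    */
		printf("nvkm:   0x%016llx-0x%016llx\n",
		       (unsigned long long)addr,
		       (unsigned long long)(addr + size - 1));
	if (limit - (addr + size))              /* client-managed area above     */
		printf("client: 0x%016llx-0x%016llx\n",
		       (unsigned long long)(addr + size),
		       (unsigned long long)limit - 1);
	return 0;
}
```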
```
 946 1138  
 947 1139  int
 948 1140  nvkm_vmm_new_(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
 949       -              u32 hdr, u64 addr, u64 size, struct lock_class_key *key,
 950       -              const char *name, struct nvkm_vmm **pvmm)
     1141 +              u32 hdr, bool managed, u64 addr, u64 size,
     1142 +              struct lock_class_key *key, const char *name,
     1143 +              struct nvkm_vmm **pvmm)
 951 1144  {
 952 1145      if (!(*pvmm = kzalloc(sizeof(**pvmm), GFP_KERNEL)))
 953 1146          return -ENOMEM;
 954       -    return nvkm_vmm_ctor(func, mmu, hdr, addr, size, key, name, *pvmm);
     1147 +    return nvkm_vmm_ctor(func, mmu, hdr, managed, addr, size, key, name, *pvmm);
 955 1148  }
 956 1149  
 957       -#define node(root, dir) ((root)->head.dir == &vmm->list) ? NULL :            \
 958       -    list_entry((root)->head.dir, struct nvkm_vma, head)
     1150 +static struct nvkm_vma *
     1151 +nvkm_vmm_pfn_split_merge(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
     1152 +                         u64 addr, u64 size, u8 page, bool map)
     1153 +{
     1154 +    struct nvkm_vma *prev = NULL;
     1155 +    struct nvkm_vma *next = NULL;
     1156 +
     1157 +    if (vma->addr == addr && vma->part && (prev = node(vma, prev))) {
     1158 +        if (prev->memory || prev->mapped != map)
     1159 +            prev = NULL;
     1160 +    }
     1161 +
     1162 +    if (vma->addr + vma->size == addr + size && (next = node(vma, next))) {
     1163 +        if (!next->part ||
     1164 +            next->memory || next->mapped != map)
     1165 +            next = NULL;
     1166 +    }
     1167 +
     1168 +    if (prev || next)
     1169 +        return nvkm_vmm_node_merge(vmm, prev, vma, next, size);
     1170 +    return nvkm_vmm_node_split(vmm, vma, addr, size);
     1171 +}
     1172 +
     1173 +int
     1174 +nvkm_vmm_pfn_unmap(struct nvkm_vmm *vmm, u64 addr, u64 size)
     1175 +{
     1176 +    struct nvkm_vma *vma = nvkm_vmm_node_search(vmm, addr);
     1177 +    struct nvkm_vma *next;
     1178 +    u64 limit = addr + size;
     1179 +    u64 start = addr;
     1180 +
     1181 +    if (!vma)
     1182 +        return -EINVAL;
     1183 +
     1184 +    do {
     1185 +        if (!vma->mapped || vma->memory)
     1186 +            continue;
     1187 +
     1188 +        size = min(limit - start, vma->size - (start - vma->addr));
     1189 +
     1190 +        nvkm_vmm_ptes_unmap_put(vmm, &vmm->func->page[vma->refd],
     1191 +                                start, size, false, true);
     1192 +
     1193 +        next = nvkm_vmm_pfn_split_merge(vmm, vma, start, size, 0, false);
     1194 +        if (!WARN_ON(!next)) {
     1195 +            vma = next;
     1196 +            vma->refd = NVKM_VMA_PAGE_NONE;
     1197 +            vma->mapped = false;
     1198 +        }
     1199 +    } while ((vma = node(vma, next)) && (start = vma->addr) < limit);
     1200 +
     1201 +    return 0;
     1202 +}
     1203 +
```
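`nvkm_vmm_pfn_unmap()` walks successive VMAs overlapping the requested range, clamping each unmap to both the remaining range and the current VMA via `min(limit - start, vma->size - (start - vma->addr))`. A trivially runnable model of that walk, with invented ranges and plain C types (not the driver's structures):

```c
/* Small model of the per-VMA clamping in nvkm_vmm_pfn_unmap().
 * Illustrative only: three contiguous regions, one unmap request.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }

int main(void)
{
	struct { uint64_t addr, size; } vmas[] = {
		{ 0x0000, 0x3000 }, { 0x3000, 0x5000 }, { 0x8000, 0x8000 },
	};
	uint64_t start = 0x2000, limit = 0xa000;

	for (int i = 0; i < 3 && start < limit; i++) {
		if (start < vmas[i].addr || start >= vmas[i].addr + vmas[i].size)
			continue;
		uint64_t size = min_u64(limit - start,
					vmas[i].size - (start - vmas[i].addr));
		printf("unmap 0x%llx..0x%llx\n",
		       (unsigned long long)start,
		       (unsigned long long)(start + size - 1));
		start += size;
	}
	return 0;
}
```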
```
     1204 +/*TODO:
     1205 + * - Avoid PT readback (for dma_unmap etc), this might end up being dealt
     1206 + *   with inside HMM, which would be a lot nicer for us to deal with.
     1207 + * - Support for systems without a 4KiB page size.
     1208 + */
     1209 +int
     1210 +nvkm_vmm_pfn_map(struct nvkm_vmm *vmm, u8 shift, u64 addr, u64 size, u64 *pfn)
     1211 +{
     1212 +    const struct nvkm_vmm_page *page = vmm->func->page;
     1213 +    struct nvkm_vma *vma, *tmp;
     1214 +    u64 limit = addr + size;
     1215 +    u64 start = addr;
     1216 +    int pm = size >> shift;
     1217 +    int pi = 0;
     1218 +
     1219 +    /* Only support mapping where the page size of the incoming page
     1220 +     * array matches a page size available for direct mapping.
     1221 +     */
     1222 +    while (page->shift && (page->shift != shift ||
     1223 +           page->desc->func->pfn == NULL))
     1224 +        page++;
     1225 +
     1226 +    if (!page->shift || !IS_ALIGNED(addr, 1ULL << shift) ||
     1227 +        !IS_ALIGNED(size, 1ULL << shift) ||
     1228 +        addr + size < addr || addr + size > vmm->limit) {
     1229 +        VMM_DEBUG(vmm, "paged map %d %d %016llx %016llx\n",
     1230 +                  shift, page->shift, addr, size);
     1231 +        return -EINVAL;
     1232 +    }
     1233 +
     1234 +    if (!(vma = nvkm_vmm_node_search(vmm, addr)))
     1235 +        return -ENOENT;
     1236 +
     1237 +    do {
     1238 +        bool map = !!(pfn[pi] & NVKM_VMM_PFN_V);
     1239 +        bool mapped = vma->mapped;
     1240 +        u64 size = limit - start;
     1241 +        u64 addr = start;
     1242 +        int pn, ret = 0;
     1243 +
     1244 +        /* Narrow the operation window to cover a single action (page
     1245 +         * should be mapped or not) within a single VMA.
     1246 +         */
     1247 +        for (pn = 0; pi + pn < pm; pn++) {
     1248 +            if (map != !!(pfn[pi + pn] & NVKM_VMM_PFN_V))
     1249 +                break;
     1250 +        }
     1251 +        size = min_t(u64, size, pn << page->shift);
     1252 +        size = min_t(u64, size, vma->size + vma->addr - addr);
     1253 +
     1254 +        /* Reject any operation to unmanaged regions, and areas that
     1255 +         * have nvkm_memory objects mapped in them already.
     1256 +         */
     1257 +        if (!vma->mapref || vma->memory) {
     1258 +            ret = -EINVAL;
     1259 +            goto next;
     1260 +        }
     1261 +
     1262 +        /* In order to both properly refcount GPU page tables, and
     1263 +         * prevent "normal" mappings and these direct mappings from
     1264 +         * interfering with each other, we need to track contiguous
     1265 +         * ranges that have been mapped with this interface.
     1266 +         *
     1267 +         * Here we attempt to either split an existing VMA so we're
     1268 +         * able to flag the region as either unmapped/mapped, or to
     1269 +         * merge with adjacent VMAs that are already compatible.
     1270 +         *
     1271 +         * If the region is already compatible, nothing is required.
     1272 +         */
     1273 +        if (map != mapped) {
     1274 +            tmp = nvkm_vmm_pfn_split_merge(vmm, vma, addr, size,
     1275 +                                           page -
     1276 +                                           vmm->func->page, map);
     1277 +            if (WARN_ON(!tmp)) {
     1278 +                ret = -ENOMEM;
     1279 +                goto next;
     1280 +            }
     1281 +
     1282 +            if ((tmp->mapped = map))
     1283 +                tmp->refd = page - vmm->func->page;
     1284 +            else
     1285 +                tmp->refd = NVKM_VMA_PAGE_NONE;
     1286 +            vma = tmp;
     1287 +        }
     1288 +
     1289 +        /* Update HW page tables. */
     1290 +        if (map) {
     1291 +            struct nvkm_vmm_map args;
     1292 +            args.page = page;
     1293 +            args.pfn = &pfn[pi];
     1294 +
     1295 +            if (!mapped) {
     1296 +                ret = nvkm_vmm_ptes_get_map(vmm, page, addr,
     1297 +                                            size, &args, page->
     1298 +                                            desc->func->pfn);
     1299 +            } else {
     1300 +                nvkm_vmm_ptes_map(vmm, page, addr, size, &args,
     1301 +                                  page->desc->func->pfn);
     1302 +            }
     1303 +        } else {
     1304 +            if (mapped) {
     1305 +                nvkm_vmm_ptes_unmap_put(vmm, page, addr, size,
     1306 +                                        false, true);
     1307 +            }
     1308 +        }
     1309 +
     1310 +next:
     1311 +        /* Iterate to next operation. */
     1312 +        if (vma->addr + vma->size == addr + size)
     1313 +            vma = node(vma, next);
     1314 +        start += size;
     1315 +
     1316 +        if (ret) {
     1317 +            /* Failure is signalled by clearing the valid bit on
     1318 +             * any PFN that couldn't be modified as requested.
     1319 +             */
     1320 +            while (size) {
     1321 +                pfn[pi++] = NVKM_VMM_PFN_NONE;
     1322 +                size -= 1 << page->shift;
     1323 +            }
     1324 +        } else {
     1325 +            pi += size >> page->shift;
     1326 +        }
     1327 +    } while (vma && start < limit);
     1328 +
     1329 +    return 0;
     1330 +}
 959 1331  
```
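The `do { ... } while` loop in `nvkm_vmm_pfn_map()` repeatedly narrows its operation window: `pn` counts how many consecutive entries of the `pfn[]` array share the same valid bit, and the resulting size is then clamped to the current VMA. A small standalone model of just the window-narrowing step, where `PFN_V` is an illustrative stand-in for `NVKM_VMM_PFN_V`:

```c
/* Sketch of the "narrow the operation window" loop: count consecutive
 * PFN entries with the same valid bit so one map or unmap covers them
 * all.  Illustrative only.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PFN_V 1ULL

static int window(const uint64_t *pfn, int pi, int pm)
{
	bool map = pfn[pi] & PFN_V;
	int pn;

	for (pn = 0; pi + pn < pm; pn++)
		if (map != !!(pfn[pi + pn] & PFN_V))
			break;
	return pn;
}

int main(void)
{
	uint64_t pfn[] = { 1, 1, 1, 0, 0, 1 };
	int pm = 6, pi = 0;

	while (pi < pm) {
		int pn = window(pfn, pi, pm);
		printf("%s %d page(s) at index %d\n",
		       (pfn[pi] & PFN_V) ? "map" : "unmap", pn, pi);
		pi += pn;
	}
	return 0;
}
```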
```
 960 1332  void
 961 1333  nvkm_vmm_unmap_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
 962 1334  {
     1335 +    struct nvkm_vma *prev = NULL;
 963 1336      struct nvkm_vma *next;
 964 1337  
 965 1338      nvkm_memory_tags_put(vma->memory, vmm->mmu->subdev.device, &vma->tags);
 966 1339      nvkm_memory_unref(&vma->memory);
     1340 +    vma->mapped = false;
 967 1341  
 968       -    if (vma->part) {
 969       -        struct nvkm_vma *prev = node(vma, prev);
 970       -        if (!prev->memory) {
 971       -            prev->size += vma->size;
 972       -            rb_erase(&vma->tree, &vmm->root);
 973       -            list_del(&vma->head);
 974       -            kfree(vma);
 975       -            vma = prev;
 976       -        }
 977       -    }
 978       -
 979       -    next = node(vma, next);
 980       -    if (next && next->part) {
 981       -        if (!next->memory) {
 982       -            vma->size += next->size;
 983       -            rb_erase(&next->tree, &vmm->root);
 984       -            list_del(&next->head);
 985       -            kfree(next);
 986       -        }
 987       -    }
     1342 +    if (vma->part && (prev = node(vma, prev)) && prev->mapped)
     1343 +        prev = NULL;
     1344 +    if ((next = node(vma, next)) && (!next->part || next->mapped))
     1345 +        next = NULL;
     1346 +    nvkm_vmm_node_merge(vmm, prev, vma, next, vma->size);
 988 1347  }
 989 1348  
 990 1349  void
 991       -nvkm_vmm_unmap_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
     1350 +nvkm_vmm_unmap_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma, bool pfn)
 992 1351  {
 993 1352      const struct nvkm_vmm_page *page = &vmm->func->page[vma->refd];
 994 1353  
 995 1354      if (vma->mapref) {
 996       -        nvkm_vmm_ptes_unmap_put(vmm, page, vma->addr, vma->size, vma->sparse);
     1355 +        nvkm_vmm_ptes_unmap_put(vmm, page, vma->addr, vma->size, vma->sparse, pfn);
 997 1356          vma->refd = NVKM_VMA_PAGE_NONE;
 998 1357      } else {
 999       -        nvkm_vmm_ptes_unmap(vmm, page, vma->addr, vma->size, vma->sparse);
     1358 +        nvkm_vmm_ptes_unmap(vmm, page, vma->addr, vma->size, vma->sparse, pfn);
1000 1359      }
1001 1360  
1002 1361      nvkm_vmm_unmap_region(vmm, vma);
  ..    ..
1007 1366  {
1008 1367      if (vma->memory) {
1009 1368          mutex_lock(&vmm->mutex);
1010       -        nvkm_vmm_unmap_locked(vmm, vma);
     1369 +        nvkm_vmm_unmap_locked(vmm, vma, false);
1011 1370          mutex_unlock(&vmm->mutex);
1012 1371      }
1013 1372  }
  ..    ..
1141 1500      nvkm_memory_tags_put(vma->memory, vmm->mmu->subdev.device, &vma->tags);
1142 1501      nvkm_memory_unref(&vma->memory);
1143 1502      vma->memory = nvkm_memory_ref(map->memory);
     1503 +    vma->mapped = true;
1144 1504      vma->tags = map->tags;
1145 1505      return 0;
1146 1506  }
  ..    ..
1163 1523      struct nvkm_vma *prev, *next;
1164 1524  
1165 1525      if ((prev = node(vma, prev)) && !prev->used) {
1166       -        rb_erase(&prev->tree, &vmm->free);
1167       -        list_del(&prev->head);
1168 1526          vma->addr = prev->addr;
1169 1527          vma->size += prev->size;
1170       -        kfree(prev);
     1528 +        nvkm_vmm_free_delete(vmm, prev);
1171 1529      }
1172 1530  
1173 1531      if ((next = node(vma, next)) && !next->used) {
1174       -        rb_erase(&next->tree, &vmm->free);
1175       -        list_del(&next->head);
1176 1532          vma->size += next->size;
1177       -        kfree(next);
     1533 +        nvkm_vmm_free_delete(vmm, next);
1178 1534      }
1179 1535  
1180 1536      nvkm_vmm_free_insert(vmm, vma);
  ..    ..
1190 1546  
1191 1547      if (vma->mapref || !vma->sparse) {
1192 1548          do {
1193       -            const bool map = next->memory != NULL;
     1549 +            const bool mem = next->memory != NULL;
     1550 +            const bool map = next->mapped;
1194 1551              const u8 refd = next->refd;
1195 1552              const u64 addr = next->addr;
1196 1553              u64 size = next->size;
1197 1554  
1198 1555              /* Merge regions that are in the same state. */
1199 1556              while ((next = node(next, next)) && next->part &&
1200       -                   (next->memory != NULL) == map &&
     1557 +                   (next->mapped == map) &&
     1558 +                   (next->memory != NULL) == mem &&
1201 1559                     (next->refd == refd))
1202 1560                  size += next->size;
1203 1561  
  ..    ..
1207 1565               * the page tree.
1208 1566               */
1209 1567              nvkm_vmm_ptes_unmap_put(vmm, &page[refd], addr,
1210       -                                    size, vma->sparse);
     1568 +                                    size, vma->sparse,
     1569 +                                    !mem);
1211 1570          } else
1212 1571          if (refd != NVKM_VMA_PAGE_NONE) {
1213 1572              /* Drop allocation-time PTE references. */
  ..    ..
1222 1581       */
1223 1582      next = vma;
1224 1583      do {
1225       -        if (next->memory)
     1584 +        if (next->mapped)
1226 1585              nvkm_vmm_unmap_region(vmm, next);
1227 1586      } while ((next = node(vma, next)) && next->part);
1228 1587  
  ..    ..
1250 1609      }
1251 1610  
1252 1611      /* Remove VMA from the list of allocated nodes. */
1253       -    rb_erase(&vma->tree, &vmm->root);
     1612 +    nvkm_vmm_node_remove(vmm, vma);
1254 1613  
1255 1614      /* Merge VMA back into the free list. */
1256 1615      vma->page = NVKM_VMA_PAGE_NONE;
  ..    ..
1357 1716              tail = ALIGN_DOWN(tail, vmm->func->page_block);
1358 1717  
1359 1718          if (addr <= tail && tail - addr >= size) {
1360       -            rb_erase(&this->tree, &vmm->free);
     1719 +            nvkm_vmm_free_remove(vmm, this);
1361 1720              vma = this;
1362 1721              break;
1363 1722          }
  ..    ..
1443 1802  }
1444 1803  
1445 1804  static bool
1446       -nvkm_vmm_boot_ptes(struct nvkm_vmm_iter *it, u32 ptei, u32 ptes)
     1805 +nvkm_vmm_boot_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
1447 1806  {
1448 1807      const struct nvkm_vmm_desc *desc = it->desc;
1449 1808      const int type = desc->type == SPT;
  ..    ..
1465 1824      if (ret)
1466 1825          return ret;
1467 1826  
1468       -    nvkm_vmm_iter(vmm, page, vmm->start, limit, "bootstrap", false,
     1827 +    nvkm_vmm_iter(vmm, page, vmm->start, limit, "bootstrap", false, false,
1469 1828                    nvkm_vmm_boot_ptes, NULL, NULL, NULL);
1470 1829      vmm->bootstrapped = true;
1471 1830      return 0;
  ..    ..
1505 1864      struct nvkm_mmu *mmu = device->mmu;
1506 1865      struct nvkm_vmm *vmm = NULL;
1507 1866      int ret;
1508       -    ret = mmu->func->vmm.ctor(mmu, addr, size, argv, argc, key, name, &vmm);
     1867 +    ret = mmu->func->vmm.ctor(mmu, false, addr, size, argv, argc,
     1868 +                              key, name, &vmm);
1509 1869      if (ret)
1510 1870          nvkm_vmm_unref(&vmm);
1511 1871      *pvmm = vmm;
```
|---|