@@ -71,29 +71,60 @@
 kbase_csf_protected_memory_alloc(
 	struct kbase_device *const kbdev,
 	struct tagged_addr *phys,
-	size_t num_pages)
+	size_t num_pages,
+	bool is_small_page)
 {
 	size_t i;
 	struct protected_memory_allocator_device *pma_dev =
 		kbdev->csf.pma_dev;
-	struct protected_memory_allocation **pma =
-		kmalloc_array(num_pages, sizeof(*pma), GFP_KERNEL);
+	struct protected_memory_allocation **pma = NULL;
+	unsigned int order = KBASE_MEM_POOL_2MB_PAGE_TABLE_ORDER;
+	unsigned int num_pages_order;
+
+	if (is_small_page)
+		order = KBASE_MEM_POOL_4KB_PAGE_TABLE_ORDER;
+
+	num_pages_order = (1u << order);
+
+	/* Ensure the requested num_pages is aligned with
+	 * the order type passed as argument.
+	 *
+	 * pma_alloc_page() will then handle the granularity
+	 * of the allocation based on order.
+	 */
+	num_pages = div64_u64(num_pages + num_pages_order - 1, num_pages_order);
+
+	pma = kmalloc_array(num_pages, sizeof(*pma), GFP_KERNEL);
 
 	if (WARN_ON(!pma_dev) || WARN_ON(!phys) || !pma)
 		return NULL;
 
 	for (i = 0; i < num_pages; i++) {
-		pma[i] = pma_dev->ops.pma_alloc_page(pma_dev,
-			KBASE_MEM_POOL_4KB_PAGE_TABLE_ORDER);
+		phys_addr_t phys_addr;
+
+		pma[i] = pma_dev->ops.pma_alloc_page(pma_dev, order);
 		if (!pma[i])
 			break;
 
-		phys[i] = as_tagged(pma_dev->ops.pma_get_phys_addr(pma_dev,
-			pma[i]));
+		phys_addr = pma_dev->ops.pma_get_phys_addr(pma_dev, pma[i]);
+
+		if (order) {
+			size_t j;
+
+			*phys++ = as_tagged_tag(phys_addr, HUGE_HEAD | HUGE_PAGE);
+
+			for (j = 1; j < num_pages_order; j++) {
+				*phys++ = as_tagged_tag(phys_addr +
+							PAGE_SIZE * j,
+							HUGE_PAGE);
+			}
+		} else {
+			phys[i] = as_tagged(phys_addr);
+		}
 	}
 
 	if (i != num_pages) {
-		kbase_csf_protected_memory_free(kbdev, pma, i);
+		kbase_csf_protected_memory_free(kbdev, pma, i * num_pages_order, is_small_page);
 		return NULL;
 	}
 
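The alloc path above now rounds the requested small-page count up to a whole number of order-sized allocations, then fans each large allocation out into `1 << order` tagged 4K entries (`HUGE_HEAD | HUGE_PAGE` for the first, `HUGE_PAGE` for the rest). Below is a minimal standalone sketch of that arithmetic, assuming the driver's usual constants (order 9 for the 2MB pool, order 0 for the 4KB pool) rather than the kbase headers themselves:

```c
#include <stdio.h>

int main(void)
{
	/* Assumed values: KBASE_MEM_POOL_2MB_PAGE_TABLE_ORDER is typically 9
	 * (512 x 4K = 2MB) and KBASE_MEM_POOL_4KB_PAGE_TABLE_ORDER is 0.
	 */
	unsigned int order = 9;
	unsigned int num_pages_order = 1u << order;	/* 512 */
	unsigned long long num_pages = 1000;		/* requested 4K pages */

	/* Same ceiling division as div64_u64(num_pages + num_pages_order - 1,
	 * num_pages_order): 1000 small pages need two 2MB allocations.
	 */
	unsigned long long num_allocs =
		(num_pages + num_pages_order - 1) / num_pages_order;

	printf("%llu allocations of order %u, %llu tagged 4K entries\n",
	       num_allocs, order, num_allocs * num_pages_order);
	return 0;
}
```

One consequence worth noting: because the `*phys++` loop writes `num_pages * num_pages_order` entries after rounding, the caller's `phys` array must have room for the rounded-up entry count, not just the originally requested `num_pages`.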
@@ -103,15 +103,28 @@
 void kbase_csf_protected_memory_free(
 	struct kbase_device *const kbdev,
 	struct protected_memory_allocation **pma,
-	size_t num_pages)
+	size_t num_pages,
+	bool is_small_page)
 {
 	size_t i;
 	struct protected_memory_allocator_device *pma_dev =
 		kbdev->csf.pma_dev;
+	unsigned int num_pages_order = (1u << KBASE_MEM_POOL_2MB_PAGE_TABLE_ORDER);
+
+	if (is_small_page)
+		num_pages_order = (1u << KBASE_MEM_POOL_4KB_PAGE_TABLE_ORDER);
 
 	if (WARN_ON(!pma_dev) || WARN_ON(!pma))
 		return;
 
+	/* Ensure the requested num_pages is aligned with
+	 * the order type passed as argument.
+	 *
+	 * pma_free_page() will then handle the granularity
+	 * of the allocation based on order.
+	 */
+	num_pages = div64_u64(num_pages + num_pages_order - 1, num_pages_order);
+
 	for (i = 0; i < num_pages; i++)
 		pma_dev->ops.pma_free_page(pma_dev, pma[i]);
 
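For reference, a hypothetical caller pairing the two updated entry points might look like the sketch below. The return type, error handling, and surrounding kbase context are assumptions inferred from the hunks above, not code from this patch:

```c
/* Hypothetical usage, assuming kbase_csf_protected_memory_alloc() returns
 * the pma array (NULL on failure), as its error path above suggests.
 */
struct tagged_addr phys[512];
struct protected_memory_allocation **pma;

/* is_small_page = false selects the 2MB path: 512 requested 4K pages round
 * up to one order-9 allocation, and all 512 phys entries are written.
 */
pma = kbase_csf_protected_memory_alloc(kbdev, phys, 512, false);
if (pma) {
	/* ... map and use the protected pages ... */

	/* Free takes the same small-page count and is_small_page flag; it
	 * divides back down to the number of pma entries internally.
	 */
	kbase_csf_protected_memory_free(kbdev, pma, 512, false);
}
```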
|---|