2024-01-03 2f7c68cb55ecb7331f2381deb497c27155f32faf
kernel/drivers/gpu/arm/bifrost/csf/mali_kbase_csf_protected_memory.c
@@ -71,29 +71,60 @@
 kbase_csf_protected_memory_alloc(
         struct kbase_device *const kbdev,
         struct tagged_addr *phys,
-        size_t num_pages)
+        size_t num_pages,
+        bool is_small_page)
 {
         size_t i;
         struct protected_memory_allocator_device *pma_dev =
                 kbdev->csf.pma_dev;
-        struct protected_memory_allocation **pma =
-                kmalloc_array(num_pages, sizeof(*pma), GFP_KERNEL);
+        struct protected_memory_allocation **pma = NULL;
+        unsigned int order = KBASE_MEM_POOL_2MB_PAGE_TABLE_ORDER;
+        unsigned int num_pages_order;
+
+        if (is_small_page)
+                order = KBASE_MEM_POOL_4KB_PAGE_TABLE_ORDER;
+
+        num_pages_order = (1u << order);
+
+        /* Ensure the requested num_pages is aligned with
+         * the order type passed as argument.
+         *
+         * pma_alloc_page() will then handle the granularity
+         * of the allocation based on order.
+         */
+        num_pages = div64_u64(num_pages + num_pages_order - 1, num_pages_order);
+
+        pma = kmalloc_array(num_pages, sizeof(*pma), GFP_KERNEL);
 
         if (WARN_ON(!pma_dev) || WARN_ON(!phys) || !pma)
                 return NULL;
 
         for (i = 0; i < num_pages; i++) {
-                pma[i] = pma_dev->ops.pma_alloc_page(pma_dev,
-                        KBASE_MEM_POOL_4KB_PAGE_TABLE_ORDER);
+                phys_addr_t phys_addr;
+
+                pma[i] = pma_dev->ops.pma_alloc_page(pma_dev, order);
                 if (!pma[i])
                         break;
 
-                phys[i] = as_tagged(pma_dev->ops.pma_get_phys_addr(pma_dev,
-                                pma[i]));
+                phys_addr = pma_dev->ops.pma_get_phys_addr(pma_dev, pma[i]);
+
+                if (order) {
+                        size_t j;
+
+                        *phys++ = as_tagged_tag(phys_addr, HUGE_HEAD | HUGE_PAGE);
+
+                        for (j = 1; j < num_pages_order; j++) {
+                                *phys++ = as_tagged_tag(phys_addr +
+                                                        PAGE_SIZE * j,
+                                                        HUGE_PAGE);
+                        }
+                } else {
+                        phys[i] = as_tagged(phys_addr);
+                }
         }
 
         if (i != num_pages) {
-                kbase_csf_protected_memory_free(kbdev, pma, i);
+                kbase_csf_protected_memory_free(kbdev, pma, i * num_pages_order, is_small_page);
                 return NULL;
         }
 
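In the allocation hunk above, each loop iteration now requests a single allocation of 2^order pages from the protected memory allocator and fans it out into one tagged_addr entry per 4KB page: the first entry is tagged HUGE_HEAD | HUGE_PAGE, the remaining entries are tagged HUGE_PAGE with the physical address advanced by PAGE_SIZE each step, and the order-0 (small page) case keeps the old single as_tagged entry. The standalone sketch below models only that fan-out; the sketch_* names and the flag bit positions are illustrative stand-ins, not the driver's actual tagged_addr encoding.

/* Standalone sketch (not driver code): how one 2^order-page protected
 * allocation is expanded into per-4KB tagged entries. Flag values and
 * sketch_* names are assumptions made for illustration only.
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_PAGE_SIZE 4096ULL
#define SKETCH_HUGE_PAGE (1ULL << 0) /* stand-in: entry belongs to a large region */
#define SKETCH_HUGE_HEAD (1ULL << 1) /* stand-in: first entry of that region */

typedef uint64_t sketch_tagged_addr; /* physical address with flag bits ORed in */

static sketch_tagged_addr sketch_as_tagged_tag(uint64_t phys, uint64_t tag)
{
        return phys | tag;
}

/* Mirror of the patch's loop body for order > 0: write one entry per 4KB
 * page covered by a single allocation starting at phys_addr. */
static void sketch_expand(sketch_tagged_addr *out, uint64_t phys_addr,
                          unsigned int order)
{
        const unsigned int num_pages_order = 1u << order;
        unsigned int j;

        out[0] = sketch_as_tagged_tag(phys_addr, SKETCH_HUGE_HEAD | SKETCH_HUGE_PAGE);
        for (j = 1; j < num_pages_order; j++)
                out[j] = sketch_as_tagged_tag(phys_addr + SKETCH_PAGE_SIZE * j,
                                              SKETCH_HUGE_PAGE);
}

int main(void)
{
        sketch_tagged_addr entries[512]; /* 2^9 entries for one 2MB allocation */

        sketch_expand(entries, 0x80000000ULL, 9);
        printf("head: %#llx, second: %#llx\n",
               (unsigned long long)entries[0], (unsigned long long)entries[1]);
        return 0;
}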
@@ -103,15 +134,28 @@
 void kbase_csf_protected_memory_free(
         struct kbase_device *const kbdev,
         struct protected_memory_allocation **pma,
-        size_t num_pages)
+        size_t num_pages,
+        bool is_small_page)
 {
         size_t i;
         struct protected_memory_allocator_device *pma_dev =
                 kbdev->csf.pma_dev;
+        unsigned int num_pages_order = (1u << KBASE_MEM_POOL_2MB_PAGE_TABLE_ORDER);
+
+        if (is_small_page)
+                num_pages_order = (1u << KBASE_MEM_POOL_4KB_PAGE_TABLE_ORDER);
 
         if (WARN_ON(!pma_dev) || WARN_ON(!pma))
                 return;
 
+        /* Ensure the requested num_pages is aligned with
+         * the order type passed as argument.
+         *
+         * pma_alloc_page() will then handle the granularity
+         * of the allocation based on order.
+         */
+        num_pages = div64_u64(num_pages + num_pages_order - 1, num_pages_order);
+
         for (i = 0; i < num_pages; i++)
                 pma_dev->ops.pma_free_page(pma_dev, pma[i]);
 
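Both functions now round num_pages up to a whole number of order-sized allocations with div64_u64; that is what makes the i * num_pages_order passed from the allocation error path divide back to exactly the i handles that were actually allocated. A standalone sketch of that rounding follows, using hypothetical sketch_* names and assuming a 2MB granule corresponds to order 9 (512 small pages).

/* Standalone sketch (not driver code): the ceil division shared by the
 * alloc and free paths, so both agree on how many allocation handles
 * back a given count of 4KB pages. */
#include <stdint.h>
#include <stdio.h>

static uint64_t sketch_num_allocations(uint64_t num_pages, unsigned int order)
{
        const uint64_t num_pages_order = 1ULL << order;

        /* Same arithmetic as div64_u64(num_pages + num_pages_order - 1,
         * num_pages_order) in the patch. */
        return (num_pages + num_pages_order - 1) / num_pages_order;
}

int main(void)
{
        /* 513 small pages need two order-9 (2MB) allocations, but map 1:1
         * onto order-0 (4KB) allocations. */
        printf("%llu\n", (unsigned long long)sketch_num_allocations(513, 9)); /* 2 */
        printf("%llu\n", (unsigned long long)sketch_num_allocations(513, 0)); /* 513 */
        return 0;
}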