hc
2023-11-06 e3e12f52b214121840b44c91de5b3e5af5d3eb84
kernel/drivers/video/rockchip/rga3/rga_iommu.c
@@ -5,7 +5,7 @@
  * Author: Huang Lee <Putin.li@rock-chips.com>
  */
 
-#define pr_fmt(fmt) "rga2_mmu: " fmt
+#define pr_fmt(fmt) "rga_iommu: " fmt
 
 #include "rga_iommu.h"
 #include "rga_dma_buf.h"
@@ -170,6 +170,12 @@
 	 * size * channel_num * address_size
 	 */
 	order = get_order(size * 3 * sizeof(*mmu_base->buf_virtual));
+	if (order >= MAX_ORDER) {
+		pr_err("Can not alloc pages with order[%d] for mmu_page_table, max_order = %d\n",
+		       order, MAX_ORDER);
+		goto err_free_mmu_base;
+	}
+
 	mmu_base->buf_virtual = (uint32_t *) __get_free_pages(GFP_KERNEL | GFP_DMA32, order);
 	if (mmu_base->buf_virtual == NULL) {
 		pr_err("Can not alloc pages for mmu_page_table\n");
@@ -178,6 +184,12 @@
 	mmu_base->buf_order = order;
 
 	order = get_order(size * sizeof(*mmu_base->pages));
+	if (order >= MAX_ORDER) {
+		pr_err("Can not alloc pages with order[%d] for mmu_base->pages, max_order = %d\n",
+		       order, MAX_ORDER);
+		goto err_free_buf_virtual;
+	}
+
 	mmu_base->pages = (struct page **)__get_free_pages(GFP_KERNEL | GFP_DMA32, order);
 	if (mmu_base->pages == NULL) {
 		pr_err("Can not alloc pages for mmu_base->pages\n");
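Note on the two guards above: get_order() will happily return an order larger than the buddy allocator supports, and __get_free_pages() returns 0 for order >= MAX_ORDER (typically with a one-time warning), so both hunks validate the size before allocating instead of failing inside the allocator. A minimal sketch of the same pattern, assuming a hypothetical alloc_table() helper; get_order(), MAX_ORDER, GFP_DMA32 and __get_free_pages() are stock kernel symbols:

	#include <linux/gfp.h>
	#include <linux/mm.h>

	/* Hypothetical helper mirroring the guard used in both hunks. */
	static unsigned long alloc_table(size_t size)
	{
		int order = get_order(size);

		/* the buddy allocator cannot satisfy order >= MAX_ORDER */
		if (order >= MAX_ORDER)
			return 0;

		return __get_free_pages(GFP_KERNEL | GFP_DMA32, order);
	}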
@@ -219,6 +231,38 @@
 
 	kfree(base);
 	*mmu_base = NULL;
+}
+
+static int rga_iommu_intr_fault_handler(struct iommu_domain *iommu, struct device *iommu_dev,
+					unsigned long iova, int status, void *arg)
+{
+	struct rga_scheduler_t *scheduler = (struct rga_scheduler_t *)arg;
+	struct rga_job *job = scheduler->running_job;
+
+	if (job == NULL)
+		return 0;
+
+	pr_err("IOMMU intr fault, IOVA[0x%lx], STATUS[0x%x]\n", iova, status);
+	if (scheduler->ops->irq)
+		scheduler->ops->irq(scheduler);
+
+	/* iommu interrupts on rga2 do not affect rga2 itself. */
+	if (!test_bit(RGA_JOB_STATE_INTR_ERR, &job->state)) {
+		set_bit(RGA_JOB_STATE_INTR_ERR, &job->state);
+		scheduler->ops->soft_reset(scheduler);
+	}
+
+	if (status & RGA_IOMMU_IRQ_PAGE_FAULT) {
+		pr_err("RGA IOMMU: page fault! Please check the memory size.\n");
+		job->ret = -EACCES;
+	} else if (status & RGA_IOMMU_IRQ_BUS_ERROR) {
+		pr_err("RGA IOMMU: bus error! Please check if the memory is invalid or has been freed.\n");
+		job->ret = -EACCES;
+	} else {
+		pr_err("RGA IOMMU: Wrong IOMMU interrupt signal!\n");
+	}
+
+	return 0;
 }
 
 int rga_iommu_detach(struct rga_iommu_info *info)
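The new handler matches the kernel's iommu_fault_handler_t callback type from include/linux/iommu.h; the IOMMU driver delivers faults to it via report_iommu_fault(), and the last argument is the token passed at registration time. A stripped-down sketch of the same shape ('demo_fault_handler' and its log text are illustrative, not the driver's code):

	#include <linux/iommu.h>
	#include <linux/printk.h>

	static int demo_fault_handler(struct iommu_domain *domain, struct device *dev,
				      unsigned long iova, int flags, void *token)
	{
		/* 'token' is whatever was handed to iommu_set_fault_handler() */
		pr_err("iommu fault at iova 0x%lx, flags 0x%x\n", iova, flags);

		/* ...stop and soft-reset the engine via 'token' here... */

		return 0; /* 0 = handled; a non-zero value is interpreted by the IOMMU driver */
	}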
@@ -306,6 +350,9 @@
 		if (main_iommu == NULL) {
 			main_iommu = scheduler->iommu_info;
 			main_iommu_index = i;
+			iommu_set_fault_handler(main_iommu->domain,
+						rga_iommu_intr_fault_handler,
+						(void *)scheduler);
 		} else {
 			scheduler->iommu_info->domain = main_iommu->domain;
 			scheduler->iommu_info->default_dev = main_iommu->default_dev;
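iommu_set_fault_handler() is the stock IOMMU core API: it stores the callback and token on the domain. Because the else branch aliases every scheduler's domain to main_iommu->domain, a single registration on the main domain covers all schedulers, with the owning scheduler as the token. A usage sketch against an already-attached device, reusing demo_fault_handler from the sketch above ('dev' and 'token' are assumptions; iommu_get_domain_for_dev() is a real API):

	#include <linux/iommu.h>

	static void demo_register(struct device *dev, void *token)
	{
		struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

		if (domain)
			iommu_set_fault_handler(domain, demo_fault_handler, token);
	}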