| .. | .. |
| 5 | 5 | * Author: Huang Lee <Putin.li@rock-chips.com>
| 6 | 6 | */
| 7 | 7 |
| 8 | | -#define pr_fmt(fmt) "rga2_mmu: " fmt
| | 8 | +#define pr_fmt(fmt) "rga_iommu: " fmt
| 9 | 9 |
| 10 | 10 | #include "rga_iommu.h"
| 11 | 11 | #include "rga_dma_buf.h"
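Note: the `pr_fmt` change only alters the prefix that the `pr_*()` logging helpers prepend to this file's messages, so every existing log call site picks up the new "rga_iommu: " tag automatically. A minimal illustration of the mechanism (generic sketch, not driver code):

```c
/* pr_fmt() must be defined before <linux/printk.h> is pulled in; the pr_*()
 * helpers expand it around every format string in this translation unit. */
#define pr_fmt(fmt) "rga_iommu: " fmt

#include <linux/printk.h>

static void log_example(void)
{
	/* Printed as: "rga_iommu: Can not alloc pages for mmu_page_table" */
	pr_err("Can not alloc pages for mmu_page_table\n");
}
```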
| .. | .. |
| 170 | 170 | * size * channel_num * address_size
| 171 | 171 | */
| 172 | 172 | order = get_order(size * 3 * sizeof(*mmu_base->buf_virtual));
| | 173 | + if (order >= MAX_ORDER) {
| | 174 | + pr_err("Can not alloc pages with order[%d] for mmu_page_table, max_order = %d\n",
| | 175 | + order, MAX_ORDER);
| | 176 | + goto err_free_mmu_base;
| | 177 | + }
| | 178 | +
| 173 | 179 | mmu_base->buf_virtual = (uint32_t *) __get_free_pages(GFP_KERNEL | GFP_DMA32, order);
| 174 | 180 | if (mmu_base->buf_virtual == NULL) {
| 175 | 181 | pr_err("Can not alloc pages for mmu_page_table\n");
| .. | .. |
| 178 | 184 | mmu_base->buf_order = order;
| 179 | 185 |
| 180 | 186 | order = get_order(size * sizeof(*mmu_base->pages));
| | 187 | + if (order >= MAX_ORDER) {
| | 188 | + pr_err("Can not alloc pages with order[%d] for mmu_base->pages, max_order = %d\n",
| | 189 | + order, MAX_ORDER);
| | 190 | + goto err_free_buf_virtual;
| | 191 | + }
| | 192 | +
| 181 | 193 | mmu_base->pages = (struct page **)__get_free_pages(GFP_KERNEL | GFP_DMA32, order);
| 182 | 194 | if (mmu_base->pages == NULL) {
| 183 | 195 | pr_err("Can not alloc pages for mmu_base->pages\n");
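Both table allocations now verify that the computed page order is below `MAX_ORDER` before calling `__get_free_pages()`, so an oversized request fails cleanly instead of running into the buddy allocator's limit. A minimal sketch of the same guarded pattern (hypothetical helper name; assumes the same exclusive-upper-bound meaning of `MAX_ORDER` that the patch relies on):

```c
#include <linux/gfp.h>
#include <linux/mm.h>

/* Hypothetical helper: allocate a physically contiguous buffer of 'bytes'
 * bytes and report the order used, refusing orders the allocator cannot serve. */
static unsigned long alloc_contig_table(size_t bytes, unsigned int *order_out)
{
	unsigned int order = get_order(bytes);	/* round the byte count up to a page order */

	if (order >= MAX_ORDER)			/* beyond what __get_free_pages() supports */
		return 0;

	*order_out = order;
	return __get_free_pages(GFP_KERNEL | GFP_DMA32, order);
}
```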
| .. | .. |
| 219 | 231 |
| 220 | 232 | kfree(base);
| 221 | 233 | *mmu_base = NULL;
| | 234 | +}
| | 235 | +
| | 236 | +static int rga_iommu_intr_fault_handler(struct iommu_domain *iommu, struct device *iommu_dev,
| | 237 | + unsigned long iova, int status, void *arg)
| | 238 | +{
| | 239 | + struct rga_scheduler_t *scheduler = (struct rga_scheduler_t *)arg;
| | 240 | + struct rga_job *job = scheduler->running_job;
| | 241 | +
| | 242 | + if (job == NULL)
| | 243 | + return 0;
| | 244 | +
| | 245 | + pr_err("IOMMU intr fault, IOVA[0x%lx], STATUS[0x%x]\n", iova, status);
| | 246 | + if (scheduler->ops->irq)
| | 247 | + scheduler->ops->irq(scheduler);
| | 248 | +
| | 249 | + /* iommu interrupts on rga2 do not affect rga2 itself. */
| | 250 | + if (!test_bit(RGA_JOB_STATE_INTR_ERR, &job->state)) {
| | 251 | + set_bit(RGA_JOB_STATE_INTR_ERR, &job->state);
| | 252 | + scheduler->ops->soft_reset(scheduler);
| | 253 | + }
| | 254 | +
| | 255 | + if (status & RGA_IOMMU_IRQ_PAGE_FAULT) {
| | 256 | + pr_err("RGA IOMMU: page fault! Please check the memory size.\n");
| | 257 | + job->ret = -EACCES;
| | 258 | + } else if (status & RGA_IOMMU_IRQ_BUS_ERROR) {
| | 259 | + pr_err("RGA IOMMU: bus error! Please check if the memory is invalid or has been freed.\n");
| | 260 | + job->ret = -EACCES;
| | 261 | + } else {
| | 262 | + pr_err("RGA IOMMU: Wrong IOMMU interrupt signal!\n");
| | 263 | + }
| | 264 | +
| | 265 | + return 0;
| 222 | 266 | }
| 223 | 267 |
| 224 | 268 | int rga_iommu_detach(struct rga_iommu_info *info)
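The new handler flags the running job with `RGA_JOB_STATE_INTR_ERR` and issues a soft reset only for the first fault seen on that job. The `test_bit()`/`set_bit()` pair is a non-atomic check-then-set; where the same bit could be raced from two contexts, `test_and_set_bit()` collapses it into one atomic step. A generic sketch of that variant (illustration only, not a change the patch makes):

```c
#include <linux/bitops.h>
#include <linux/types.h>

/* test_and_set_bit() returns the bit's previous value, so only the caller
 * that actually flips it from 0 to 1 gets to run the one-time action. */
static bool claim_first_fault(unsigned long *state, int bit)
{
	return !test_and_set_bit(bit, state);
}
```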
| .. | .. |
| 306 | 350 | if (main_iommu == NULL) {
| 307 | 351 | main_iommu = scheduler->iommu_info;
| 308 | 352 | main_iommu_index = i;
| | 353 | + iommu_set_fault_handler(main_iommu->domain,
| | 354 | + rga_iommu_intr_fault_handler,
| | 355 | + (void *)scheduler);
| 309 | 356 | } else {
| 310 | 357 | scheduler->iommu_info->domain = main_iommu->domain;
| 311 | 358 | scheduler->iommu_info->default_dev = main_iommu->default_dev;
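The fault handler is registered once, on the main IOMMU domain that the other schedulers share, and the scheduler pointer is passed as the token so it comes back as `arg` on every reported fault. A minimal registration sketch against the generic IOMMU API (hypothetical names for the context and handler; the prototypes follow include/linux/iommu.h):

```c
#include <linux/device.h>
#include <linux/iommu.h>

struct my_ctx {			/* hypothetical per-device context */
	int id;
};

/* Must match iommu_fault_handler_t; the cookie given at registration time
 * is handed back as the last argument whenever a fault is reported. */
static int my_fault_handler(struct iommu_domain *domain, struct device *dev,
			    unsigned long iova, int flags, void *cookie)
{
	struct my_ctx *ctx = cookie;

	dev_err(dev, "iommu fault on ctx %d at iova %#lx (flags 0x%x)\n",
		ctx->id, iova, flags);
	return 0;	/* the return value is passed back through report_iommu_fault() */
}

static void my_register_handler(struct iommu_domain *domain, struct my_ctx *ctx)
{
	iommu_set_fault_handler(domain, my_fault_handler, ctx);
}
```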
|---|