| .. | .. |
| 8 | 8 | #include <linux/dma-mapping.h> |
| 9 | 9 | #include <linux/errno.h> |
| 10 | 10 | #include <linux/interrupt.h> |
| 11 | +#include <linux/io.h> |
| 11 | 12 | #include <linux/iopoll.h> |
| 12 | 13 | #include <linux/kernel.h> |
| 13 | 14 | #include <linux/module.h> |
| .. | .. |
| 17 | 18 | #include <linux/of_device.h> |
| 18 | 19 | |
| 19 | 20 | #define CMDQ_OP_CODE_MASK (0xff << CMDQ_OP_CODE_SHIFT) |
| 20 | | -#define CMDQ_IRQ_MASK 0xffff |
| 21 | 21 | #define CMDQ_NUM_CMD(t) (t->cmd_buf_size / CMDQ_INST_SIZE) |
| 22 | 22 | |
| 23 | 23 | #define CMDQ_CURR_IRQ_STATUS 0x10 |
| 24 | +#define CMDQ_SYNC_TOKEN_UPDATE 0x68 |
| 24 | 25 | #define CMDQ_THR_SLOT_CYCLES 0x30 |
| 25 | 26 | #define CMDQ_THR_BASE 0x100 |
| 26 | 27 | #define CMDQ_THR_SIZE 0x80 |
| .. | .. |
| 55 | 56 | void __iomem *base; |
| 56 | 57 | struct list_head task_busy_list; |
| 57 | 58 | u32 priority; |
| 58 | | - bool atomic_exec; |
| 59 | 59 | }; |
| 60 | 60 | |
| 61 | 61 | struct cmdq_task { |
| .. | .. |
| 69 | 69 | struct cmdq { |
| 70 | 70 | struct mbox_controller mbox; |
| 71 | 71 | void __iomem *base; |
| 72 | | - u32 irq; |
| 72 | + int irq; |
| 73 | 73 | u32 thread_nr; |
| 74 | + u32 irq_mask; |
| 74 | 75 | struct cmdq_thread *thread; |
| 75 | 76 | struct clk *clock; |
| 76 | 77 | bool suspended; |
| 78 | + u8 shift_pa; |
| 77 | 79 | }; |
| 80 | + |
| 81 | +struct gce_plat { |
| 82 | + u32 thread_nr; |
| 83 | + u8 shift; |
| 84 | +}; |
| 85 | + |
| 86 | +u8 cmdq_get_shift_pa(struct mbox_chan *chan) |
| 87 | +{ |
| 88 | + struct cmdq *cmdq = container_of(chan->mbox, struct cmdq, mbox); |
| 89 | + |
| 90 | + return cmdq->shift_pa; |
| 91 | +} |
| 92 | +EXPORT_SYMBOL(cmdq_get_shift_pa); |
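
The newly exported cmdq_get_shift_pa() lets mailbox clients ask the controller how many bits it shifts physical addresses before they reach GCE registers or jump operands. A minimal consumer-side sketch, assuming the declaration is exposed through the mtk-cmdq mailbox header; the helper name is hypothetical, only cmdq_get_shift_pa() itself comes from this change:

```c
#include <linux/mailbox_client.h>
#include <linux/mailbox/mtk-cmdq-mailbox.h>

/* Hypothetical client helper: convert a DMA address into the shifted
 * form the GCE hardware expects on this platform (shift is 0 on
 * mt8173/mt8183 and 3 on mt6779, per the gce_plat tables below).
 */
static u32 my_cmdq_encode_pa(struct mbox_chan *chan, dma_addr_t pa)
{
	u8 shift = cmdq_get_shift_pa(chan);

	return (u32)(pa >> shift);
}
```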
| 78 | 93 | |
| 79 | 94 | static int cmdq_thread_suspend(struct cmdq *cmdq, struct cmdq_thread *thread) |
| 80 | 95 | { |
| .. | .. |
| 103 | 118 | |
| 104 | 119 | static void cmdq_init(struct cmdq *cmdq) |
| 105 | 120 | { |
| 121 | + int i; |
| 122 | + |
| 106 | 123 | WARN_ON(clk_enable(cmdq->clock) < 0); |
| 107 | 124 | writel(CMDQ_THR_ACTIVE_SLOT_CYCLES, cmdq->base + CMDQ_THR_SLOT_CYCLES); |
| 125 | + for (i = 0; i <= CMDQ_MAX_EVENT; i++) |
| 126 | + writel(i, cmdq->base + CMDQ_SYNC_TOKEN_UPDATE); |
| 108 | 127 | clk_disable(cmdq->clock); |
| 109 | 128 | } |
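
The new loop in cmdq_init() writes every event ID from 0 through CMDQ_MAX_EVENT to CMDQ_SYNC_TOKEN_UPDATE; as I read the patch, each write clears that event's token, so no stale event state (left by the bootloader or a previous run) survives into the fresh kernel. The same write, factored into an illustrative helper that is not part of the patch:

```c
/* Illustrative only: clear a single GCE event token by writing its ID
 * to the SYNC_TOKEN_UPDATE register, exactly as the init loop does.
 */
static void cmdq_clear_event_token(struct cmdq *cmdq, u16 event_id)
{
	writel(event_id, cmdq->base + CMDQ_SYNC_TOKEN_UPDATE);
}
```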
| 110 | 129 | |
| .. | .. |
| 149 | 168 | dma_sync_single_for_cpu(dev, prev_task->pa_base, |
| 150 | 169 | prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE); |
| 151 | 170 | prev_task_base[CMDQ_NUM_CMD(prev_task->pkt) - 1] = |
| 152 | | - (u64)CMDQ_JUMP_BY_PA << 32 | task->pa_base; |
| 171 | + (u64)CMDQ_JUMP_BY_PA << 32 | |
| 172 | + (task->pa_base >> task->cmdq->shift_pa); |
| 153 | 173 | dma_sync_single_for_device(dev, prev_task->pa_base, |
| 154 | 174 | prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE); |
| 155 | 175 | |
| 156 | 176 | cmdq_thread_invalidate_fetched_data(thread); |
| 157 | 177 | } |
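
Every place the driver hands a physical address to the hardware now applies the same transformation: shifted right by shift_pa on the way in (writel() and jump operands, as in the hunk above) and left by shift_pa on the way out (readl() in the IRQ and timeout paths). Since GCE registers and jump operands are only 32 bits wide, this widens the reachable physical range as long as command buffers stay aligned to 1 << shift_pa bytes. A round-trip sketch with my own example values, not anything from the patch:

```c
#include <linux/bug.h>
#include <linux/types.h>

/* Illustrative only: with shift_pa = 3 (mt6779), an 8-byte-aligned
 * 35-bit physical address still fits a 32-bit GCE register.
 */
static void cmdq_shift_pa_example(void)
{
	u8 shift_pa = 3;
	dma_addr_t pa = (1ULL << 34) | 0x234567f8ULL;	/* 35-bit, aligned */
	u32 reg = (u32)(pa >> shift_pa);		/* what writel() stores */
	dma_addr_t back = (dma_addr_t)reg << shift_pa;	/* readl() << shift_pa */

	WARN_ON(back != pa);	/* lossless while pa stays aligned */
}
```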
| 158 | 178 | |
| 159 | | -static bool cmdq_command_is_wfe(u64 cmd) |
| 160 | | -{ |
| 161 | | - u64 wfe_option = CMDQ_WFE_UPDATE | CMDQ_WFE_WAIT | CMDQ_WFE_WAIT_VALUE; |
| 162 | | - u64 wfe_op = (u64)(CMDQ_CODE_WFE << CMDQ_OP_CODE_SHIFT) << 32; |
| 163 | | - u64 wfe_mask = (u64)CMDQ_OP_CODE_MASK << 32 | 0xffffffff; |
| 164 | | - |
| 165 | | - return ((cmd & wfe_mask) == (wfe_op | wfe_option)); |
| 166 | | -} |
| 167 | | - |
| 168 | | -/* we assume tasks in the same display GCE thread are waiting the same event. */ |
| 169 | | -static void cmdq_task_remove_wfe(struct cmdq_task *task) |
| 170 | | -{ |
| 171 | | - struct device *dev = task->cmdq->mbox.dev; |
| 172 | | - u64 *base = task->pkt->va_base; |
| 173 | | - int i; |
| 174 | | - |
| 175 | | - dma_sync_single_for_cpu(dev, task->pa_base, task->pkt->cmd_buf_size, |
| 176 | | - DMA_TO_DEVICE); |
| 177 | | - for (i = 0; i < CMDQ_NUM_CMD(task->pkt); i++) |
| 178 | | - if (cmdq_command_is_wfe(base[i])) |
| 179 | | - base[i] = (u64)CMDQ_JUMP_BY_OFFSET << 32 | |
| 180 | | - CMDQ_JUMP_PASS; |
| 181 | | - dma_sync_single_for_device(dev, task->pa_base, task->pkt->cmd_buf_size, |
| 182 | | - DMA_TO_DEVICE); |
| 183 | | -} |
| 184 | | - |
| 185 | 179 | static bool cmdq_thread_is_in_wfe(struct cmdq_thread *thread) |
| 186 | 180 | { |
| 187 | 181 | return readl(thread->base + CMDQ_THR_WAIT_TOKEN) & CMDQ_THR_IS_WAITING; |
| 188 | | -} |
| 189 | | - |
| 190 | | -static void cmdq_thread_wait_end(struct cmdq_thread *thread, |
| 191 | | - unsigned long end_pa) |
| 192 | | -{ |
| 193 | | - struct device *dev = thread->chan->mbox->dev; |
| 194 | | - unsigned long curr_pa; |
| 195 | | - |
| 196 | | - if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_CURR_ADDR, |
| 197 | | - curr_pa, curr_pa == end_pa, 1, 20)) |
| 198 | | - dev_err(dev, "GCE thread cannot run to end.\n"); |
| 199 | 182 | } |
| 200 | 183 | |
| 201 | 184 | static void cmdq_task_exec_done(struct cmdq_task *task, enum cmdq_cb_status sta) |
| .. | .. |
| 215 | 198 | { |
| 216 | 199 | struct cmdq_thread *thread = task->thread; |
| 217 | 200 | struct cmdq_task *next_task; |
| 201 | + struct cmdq *cmdq = task->cmdq; |
| 218 | 202 | |
| 219 | | - dev_err(task->cmdq->mbox.dev, "task 0x%p error\n", task); |
| 220 | | - WARN_ON(cmdq_thread_suspend(task->cmdq, thread) < 0); |
| 203 | + dev_err(cmdq->mbox.dev, "task 0x%p error\n", task); |
| 204 | + WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0); |
| 221 | 205 | next_task = list_first_entry_or_null(&thread->task_busy_list, |
| 222 | 206 | struct cmdq_task, list_entry); |
| 223 | 207 | if (next_task) |
| 224 | | - writel(next_task->pa_base, thread->base + CMDQ_THR_CURR_ADDR); |
| 208 | + writel(next_task->pa_base >> cmdq->shift_pa, |
| 209 | + thread->base + CMDQ_THR_CURR_ADDR); |
| 225 | 210 | cmdq_thread_resume(thread); |
| 226 | 211 | } |
| 227 | 212 | |
| .. | .. |
| 251 | 236 | else |
| 252 | 237 | return; |
| 253 | 238 | |
| 254 | | - curr_pa = readl(thread->base + CMDQ_THR_CURR_ADDR); |
| 239 | + curr_pa = readl(thread->base + CMDQ_THR_CURR_ADDR) << cmdq->shift_pa; |
| 255 | 240 | |
| 256 | 241 | list_for_each_entry_safe(task, tmp, &thread->task_busy_list, |
| 257 | 242 | list_entry) { |
| .. | .. |
| 284 | 269 | unsigned long irq_status, flags = 0L; |
| 285 | 270 | int bit; |
| 286 | 271 | |
| 287 | | - irq_status = readl(cmdq->base + CMDQ_CURR_IRQ_STATUS) & CMDQ_IRQ_MASK; |
| 288 | | - if (!(irq_status ^ CMDQ_IRQ_MASK)) |
| 272 | + irq_status = readl(cmdq->base + CMDQ_CURR_IRQ_STATUS) & cmdq->irq_mask; |
| 273 | + if (!(irq_status ^ cmdq->irq_mask)) |
| 289 | 274 | return IRQ_NONE; |
| 290 | 275 | |
| 291 | | - for_each_clear_bit(bit, &irq_status, fls(CMDQ_IRQ_MASK)) { |
| 276 | + for_each_clear_bit(bit, &irq_status, cmdq->thread_nr) { |
| 292 | 277 | struct cmdq_thread *thread = &cmdq->thread[bit]; |
| 293 | 278 | |
| 294 | 279 | spin_lock_irqsave(&thread->chan->lock, flags); |
| .. | .. |
| 337 | 322 | { |
| 338 | 323 | struct cmdq *cmdq = platform_get_drvdata(pdev); |
| 339 | 324 | |
| 340 | | - mbox_controller_unregister(&cmdq->mbox); |
| 341 | 325 | clk_unprepare(cmdq->clock); |
| 342 | | - |
| 343 | | - if (cmdq->mbox.chans) |
| 344 | | - devm_kfree(&pdev->dev, cmdq->mbox.chans); |
| 345 | | - |
| 346 | | - if (cmdq->thread) |
| 347 | | - devm_kfree(&pdev->dev, cmdq->thread); |
| 348 | | - |
| 349 | | - devm_kfree(&pdev->dev, cmdq); |
| 350 | 326 | |
| 351 | 327 | return 0; |
| 352 | 328 | } |
| .. | .. |
| 374 | 350 | |
| 375 | 351 | if (list_empty(&thread->task_busy_list)) { |
| 376 | 352 | WARN_ON(clk_enable(cmdq->clock) < 0); |
| 353 | + /* |
| 354 | + * The thread reset clears the thread-related registers to 0, |
| 355 | + * including pc, end, priority, irq, suspend and enable. Thus |
| 356 | + * writing CMDQ_THR_ENABLED to CMDQ_THR_ENABLE_TASK enables |
| 357 | + * the thread and sets it running. |
| 358 | + */ |
| 377 | 359 | WARN_ON(cmdq_thread_reset(cmdq, thread) < 0); |
| 378 | 360 | |
| 379 | | - writel(task->pa_base, thread->base + CMDQ_THR_CURR_ADDR); |
| 380 | | - writel(task->pa_base + pkt->cmd_buf_size, |
| 361 | + writel(task->pa_base >> cmdq->shift_pa, |
| 362 | + thread->base + CMDQ_THR_CURR_ADDR); |
| 363 | + writel((task->pa_base + pkt->cmd_buf_size) >> cmdq->shift_pa, |
| 381 | 364 | thread->base + CMDQ_THR_END_ADDR); |
| 365 | + |
| 382 | 366 | writel(thread->priority, thread->base + CMDQ_THR_PRIORITY); |
| 383 | 367 | writel(CMDQ_THR_IRQ_EN, thread->base + CMDQ_THR_IRQ_ENABLE); |
| 384 | 368 | writel(CMDQ_THR_ENABLED, thread->base + CMDQ_THR_ENABLE_TASK); |
| 385 | 369 | } else { |
| 386 | 370 | WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0); |
| 387 | | - curr_pa = readl(thread->base + CMDQ_THR_CURR_ADDR); |
| 388 | | - end_pa = readl(thread->base + CMDQ_THR_END_ADDR); |
| 389 | | - |
| 390 | | - /* |
| 391 | | - * Atomic execution should remove the following wfe, i.e. only |
| 392 | | - * wait event at first task, and prevent to pause when running. |
| 393 | | - */ |
| 394 | | - if (thread->atomic_exec) { |
| 395 | | - /* GCE is executing if command is not WFE */ |
| 396 | | - if (!cmdq_thread_is_in_wfe(thread)) { |
| 397 | | - cmdq_thread_resume(thread); |
| 398 | | - cmdq_thread_wait_end(thread, end_pa); |
| 399 | | - WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0); |
| 400 | | - /* set to this task directly */ |
| 401 | | - writel(task->pa_base, |
| 402 | | - thread->base + CMDQ_THR_CURR_ADDR); |
| 403 | | - } else { |
| 404 | | - cmdq_task_insert_into_thread(task); |
| 405 | | - cmdq_task_remove_wfe(task); |
| 406 | | - smp_mb(); /* modify jump before enable thread */ |
| 407 | | - } |
| 371 | + curr_pa = readl(thread->base + CMDQ_THR_CURR_ADDR) << |
| 372 | + cmdq->shift_pa; |
| 373 | + end_pa = readl(thread->base + CMDQ_THR_END_ADDR) << |
| 374 | + cmdq->shift_pa; |
| 375 | + /* check boundary */ |
| 376 | + if (curr_pa == end_pa - CMDQ_INST_SIZE || |
| 377 | + curr_pa == end_pa) { |
| 378 | + /* set to this task directly */ |
| 379 | + writel(task->pa_base >> cmdq->shift_pa, |
| 380 | + thread->base + CMDQ_THR_CURR_ADDR); |
| 408 | 381 | } else { |
| 409 | | - /* check boundary */ |
| 410 | | - if (curr_pa == end_pa - CMDQ_INST_SIZE || |
| 411 | | - curr_pa == end_pa) { |
| 412 | | - /* set to this task directly */ |
| 413 | | - writel(task->pa_base, |
| 414 | | - thread->base + CMDQ_THR_CURR_ADDR); |
| 415 | | - } else { |
| 416 | | - cmdq_task_insert_into_thread(task); |
| 417 | | - smp_mb(); /* modify jump before enable thread */ |
| 418 | | - } |
| 382 | + cmdq_task_insert_into_thread(task); |
| 383 | + smp_mb(); /* modify jump before enable thread */ |
| 419 | 384 | } |
| 420 | | - writel(task->pa_base + pkt->cmd_buf_size, |
| 385 | + writel((task->pa_base + pkt->cmd_buf_size) >> cmdq->shift_pa, |
| 421 | 386 | thread->base + CMDQ_THR_END_ADDR); |
| 422 | 387 | cmdq_thread_resume(thread); |
| 423 | 388 | } |
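
With atomic_exec gone, the busy-thread path is now uniform for every channel. A comment-style rendering of the two cases above (my paraphrase of the logic, not a separate implementation):

```c
/*
 * Busy-thread append, as done above:
 *
 * 1) curr_pa == end_pa, or end_pa - CMDQ_INST_SIZE (PC at or on the
 *    last instruction): the queue has drained, so point
 *    CMDQ_THR_CURR_ADDR straight at the new task.
 *
 * 2) otherwise: chain the task by rewriting the previous task's
 *    trailing jump to (CMDQ_JUMP_BY_PA << 32) | (pa >> shift_pa) in
 *    cmdq_task_insert_into_thread(), with smp_mb() ordering the jump
 *    rewrite before the thread is resumed.
 *
 * Either way, CMDQ_THR_END_ADDR is then advanced past the new task.
 */
```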
| .. | .. |
| 433 | 398 | |
| 434 | 399 | static void cmdq_mbox_shutdown(struct mbox_chan *chan) |
| 435 | 400 | { |
| 401 | + struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv; |
| 402 | + struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev); |
| 403 | + struct cmdq_task *task, *tmp; |
| 404 | + unsigned long flags; |
| 405 | + |
| 406 | + spin_lock_irqsave(&thread->chan->lock, flags); |
| 407 | + if (list_empty(&thread->task_busy_list)) |
| 408 | + goto done; |
| 409 | + |
| 410 | + WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0); |
| 411 | + |
| 412 | + /* make sure executed tasks get a success callback */ |
| 413 | + cmdq_thread_irq_handler(cmdq, thread); |
| 414 | + if (list_empty(&thread->task_busy_list)) |
| 415 | + goto done; |
| 416 | + |
| 417 | + list_for_each_entry_safe(task, tmp, &thread->task_busy_list, |
| 418 | + list_entry) { |
| 419 | + cmdq_task_exec_done(task, CMDQ_CB_ERROR); |
| 420 | + kfree(task); |
| 421 | + } |
| 422 | + |
| 423 | + cmdq_thread_disable(cmdq, thread); |
| 424 | + clk_disable(cmdq->clock); |
| 425 | +done: |
| 426 | + /* |
| 427 | + * An empty thread->task_busy_list means the thread is already |
| 428 | + * disabled. cmdq_mbox_send_data() always resets the thread, which |
| 429 | + * clears the disable and suspend status when the first packet is |
| 430 | + * sent to the channel, so there is nothing to do here except unlock. |
| 431 | + */ |
| 432 | + spin_unlock_irqrestore(&thread->chan->lock, flags); |
| 433 | +} |
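
cmdq_mbox_shutdown() first lets cmdq_thread_irq_handler() complete anything the hardware has already executed (so those tasks still get success callbacks), then fails the remainder with CMDQ_CB_ERROR and disables the thread. From the client's side this runs when the channel is released; a minimal teardown sketch with a hypothetical client structure:

```c
#include <linux/mailbox_client.h>

struct my_client {			/* hypothetical client context */
	struct mbox_chan *chan;
};

/* Freeing the channel invokes the controller's .shutdown, i.e.
 * cmdq_mbox_shutdown(), which errors out any tasks still queued on
 * the GCE thread before disabling it.
 */
static void my_client_teardown(struct my_client *cl)
{
	mbox_free_channel(cl->chan);
	cl->chan = NULL;
}
```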
| 434 | + |
| 435 | +static int cmdq_mbox_flush(struct mbox_chan *chan, unsigned long timeout) |
| 436 | +{ |
| 437 | + struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv; |
| 438 | + struct cmdq_task_cb *cb; |
| 439 | + struct cmdq_cb_data data; |
| 440 | + struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev); |
| 441 | + struct cmdq_task *task, *tmp; |
| 442 | + unsigned long flags; |
| 443 | + u32 enable; |
| 444 | + |
| 445 | + spin_lock_irqsave(&thread->chan->lock, flags); |
| 446 | + if (list_empty(&thread->task_busy_list)) |
| 447 | + goto out; |
| 448 | + |
| 449 | + WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0); |
| 450 | + if (!cmdq_thread_is_in_wfe(thread)) |
| 451 | + goto wait; |
| 452 | + |
| 453 | + list_for_each_entry_safe(task, tmp, &thread->task_busy_list, |
| 454 | + list_entry) { |
| 455 | + cb = &task->pkt->async_cb; |
| 456 | + if (cb->cb) { |
| 457 | + data.sta = CMDQ_CB_ERROR; |
| 458 | + data.data = cb->data; |
| 459 | + cb->cb(data); |
| 460 | + } |
| 461 | + list_del(&task->list_entry); |
| 462 | + kfree(task); |
| 463 | + } |
| 464 | + |
| 465 | + cmdq_thread_resume(thread); |
| 466 | + cmdq_thread_disable(cmdq, thread); |
| 467 | + clk_disable(cmdq->clock); |
| 468 | + |
| 469 | +out: |
| 470 | + spin_unlock_irqrestore(&thread->chan->lock, flags); |
| 471 | + return 0; |
| 472 | + |
| 473 | +wait: |
| 474 | + cmdq_thread_resume(thread); |
| 475 | + spin_unlock_irqrestore(&thread->chan->lock, flags); |
| 476 | + if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_ENABLE_TASK, |
| 477 | + enable, enable == 0, 1, timeout)) { |
| 478 | + dev_err(cmdq->mbox.dev, "Failed to wait for GCE thread 0x%x to finish\n", |
| 479 | + (u32)(thread->base - cmdq->base)); |
| 480 | + |
| 481 | + return -EFAULT; |
| 482 | + } |
| 483 | + return 0; |
| 436 | 484 | } |
| 437 | 485 | |
| 438 | 486 | static const struct mbox_chan_ops cmdq_mbox_chan_ops = { |
| 439 | 487 | .send_data = cmdq_mbox_send_data, |
| 440 | 488 | .startup = cmdq_mbox_startup, |
| 441 | 489 | .shutdown = cmdq_mbox_shutdown, |
| 490 | + .flush = cmdq_mbox_flush, |
| 442 | 491 | }; |
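
Wiring cmdq_mbox_flush() into .flush lets clients drain a channel without sleeping: if the thread is parked in wait-for-event, pending tasks are failed immediately; otherwise the function busy-waits (readl_poll_timeout_atomic()) for the thread to finish within the caller's timeout. A hypothetical atomic-context usage sketch through the mailbox core's mbox_flush():

```c
#include <linux/mailbox_client.h>

/* Hypothetical submit path that cannot sleep: queue a packet, then
 * use the new .flush hook (via mbox_flush()) to wait up to 2000us
 * for the GCE thread to drain.
 */
static int my_submit_and_flush(struct mbox_chan *chan, void *pkt)
{
	int ret;

	ret = mbox_send_message(chan, pkt);
	if (ret < 0)
		return ret;

	return mbox_flush(chan, 2000);
}
```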
| 443 | 492 | |
| 444 | 493 | static struct mbox_chan *cmdq_xlate(struct mbox_controller *mbox, |
| .. | .. |
| 452 | 501 | |
| 453 | 502 | thread = (struct cmdq_thread *)mbox->chans[ind].con_priv; |
| 454 | 503 | thread->priority = sp->args[1]; |
| 455 | | - thread->atomic_exec = (sp->args[2] != 0); |
| 456 | 504 | thread->chan = &mbox->chans[ind]; |
| 457 | 505 | |
| 458 | 506 | return &mbox->chans[ind]; |
| .. | .. |
| 464 | 512 | struct resource *res; |
| 465 | 513 | struct cmdq *cmdq; |
| 466 | 514 | int err, i; |
| 515 | + struct gce_plat *plat_data; |
| 467 | 516 | |
| 468 | 517 | cmdq = devm_kzalloc(dev, sizeof(*cmdq), GFP_KERNEL); |
| 469 | 518 | if (!cmdq) |
| .. | .. |
| 477 | 526 | } |
| 478 | 527 | |
| 479 | 528 | cmdq->irq = platform_get_irq(pdev, 0); |
| 480 | | - if (!cmdq->irq) { |
| 481 | | - dev_err(dev, "failed to get irq\n"); |
| 529 | + if (cmdq->irq < 0) |
| 530 | + return cmdq->irq; |
| 531 | + |
| 532 | + plat_data = (struct gce_plat *)of_device_get_match_data(dev); |
| 533 | + if (!plat_data) { |
| 534 | + dev_err(dev, "failed to get match data\n"); |
| 482 | 535 | return -EINVAL; |
| 483 | 536 | } |
| 537 | + |
| 538 | + cmdq->thread_nr = plat_data->thread_nr; |
| 539 | + cmdq->shift_pa = plat_data->shift; |
| 540 | + cmdq->irq_mask = GENMASK(cmdq->thread_nr - 1, 0); |
| 484 | 541 | err = devm_request_irq(dev, cmdq->irq, cmdq_irq_handler, IRQF_SHARED, |
| 485 | 542 | "mtk_cmdq", cmdq); |
| 486 | 543 | if (err < 0) { |
| .. | .. |
| 497 | 554 | return PTR_ERR(cmdq->clock); |
| 498 | 555 | } |
| 499 | 556 | |
| 500 | | - cmdq->thread_nr = (u32)(unsigned long)of_device_get_match_data(dev); |
| 501 | 557 | cmdq->mbox.dev = dev; |
| 502 | 558 | cmdq->mbox.chans = devm_kcalloc(dev, cmdq->thread_nr, |
| 503 | 559 | sizeof(*cmdq->mbox.chans), GFP_KERNEL); |
| .. | .. |
| 524 | 580 | cmdq->mbox.chans[i].con_priv = (void *)&cmdq->thread[i]; |
| 525 | 581 | } |
| 526 | 582 | |
| 527 | | - err = mbox_controller_register(&cmdq->mbox); |
| 583 | + err = devm_mbox_controller_register(dev, &cmdq->mbox); |
| 528 | 584 | if (err < 0) { |
| 529 | 585 | dev_err(dev, "failed to register mailbox: %d\n", err); |
| 530 | 586 | return err; |
| .. | .. |
| 543 | 599 | .resume = cmdq_resume, |
| 544 | 600 | }; |
| 545 | 601 | |
| 602 | +static const struct gce_plat gce_plat_v2 = {.thread_nr = 16}; |
| 603 | +static const struct gce_plat gce_plat_v3 = {.thread_nr = 24}; |
| 604 | +static const struct gce_plat gce_plat_v4 = {.thread_nr = 24, .shift = 3}; |
| 605 | + |
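
The bare thread-count match data is replaced by per-SoC gce_plat descriptors: thread_nr sizes the channel array and the derived interrupt mask, while shift feeds shift_pa. Working through gce_plat_v4 with my own arithmetic (matching the GENMASK() and shift use in probe above):

```c
#include <linux/bits.h>
#include <linux/types.h>

/* Illustrative only: the values probe derives from gce_plat_v4 (mt6779). */
static void gce_plat_v4_example(void)
{
	u32 irq_mask = GENMASK(24 - 1, 0);	/* 0x00ffffff: one bit per thread */
	u64 top_pa = (u64)0xffffffff << 3;	/* 32-bit reg << shift: ~35-bit reach */

	(void)irq_mask;
	(void)top_pa;
}
```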
| 546 | 606 | static const struct of_device_id cmdq_of_ids[] = { |
| 547 | | - {.compatible = "mediatek,mt8173-gce", .data = (void *)16}, |
| 607 | + {.compatible = "mediatek,mt8173-gce", .data = (void *)&gce_plat_v2}, |
| 608 | + {.compatible = "mediatek,mt8183-gce", .data = (void *)&gce_plat_v3}, |
| 609 | + {.compatible = "mediatek,mt6779-gce", .data = (void *)&gce_plat_v4}, |
| 548 | 610 | {} |
| 549 | 611 | }; |
| 550 | 612 | |
|---|