| .. | .. |
| 13 | 13 | #include "rga_mm.h" |
| 14 | 14 | #include "rga_iommu.h" |
| 15 | 15 | #include "rga_debugger.h" |
| 16 | | - |
| 17 | | -struct rga_job * |
| 18 | | -rga_scheduler_get_pending_job_list(struct rga_scheduler_t *scheduler) |
| 19 | | -{ |
| 20 | | - unsigned long flags; |
| 21 | | - struct rga_job *job; |
| 22 | | - |
| 23 | | - spin_lock_irqsave(&scheduler->irq_lock, flags); |
| 24 | | - |
| 25 | | - job = list_first_entry_or_null(&scheduler->todo_list, |
| 26 | | - struct rga_job, head); |
| 27 | | - |
| 28 | | - spin_unlock_irqrestore(&scheduler->irq_lock, flags); |
| 29 | | - |
| 30 | | - return job; |
| 31 | | -} |
| 32 | | - |
| 33 | | -struct rga_job * |
| 34 | | -rga_scheduler_get_running_job(struct rga_scheduler_t *scheduler) |
| 35 | | -{ |
| 36 | | - unsigned long flags; |
| 37 | | - struct rga_job *job; |
| 38 | | - |
| 39 | | - spin_lock_irqsave(&scheduler->irq_lock, flags); |
| 40 | | - |
| 41 | | - job = scheduler->running_job; |
| 42 | | - |
| 43 | | - spin_unlock_irqrestore(&scheduler->irq_lock, flags); |
| 44 | | - |
| 45 | | - return job; |
| 46 | | -} |
| 47 | | - |
| 48 | | -struct rga_scheduler_t *rga_job_get_scheduler(struct rga_job *job) |
| 49 | | -{ |
| 50 | | - return job->scheduler; |
| 51 | | -} |
| | 16 | +#include "rga_common.h" |
| 52 | 17 | |
| 53 | 18 | static void rga_job_free(struct rga_job *job) |
| 54 | 19 | { |
| 55 | 20 | free_page((unsigned long)job); |
| 56 | 21 | } |
| 57 | 22 | |
| 58 | | -void rga_job_session_destroy(struct rga_session *session) |
| | 23 | +static void rga_job_kref_release(struct kref *ref) |
| 59 | 24 | { |
| 60 | | - struct rga_scheduler_t *scheduler = NULL; |
| 61 | | - struct rga_job *job_pos, *job_q; |
| 62 | | - int i; |
| | 25 | + struct rga_job *job; |
| 63 | 26 | |
| 64 | | - unsigned long flags; |
| | 27 | + job = container_of(ref, struct rga_job, refcount); |
| 65 | 28 | |
| 66 | | - for (i = 0; i < rga_drvdata->num_of_scheduler; i++) { |
| 67 | | - scheduler = rga_drvdata->scheduler[i]; |
| | 29 | + rga_job_free(job); |
| | 30 | +} |
| 68 | 31 | |
| 69 | | - spin_lock_irqsave(&scheduler->irq_lock, flags); |
| | 32 | +static int rga_job_put(struct rga_job *job) |
| | 33 | +{ |
| | 34 | + return kref_put(&job->refcount, rga_job_kref_release); |
| | 35 | +} |
| 70 | 36 | |
| 71 | | - list_for_each_entry_safe(job_pos, job_q, &scheduler->todo_list, head) { |
| 72 | | - if (session == job_pos->session) { |
| 73 | | - list_del(&job_pos->head); |
| 74 | | - |
| 75 | | - spin_unlock_irqrestore(&scheduler->irq_lock, flags); |
| 76 | | - |
| 77 | | - rga_job_free(job_pos); |
| 78 | | - |
| 79 | | - spin_lock_irqsave(&scheduler->irq_lock, flags); |
| 80 | | - } |
| 81 | | - } |
| 82 | | - |
| 83 | | - spin_unlock_irqrestore(&scheduler->irq_lock, flags); |
| 84 | | - } |
| | 37 | +static void rga_job_get(struct rga_job *job) |
| | 38 | +{ |
| | 39 | + kref_get(&job->refcount); |
| 85 | 40 | } |
| 86 | 41 | |
| 87 | 42 | static int rga_job_cleanup(struct rga_job *job) |
| 88 | 43 | { |
| 89 | | - if (DEBUGGER_EN(TIME)) |
| 90 | | - pr_err("(pid:%d) job clean use time = %lld\n", job->pid, |
| 91 | | - ktime_us_delta(ktime_get(), job->timestamp)); |
| | 44 | + rga_job_put(job); |
| 92 | 45 | |
| 93 | | - rga_job_free(job); |
| | 46 | + if (DEBUGGER_EN(TIME)) |
| | 47 | + pr_info("request[%d], job cleanup total cost time %lld us\n", |
| | 48 | + job->request_id, |
| | 49 | + ktime_us_delta(ktime_get(), job->timestamp)); |
| 94 | 50 | |
| 95 | 51 | return 0; |
| 96 | 52 | } |
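The hunk above removes the open-coded pending/running-job accessors and the list-walking `rga_job_session_destroy()`, and instead ties the lifetime of `struct rga_job` to a `kref` (`rga_job_kref_release()` / `rga_job_get()` / `rga_job_put()`), so a job is freed only once every path that still holds it has dropped its reference. As a hedged, minimal sketch of that generic kernel pattern (illustrative only; `my_obj` and its helpers are made-up names, not the driver's API):

```c
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

/* Hypothetical object whose lifetime is tied to a kref. */
struct my_obj {
	struct kref refcount;
	/* ... payload ... */
};

/* Runs exactly once, when the last reference is dropped. */
static void my_obj_release(struct kref *ref)
{
	struct my_obj *obj = container_of(ref, struct my_obj, refcount);

	kfree(obj);
}

static struct my_obj *my_obj_alloc(void)
{
	struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;

	kref_init(&obj->refcount);	/* starts at 1, owned by the caller */
	return obj;
}

/* Every additional user takes its own reference ... */
static void my_obj_get(struct my_obj *obj)
{
	kref_get(&obj->refcount);
}

/* ... and drops it when done; returns nonzero once the object was released. */
static int my_obj_put(struct my_obj *obj)
{
	return kref_put(&obj->refcount, my_obj_release);
}
```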
| .. | .. |
| 165 | 121 | return NULL; |
| 166 | 122 | |
| 167 | 123 | INIT_LIST_HEAD(&job->head); |
| | 124 | + kref_init(&job->refcount); |
| 168 | 125 | |
| 169 | 126 | job->timestamp = ktime_get(); |
| 170 | 127 | job->pid = current->pid; |
| .. | .. |
| 232 | 189 | return ret; |
| 233 | 190 | } |
| 234 | 191 | |
| | 192 | + set_bit(RGA_JOB_STATE_RUNNING, &job->state); |
| | 193 | + |
| 235 | 194 | /* for debug */ |
| 236 | 195 | if (DEBUGGER_EN(MSG)) |
| 237 | 196 | rga_job_dump_info(job); |
| 238 | 197 | |
| 239 | 198 | return ret; |
| 240 | | - |
| 241 | 199 | } |
| 242 | 200 | |
| 243 | | -static void rga_job_next(struct rga_scheduler_t *scheduler) |
| | 201 | +void rga_job_next(struct rga_scheduler_t *scheduler) |
| 244 | 202 | { |
| | 203 | + int ret; |
| 245 | 204 | struct rga_job *job = NULL; |
| 246 | 205 | unsigned long flags; |
| 247 | 206 | |
| .. | .. |
| 261 | 220 | scheduler->job_count--; |
| 262 | 221 | |
| 263 | 222 | scheduler->running_job = job; |
| | 223 | + set_bit(RGA_JOB_STATE_PREPARE, &job->state); |
| | 224 | + rga_job_get(job); |
| 264 | 225 | |
| 265 | 226 | spin_unlock_irqrestore(&scheduler->irq_lock, flags); |
| 266 | 227 | |
| 267 | | - job->ret = rga_job_run(job, scheduler); |
| | 228 | + ret = rga_job_run(job, scheduler); |
| 268 | 229 | /* If some error before hw run */ |
| 269 | | - if (job->ret < 0) { |
| 270 | | - pr_err("some error on rga_job_run before hw start, %s(%d)\n", |
| 271 | | - __func__, __LINE__); |
| | 230 | + if (ret < 0) { |
| | 231 | + pr_err("some error on rga_job_run before hw start, %s(%d)\n", __func__, __LINE__); |
| 272 | 232 | |
| 273 | 233 | spin_lock_irqsave(&scheduler->irq_lock, flags); |
| 274 | 234 | |
| 275 | 235 | scheduler->running_job = NULL; |
| | 236 | + rga_job_put(job); |
| 276 | 237 | |
| 277 | 238 | spin_unlock_irqrestore(&scheduler->irq_lock, flags); |
| 278 | 239 | |
| | 240 | + job->ret = ret; |
| 279 | 241 | rga_request_release_signal(scheduler, job); |
| 280 | 242 | |
| 281 | 243 | goto next_job; |
| 282 | 244 | } |
| | 245 | + |
| | 246 | + rga_job_put(job); |
| 283 | 247 | } |
| 284 | 248 | |
| 285 | | -static void rga_job_finish_and_next(struct rga_scheduler_t *scheduler, |
| 286 | | - struct rga_job *job, int ret) |
| 287 | | -{ |
| 288 | | - ktime_t now; |
| 289 | | - |
| 290 | | - job->ret = ret; |
| 291 | | - |
| 292 | | - if (DEBUGGER_EN(TIME)) { |
| 293 | | - now = ktime_get(); |
| 294 | | - pr_info("hw use time = %lld\n", ktime_us_delta(now, job->hw_running_time)); |
| 295 | | - pr_info("(pid:%d) job done use time = %lld\n", job->pid, |
| 296 | | - ktime_us_delta(now, job->timestamp)); |
| 297 | | - } |
| 298 | | - |
| 299 | | - rga_mm_unmap_job_info(job); |
| 300 | | - |
| 301 | | - rga_request_release_signal(scheduler, job); |
| 302 | | - |
| 303 | | - rga_job_next(scheduler); |
| 304 | | - |
| 305 | | - rga_power_disable(scheduler); |
| 306 | | -} |
| 307 | | - |
| 308 | | -void rga_job_done(struct rga_scheduler_t *scheduler, int ret) |
| | 249 | +struct rga_job *rga_job_done(struct rga_scheduler_t *scheduler) |
| 309 | 250 | { |
| 310 | 251 | struct rga_job *job; |
| 311 | 252 | unsigned long flags; |
| .. | .. |
| 314 | 255 | spin_lock_irqsave(&scheduler->irq_lock, flags); |
| 315 | 256 | |
| 316 | 257 | job = scheduler->running_job; |
| | 258 | + if (job == NULL) { |
| | 259 | + pr_err("core[0x%x] running job has been cleanup.\n", scheduler->core); |
| | 260 | + |
| | 261 | + spin_unlock_irqrestore(&scheduler->irq_lock, flags); |
| | 262 | + return NULL; |
| | 263 | + } |
| 317 | 264 | scheduler->running_job = NULL; |
| 318 | 265 | |
| 319 | 266 | scheduler->timer.busy_time += ktime_us_delta(now, job->hw_recoder_time); |
| | 267 | + set_bit(RGA_JOB_STATE_DONE, &job->state); |
| 320 | 268 | |
| 321 | 269 | spin_unlock_irqrestore(&scheduler->irq_lock, flags); |
| | 270 | + |
| | 271 | + if (scheduler->ops->read_back_reg) |
| | 272 | + scheduler->ops->read_back_reg(job, scheduler); |
| 322 | 273 | |
| 323 | 274 | if (DEBUGGER_EN(DUMP_IMAGE)) |
| 324 | 275 | rga_dump_job_image(job); |
| 325 | 276 | |
| 326 | | - rga_job_finish_and_next(scheduler, job, ret); |
| | 277 | + if (DEBUGGER_EN(TIME)) |
| | 278 | + pr_info("request[%d], hardware[%s] cost time %lld us\n", |
| | 279 | + job->request_id, |
| | 280 | + rga_get_core_name(scheduler->core), |
| | 281 | + ktime_us_delta(now, job->hw_running_time)); |
| | 282 | + |
| | 283 | + rga_mm_unmap_job_info(job); |
| | 284 | + |
| | 285 | + return job; |
| 327 | 286 | } |
| 328 | 287 | |
| 329 | 288 | static void rga_job_scheduler_timeout_clean(struct rga_scheduler_t *scheduler) |
| .. | .. |
| 391 | 350 | } |
| 392 | 351 | |
| 393 | 352 | scheduler->job_count++; |
| | 353 | + set_bit(RGA_JOB_STATE_PENDING, &job->state); |
| 394 | 354 | |
| 395 | 355 | spin_unlock_irqrestore(&scheduler->irq_lock, flags); |
| 396 | 356 | } |
| 397 | 357 | |
| 398 | 358 | static struct rga_scheduler_t *rga_job_schedule(struct rga_job *job) |
| 399 | 359 | { |
| | 360 | + int i; |
| 400 | 361 | struct rga_scheduler_t *scheduler = NULL; |
| | 362 | + |
| | 363 | + for (i = 0; i < rga_drvdata->num_of_scheduler; i++) { |
| | 364 | + scheduler = rga_drvdata->scheduler[i]; |
| | 365 | + rga_job_scheduler_timeout_clean(scheduler); |
| | 366 | + } |
| 401 | 367 | |
| 402 | 368 | if (rga_drvdata->num_of_scheduler > 1) { |
| 403 | 369 | job->core = rga_job_assign(job); |
| .. | .. |
| 411 | 377 | job->scheduler = rga_drvdata->scheduler[0]; |
| 412 | 378 | } |
| 413 | 379 | |
| 414 | | - scheduler = rga_job_get_scheduler(job); |
| | 380 | + scheduler = job->scheduler; |
| 415 | 381 | if (scheduler == NULL) { |
| 416 | 382 | pr_err("failed to get scheduler, %s(%d)\n", __func__, __LINE__); |
| 417 | 383 | job->ret = -EFAULT; |
| 418 | 384 | return NULL; |
| 419 | 385 | } |
| 420 | | - |
| 421 | | - rga_job_scheduler_timeout_clean(scheduler); |
| 422 | 386 | |
| 423 | 387 | return scheduler; |
| 424 | 388 | } |
| .. | .. |
| 530 | 494 | return false; |
| 531 | 495 | } |
| 532 | 496 | |
| 533 | | -static int rga_request_get_current_mm(struct rga_request *request) |
| | 497 | +static struct mm_struct *rga_request_get_current_mm(struct rga_request *request) |
| 534 | 498 | { |
| 535 | 499 | int i; |
| 536 | 500 | |
| .. | .. |
| 538 | 502 | if (rga_is_need_current_mm(&(request->task_list[i]))) { |
| 539 | 503 | mmgrab(current->mm); |
| 540 | 504 | mmget(current->mm); |
| 541 | | - request->current_mm = current->mm; |
| 542 | 505 | |
| 543 | | - break; |
| | 506 | + return current->mm; |
| 544 | 507 | } |
| 545 | 508 | } |
| 546 | 509 | |
| 547 | | - return 0; |
| | 510 | + return NULL; |
| 548 | 511 | } |
| 549 | 512 | |
| 550 | | -static void rga_request_put_current_mm(struct rga_request *request) |
| | 513 | +static void rga_request_put_current_mm(struct mm_struct *mm) |
| 551 | 514 | { |
| 552 | | - if (request->current_mm == NULL) |
| | 515 | + if (mm == NULL) |
| 553 | 516 | return; |
| 554 | 517 | |
| 555 | | - mmput(request->current_mm); |
| 556 | | - mmdrop(request->current_mm); |
| 557 | | - request->current_mm = NULL; |
| | 518 | + mmput(mm); |
| | 519 | + mmdrop(mm); |
| 558 | 520 | } |
| 559 | 521 | |
| 560 | | -static int rga_request_alloc_release_fence(struct dma_fence **release_fence) |
| 561 | | -{ |
| 562 | | - struct dma_fence *fence; |
| 563 | | - |
| 564 | | - fence = rga_dma_fence_alloc(); |
| 565 | | - if (IS_ERR(fence)) { |
| 566 | | - pr_err("Can not alloc release fence!\n"); |
| 567 | | - return IS_ERR(fence); |
| 568 | | - } |
| 569 | | - |
| 570 | | - *release_fence = fence; |
| 571 | | - |
| 572 | | - return rga_dma_fence_get_fd(fence); |
| 573 | | -} |
| 574 | | - |
| 575 | | -static int rga_request_add_acquire_fence_callback(int acquire_fence_fd, void *private, |
| | 522 | +static int rga_request_add_acquire_fence_callback(int acquire_fence_fd, |
| | 523 | + struct rga_request *request, |
| 576 | 524 | dma_fence_func_t cb_func) |
| 577 | 525 | { |
| 578 | 526 | int ret; |
| 579 | 527 | struct dma_fence *acquire_fence = NULL; |
| | 528 | + struct rga_pending_request_manager *request_manager = rga_drvdata->pend_request_manager; |
| 580 | 529 | |
| 581 | 530 | if (DEBUGGER_EN(MSG)) |
| 582 | 531 | pr_info("acquire_fence_fd = %d", acquire_fence_fd); |
| .. | .. |
| 587 | 536 | __func__, acquire_fence_fd); |
| 588 | 537 | return -EINVAL; |
| 589 | 538 | } |
| 590 | | - /* close acquire fence fd */ |
| 591 | | - ksys_close(acquire_fence_fd); |
| | 539 | + |
| | 540 | + if (!request->feature.user_close_fence) { |
| | 541 | + /* close acquire fence fd */ |
| | 542 | +#ifdef CONFIG_NO_GKI |
| | 543 | +#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0) |
| | 544 | + close_fd(acquire_fence_fd); |
| | 545 | +#else |
| | 546 | + ksys_close(acquire_fence_fd); |
| | 547 | +#endif |
| | 548 | +#else |
| | 549 | + pr_err("Please update the driver to v1.2.28 to prevent acquire_fence_fd leaks."); |
| | 550 | + return -EFAULT; |
| | 551 | +#endif |
| | 552 | + } |
| | 553 | + |
| 592 | 554 | |
| 593 | 555 | ret = rga_dma_fence_get_status(acquire_fence); |
| 594 | | - if (ret == 0) { |
| 595 | | - ret = rga_dma_fence_add_callback(acquire_fence, cb_func, private); |
| 596 | | - if (ret < 0) { |
| 597 | | - if (ret == -ENOENT) |
| 598 | | - return 1; |
| | 556 | + if (ret < 0) { |
| | 557 | + pr_err("%s: Current acquire fence unexpectedly has error status before signal\n", |
| | 558 | + __func__); |
| | 559 | + return ret; |
| | 560 | + } else if (ret > 0) { |
| | 561 | + /* has been signaled */ |
| | 562 | + return ret; |
| | 563 | + } |
| 599 | 564 | |
| | 565 | + /* |
| | 566 | + * Ensure that the request will not be free early when |
| | 567 | + * the callback is called. |
| | 568 | + */ |
| | 569 | + mutex_lock(&request_manager->lock); |
| | 570 | + rga_request_get(request); |
| | 571 | + mutex_unlock(&request_manager->lock); |
| | 572 | + |
| | 573 | + ret = rga_dma_fence_add_callback(acquire_fence, cb_func, (void *)request); |
| | 574 | + if (ret < 0) { |
| | 575 | + if (ret != -ENOENT) |
| 600 | 576 | pr_err("%s: failed to add fence callback\n", __func__); |
| 601 | | - return ret; |
| 602 | | - } |
| 603 | | - } else { |
| | 577 | + |
| | 578 | + mutex_lock(&request_manager->lock); |
| | 579 | + rga_request_put(request); |
| | 580 | + mutex_unlock(&request_manager->lock); |
| 604 | 581 | return ret; |
| 605 | 582 | } |
| 606 | 583 | |
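In the hunk above, the acquire-fence handling now checks the fence status first, takes an extra reference on the `rga_request` so it cannot be freed before the callback fires, and only then registers the callback (dropping the reference again if registration fails). A hedged sketch of the underlying `dma_fence` callback pattern using the generic API (the `rga_*` wrappers are driver-specific; `my_ctx` and `my_fence_cb` are made-up names):

```c
#include <linux/dma-fence.h>

struct my_ctx {
	struct dma_fence_cb cb;		/* embedded callback node */
	/* ... state the callback will need ... */
};

/* Invoked from the signaling context once the fence signals. */
static void my_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct my_ctx *ctx = container_of(cb, struct my_ctx, cb);

	(void)ctx;	/* consume the event */
}

static int my_wait_async(struct my_ctx *ctx, struct dma_fence *fence)
{
	int ret;

	/* <0: fence already carries an error, >0: already signaled, 0: pending */
	ret = dma_fence_get_status(fence);
	if (ret != 0)
		return ret;

	/*
	 * -ENOENT means the fence signaled between the check and the add,
	 * so the callback will never run and the caller must treat it as
	 * "already signaled".
	 */
	ret = dma_fence_add_callback(fence, &ctx->cb, my_fence_cb);
	if (ret == -ENOENT)
		return 1;

	return ret;
}
```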
| .. | .. |
| 682 | 659 | scheduler->ops->soft_reset(scheduler); |
| 683 | 660 | } |
| 684 | 661 | |
| 685 | | - pr_err("reset core[%d] by request abort", scheduler->core); |
| | 662 | + pr_err("reset core[%d] by request[%d] abort", |
| | 663 | + scheduler->core, request->id); |
| 686 | 664 | running_abort_count++; |
| 687 | 665 | } |
| 688 | 666 | } |
| .. | .. |
| 715 | 693 | static void rga_request_release_abort(struct rga_request *request, int err_code) |
| 716 | 694 | { |
| 717 | 695 | unsigned long flags; |
| | 696 | + struct mm_struct *current_mm; |
| 718 | 697 | struct rga_pending_request_manager *request_manager = rga_drvdata->pend_request_manager; |
| 719 | 698 | |
| 720 | 699 | if (rga_request_scheduler_job_abort(request) > 0) |
| .. | .. |
| 729 | 708 | |
| 730 | 709 | request->is_running = false; |
| 731 | 710 | request->is_done = false; |
| 732 | | - |
| 733 | | - rga_request_put_current_mm(request); |
| | 711 | + current_mm = request->current_mm; |
| | 712 | + request->current_mm = NULL; |
| 734 | 713 | |
| 735 | 714 | spin_unlock_irqrestore(&request->lock, flags); |
| | 715 | + |
| | 716 | + rga_request_put_current_mm(current_mm); |
| 736 | 717 | |
| 737 | 718 | rga_dma_fence_signal(request->release_fence, err_code); |
| 738 | 719 | |
| .. | .. |
| 740 | 721 | /* current submit request put */ |
| 741 | 722 | rga_request_put(request); |
| 742 | 723 | mutex_unlock(&request_manager->lock); |
| | 724 | +} |
| | 725 | + |
| | 726 | +void rga_request_session_destroy_abort(struct rga_session *session) |
| | 727 | +{ |
| | 728 | + int request_id; |
| | 729 | + struct rga_request *request; |
| | 730 | + struct rga_pending_request_manager *request_manager; |
| | 731 | + |
| | 732 | + request_manager = rga_drvdata->pend_request_manager; |
| | 733 | + if (request_manager == NULL) { |
| | 734 | + pr_err("rga_pending_request_manager is null!\n"); |
| | 735 | + return; |
| | 736 | + } |
| | 737 | + |
| | 738 | + mutex_lock(&request_manager->lock); |
| | 739 | + |
| | 740 | + idr_for_each_entry(&request_manager->request_idr, request, request_id) { |
| | 741 | + if (session == request->session) { |
| | 742 | + pr_err("[tgid:%d pid:%d] destroy request[%d] when the user exits", |
| | 743 | + session->tgid, current->pid, request->id); |
| | 744 | + rga_request_put(request); |
| | 745 | + } |
| | 746 | + } |
| | 747 | + |
| | 748 | + mutex_unlock(&request_manager->lock); |
| | 749 | +} |
| | 750 | + |
| | 751 | +static int rga_request_timeout_query_state(struct rga_request *request) |
| | 752 | +{ |
| | 753 | + int i; |
| | 754 | + unsigned long flags; |
| | 755 | + struct rga_scheduler_t *scheduler = NULL; |
| | 756 | + struct rga_job *job = NULL; |
| | 757 | + |
| | 758 | + for (i = 0; i < rga_drvdata->num_of_scheduler; i++) { |
| | 759 | + scheduler = rga_drvdata->scheduler[i]; |
| | 760 | + |
| | 761 | + spin_lock_irqsave(&scheduler->irq_lock, flags); |
| | 762 | + |
| | 763 | + if (scheduler->running_job) { |
| | 764 | + job = scheduler->running_job; |
| | 765 | + if (request->id == job->request_id) { |
| | 766 | + if (test_bit(RGA_JOB_STATE_DONE, &job->state) && |
| | 767 | + test_bit(RGA_JOB_STATE_FINISH, &job->state)) { |
| | 768 | + spin_unlock_irqrestore(&scheduler->irq_lock, flags); |
| | 769 | + return request->ret; |
| | 770 | + } else if (!test_bit(RGA_JOB_STATE_DONE, &job->state) && |
| | 771 | + test_bit(RGA_JOB_STATE_FINISH, &job->state)) { |
| | 772 | + spin_unlock_irqrestore(&scheduler->irq_lock, flags); |
| | 773 | + pr_err("request[%d] hardware has finished, but the software has timeout!\n", |
| | 774 | + request->id); |
| | 775 | + return -EBUSY; |
| | 776 | + } else if (!test_bit(RGA_JOB_STATE_DONE, &job->state) && |
| | 777 | + !test_bit(RGA_JOB_STATE_FINISH, &job->state)) { |
| | 778 | + spin_unlock_irqrestore(&scheduler->irq_lock, flags); |
| | 779 | + pr_err("request[%d] hardware has timeout.\n", request->id); |
| | 780 | + return -EBUSY; |
| | 781 | + } |
| | 782 | + } |
| | 783 | + } |
| | 784 | + |
| | 785 | + spin_unlock_irqrestore(&scheduler->irq_lock, flags); |
| | 786 | + } |
| | 787 | + |
| | 788 | + return request->ret; |
| 743 | 789 | } |
| 744 | 790 | |
| 745 | 791 | static int rga_request_wait(struct rga_request *request) |
| .. | .. |
| 752 | 798 | |
| 753 | 799 | switch (left_time) { |
| 754 | 800 | case 0: |
| 755 | | - pr_err("%s timeout", __func__); |
| 756 | | - ret = -EBUSY; |
| | 801 | + ret = rga_request_timeout_query_state(request); |
| 757 | 802 | goto err_request_abort; |
| 758 | 803 | case -ERESTARTSYS: |
| 759 | 804 | ret = -ERESTARTSYS; |
| .. | .. |
| 778 | 823 | struct rga_job *job; |
| 779 | 824 | |
| 780 | 825 | for (i = 0; i < request->task_count; i++) { |
| 781 | | - job = rga_job_commit(&(request->task_list[i]), request); |
| | 826 | + struct rga_req *req = &(request->task_list[i]); |
| | 827 | + |
| | 828 | + if (DEBUGGER_EN(MSG)) { |
| | 829 | + pr_info("commit request[%d] task[%d]:\n", request->id, i); |
| | 830 | + rga_cmd_print_debug_info(req); |
| | 831 | + } |
| | 832 | + |
| | 833 | + job = rga_job_commit(req, request); |
| 782 | 834 | if (IS_ERR(job)) { |
| 783 | 835 | pr_err("request[%d] task[%d] job_commit failed.\n", request->id, i); |
| 784 | 836 | rga_request_release_abort(request, PTR_ERR(job)); |
| .. | .. |
| 799 | 851 | static void rga_request_acquire_fence_signaled_cb(struct dma_fence *fence, |
| 800 | 852 | struct dma_fence_cb *_waiter) |
| 801 | 853 | { |
| | 854 | + int ret; |
| | 855 | + unsigned long flags; |
| | 856 | + struct mm_struct *current_mm; |
| 802 | 857 | struct rga_fence_waiter *waiter = (struct rga_fence_waiter *)_waiter; |
| | 858 | + struct rga_request *request = (struct rga_request *)waiter->private; |
| | 859 | + struct rga_pending_request_manager *request_manager = rga_drvdata->pend_request_manager; |
| 803 | 860 | |
| 804 | | - if (rga_request_commit((struct rga_request *)waiter->private)) |
| 805 | | - pr_err("rga request commit failed!\n"); |
| | 861 | + ret = rga_request_commit(request); |
| | 862 | + if (ret < 0) { |
| | 863 | + pr_err("acquire_fence callback: rga request[%d] commit failed!\n", request->id); |
| | 864 | + |
| | 865 | + spin_lock_irqsave(&request->lock, flags); |
| | 866 | + |
| | 867 | + request->is_running = false; |
| | 868 | + current_mm = request->current_mm; |
| | 869 | + request->current_mm = NULL; |
| | 870 | + |
| | 871 | + spin_unlock_irqrestore(&request->lock, flags); |
| | 872 | + |
| | 873 | + rga_request_put_current_mm(current_mm); |
| | 874 | + |
| | 875 | + /* |
| | 876 | + * Since the callback is called while holding &dma_fence.lock, |
| | 877 | + * the _locked API is used here. |
| | 878 | + */ |
| | 879 | + if (dma_fence_get_status_locked(request->release_fence) == 0) |
| | 880 | + dma_fence_signal_locked(request->release_fence); |
| | 881 | + } |
| | 882 | + |
| | 883 | + mutex_lock(&request_manager->lock); |
| | 884 | + rga_request_put(request); |
| | 885 | + mutex_unlock(&request_manager->lock); |
| 806 | 886 | |
| 807 | 887 | kfree(waiter); |
| 808 | 888 | } |
| .. | .. |
| 811 | 891 | { |
| 812 | 892 | struct rga_pending_request_manager *request_manager; |
| 813 | 893 | struct rga_request *request; |
| | 894 | + struct mm_struct *current_mm; |
| 814 | 895 | int finished_count, failed_count; |
| | 896 | + bool is_finished = false; |
| 815 | 897 | unsigned long flags; |
| 816 | 898 | |
| 817 | 899 | request_manager = rga_drvdata->pend_request_manager; |
| .. | .. |
| 832 | 914 | rga_request_get(request); |
| 833 | 915 | mutex_unlock(&request_manager->lock); |
| 834 | 916 | |
| 835 | | - rga_job_cleanup(job); |
| 836 | | - |
| 837 | 917 | spin_lock_irqsave(&request->lock, flags); |
| 838 | 918 | |
| 839 | 919 | if (job->ret < 0) { |
| .. | .. |
| 853 | 933 | |
| 854 | 934 | request->is_running = false; |
| 855 | 935 | request->is_done = true; |
| 856 | | - |
| 857 | | - rga_request_put_current_mm(request); |
| | 936 | + current_mm = request->current_mm; |
| | 937 | + request->current_mm = NULL; |
| 858 | 938 | |
| 859 | 939 | spin_unlock_irqrestore(&request->lock, flags); |
| 860 | 940 | |
| | 941 | + rga_request_put_current_mm(current_mm); |
| | 942 | + |
| 861 | 943 | rga_dma_fence_signal(request->release_fence, request->ret); |
| 862 | 944 | |
| 863 | | - wake_up(&request->finished_wq); |
| | 945 | + is_finished = true; |
| 864 | 946 | |
| 865 | 947 | if (DEBUGGER_EN(MSG)) |
| 866 | 948 | pr_info("request[%d] finished %d failed %d\n", |
| .. | .. |
| 873 | 955 | } |
| 874 | 956 | |
| 875 | 957 | mutex_lock(&request_manager->lock); |
| | 958 | + |
| | 959 | + if (is_finished) |
| | 960 | + wake_up(&request->finished_wq); |
| | 961 | + |
| 876 | 962 | rga_request_put(request); |
| | 963 | + |
| 877 | 964 | mutex_unlock(&request_manager->lock); |
| | 965 | + |
| | 966 | + if (DEBUGGER_EN(TIME)) |
| | 967 | + pr_info("request[%d], job done total cost time %lld us\n", |
| | 968 | + job->request_id, |
| | 969 | + ktime_us_delta(ktime_get(), job->timestamp)); |
| | 970 | + |
| | 971 | + rga_job_cleanup(job); |
| 878 | 972 | |
| 879 | 973 | return 0; |
| 880 | 974 | } |
| .. | .. |
| 927 | 1021 | request->sync_mode = user_request->sync_mode; |
| 928 | 1022 | request->mpi_config_flags = user_request->mpi_config_flags; |
| 929 | 1023 | request->acquire_fence_fd = user_request->acquire_fence_fd; |
| | 1024 | + request->feature = task_list[0].feature; |
| 930 | 1025 | |
| 931 | 1026 | spin_unlock_irqrestore(&request->lock, flags); |
| 932 | 1027 | |
| .. | .. |
| 1003 | 1098 | { |
| 1004 | 1099 | int ret = 0; |
| 1005 | 1100 | unsigned long flags; |
| | 1101 | + struct dma_fence *release_fence; |
| | 1102 | + struct mm_struct *current_mm; |
| | 1103 | + |
| | 1104 | + current_mm = rga_request_get_current_mm(request); |
| 1006 | 1105 | |
| 1007 | 1106 | spin_lock_irqsave(&request->lock, flags); |
| 1008 | 1107 | |
| 1009 | 1108 | if (request->is_running) { |
| 1010 | | - pr_err("can not re-config when request is running"); |
| 1011 | 1109 | spin_unlock_irqrestore(&request->lock, flags); |
| 1012 | | - return -EFAULT; |
| | 1110 | + |
| | 1111 | + pr_err("can not re-config when request is running\n"); |
| | 1112 | + ret = -EFAULT; |
| | 1113 | + goto err_put_current_mm; |
| 1013 | 1114 | } |
| 1014 | 1115 | |
| 1015 | 1116 | if (request->task_list == NULL) { |
| 1016 | | - pr_err("can not find task list from id[%d]", request->id); |
| 1017 | 1117 | spin_unlock_irqrestore(&request->lock, flags); |
| 1018 | | - return -EINVAL; |
| | 1118 | + |
| | 1119 | + pr_err("can not find task list from id[%d]\n", request->id); |
| | 1120 | + ret = -EINVAL; |
| | 1121 | + goto err_put_current_mm; |
| 1019 | 1122 | } |
| 1020 | 1123 | |
| 1021 | 1124 | /* Reset */ |
| .. | .. |
| 1023 | 1126 | request->is_done = false; |
| 1024 | 1127 | request->finished_task_count = 0; |
| 1025 | 1128 | request->failed_task_count = 0; |
| | 1129 | + request->current_mm = current_mm; |
| 1026 | 1130 | |
| 1027 | | - rga_request_get_current_mm(request); |
| 1028 | | - |
| | 1131 | + /* Unlock after ensuring that the current request will not be resubmitted. */ |
| 1029 | 1132 | spin_unlock_irqrestore(&request->lock, flags); |
| 1030 | 1133 | |
| 1031 | 1134 | if (request->sync_mode == RGA_BLIT_ASYNC) { |
| 1032 | | - ret = rga_request_alloc_release_fence(&request->release_fence); |
| 1033 | | - if (ret < 0) { |
| 1034 | | - pr_err("Failed to alloc release fence fd!\n"); |
| 1035 | | - return ret; |
| | 1135 | + release_fence = rga_dma_fence_alloc(); |
| | 1136 | + if (IS_ERR(release_fence)) { |
| | 1137 | + pr_err("Can not alloc release fence!\n"); |
| | 1138 | + ret = IS_ERR(release_fence); |
| | 1139 | + goto err_reset_request; |
| 1036 | 1140 | } |
| 1037 | | - request->release_fence_fd = ret; |
| | 1141 | + request->release_fence = release_fence; |
| 1038 | 1142 | |
| 1039 | 1143 | if (request->acquire_fence_fd > 0) { |
| 1040 | 1144 | ret = rga_request_add_acquire_fence_callback( |
| 1041 | | - request->acquire_fence_fd, |
| 1042 | | - (void *)request, |
| | 1145 | + request->acquire_fence_fd, request, |
| 1043 | 1146 | rga_request_acquire_fence_signaled_cb); |
| 1044 | 1147 | if (ret == 0) { |
| 1045 | | - return ret; |
| 1046 | | - } else if (ret == 1) { |
| | 1148 | + /* acquire fence active */ |
| | 1149 | + goto export_release_fence_fd; |
| | 1150 | + } else if (ret > 0) { |
| | 1151 | + /* acquire fence has been signaled */ |
| 1047 | 1152 | goto request_commit; |
| 1048 | 1153 | } else { |
| 1049 | 1154 | pr_err("Failed to add callback with acquire fence fd[%d]!\n", |
| 1050 | 1155 | request->acquire_fence_fd); |
| 1051 | | - goto error_release_fence_put; |
| | 1156 | + goto err_put_release_fence; |
| 1052 | 1157 | } |
| 1053 | 1158 | } |
| 1054 | | - |
| 1055 | 1159 | } |
| 1056 | 1160 | |
| 1057 | 1161 | request_commit: |
| 1058 | 1162 | ret = rga_request_commit(request); |
| 1059 | 1163 | if (ret < 0) { |
| 1060 | | - pr_err("rga request commit failed!\n"); |
| 1061 | | - goto error_release_fence_put; |
| | 1164 | + pr_err("rga request[%d] commit failed!\n", request->id); |
| | 1165 | + goto err_put_release_fence; |
| | 1166 | + } |
| | 1167 | + |
| | 1168 | +export_release_fence_fd: |
| | 1169 | + if (request->release_fence != NULL) { |
| | 1170 | + ret = rga_dma_fence_get_fd(request->release_fence); |
| | 1171 | + if (ret < 0) { |
| | 1172 | + pr_err("Failed to alloc release fence fd!\n"); |
| | 1173 | + rga_request_release_abort(request, ret); |
| | 1174 | + return ret; |
| | 1175 | + } |
| | 1176 | + |
| | 1177 | + request->release_fence_fd = ret; |
| 1062 | 1178 | } |
| 1063 | 1179 | |
| 1064 | 1180 | return 0; |
| 1065 | 1181 | |
| 1066 | | -error_release_fence_put: |
| 1067 | | - rga_dma_fence_put(request->release_fence); |
| 1068 | | - request->release_fence = NULL; |
| | 1182 | +err_put_release_fence: |
| | 1183 | + if (request->release_fence != NULL) { |
| | 1184 | + rga_dma_fence_put(request->release_fence); |
| | 1185 | + request->release_fence = NULL; |
| | 1186 | + } |
| | 1187 | + |
| | 1188 | +err_reset_request: |
| | 1189 | + spin_lock_irqsave(&request->lock, flags); |
| | 1190 | + |
| | 1191 | + request->current_mm = NULL; |
| | 1192 | + request->is_running = false; |
| | 1193 | + |
| | 1194 | + spin_unlock_irqrestore(&request->lock, flags); |
| | 1195 | + |
| | 1196 | +err_put_current_mm: |
| | 1197 | + rga_request_put_current_mm(current_mm); |
| | 1198 | + |
| 1069 | 1199 | return ret; |
| 1070 | 1200 | } |
| 1071 | 1201 | |
| .. | .. |
| 1154 | 1284 | static void rga_request_kref_release(struct kref *ref) |
| 1155 | 1285 | { |
| 1156 | 1286 | struct rga_request *request; |
| | 1287 | + struct mm_struct *current_mm; |
| 1157 | 1288 | unsigned long flags; |
| 1158 | 1289 | |
| 1159 | 1290 | request = container_of(ref, struct rga_request, refcount); |
| 1160 | 1291 | |
| 1161 | 1292 | if (rga_dma_fence_get_status(request->release_fence) == 0) |
| 1162 | | - rga_dma_fence_signal(request->release_fence, -EEXIST); |
| | 1293 | + rga_dma_fence_signal(request->release_fence, -EFAULT); |
| 1163 | 1294 | |
| 1164 | 1295 | spin_lock_irqsave(&request->lock, flags); |
| 1165 | 1296 | |
| 1166 | 1297 | rga_dma_fence_put(request->release_fence); |
| | 1298 | + current_mm = request->current_mm; |
| | 1299 | + request->current_mm = NULL; |
| 1167 | 1300 | |
| 1168 | 1301 | if (!request->is_running || request->is_done) { |
| 1169 | 1302 | spin_unlock_irqrestore(&request->lock, flags); |
| | 1303 | + |
| | 1304 | + rga_request_put_current_mm(current_mm); |
| | 1305 | + |
| 1170 | 1306 | goto free_request; |
| 1171 | 1307 | } |
| 1172 | 1308 | |
| 1173 | 1309 | spin_unlock_irqrestore(&request->lock, flags); |
| | 1310 | + |
| | 1311 | + rga_request_put_current_mm(current_mm); |
| 1174 | 1312 | |
| 1175 | 1313 | rga_request_scheduler_job_abort(request); |
| 1176 | 1314 | |
| .. | .. |
| 1188 | 1326 | |
| 1189 | 1327 | int rga_request_alloc(uint32_t flags, struct rga_session *session) |
| 1190 | 1328 | { |
| | 1329 | + int new_id; |
| 1191 | 1330 | struct rga_pending_request_manager *request_manager; |
| 1192 | 1331 | struct rga_request *request; |
| 1193 | 1332 | |
| .. | .. |
| 1218 | 1357 | mutex_lock(&request_manager->lock); |
| 1219 | 1358 | |
| 1220 | 1359 | idr_preload(GFP_KERNEL); |
| 1221 | | - request->id = idr_alloc(&request_manager->request_idr, request, 1, 0, GFP_KERNEL); |
| | 1360 | + new_id = idr_alloc_cyclic(&request_manager->request_idr, request, 1, 0, GFP_NOWAIT); |
| 1222 | 1361 | idr_preload_end(); |
| 1223 | | - |
| 1224 | | - if (request->id <= 0) { |
| 1225 | | - pr_err("alloc request_id failed!\n"); |
| | 1362 | + if (new_id < 0) { |
| | 1363 | + pr_err("request alloc id failed!\n"); |
| 1226 | 1364 | |
| 1227 | 1365 | mutex_unlock(&request_manager->lock); |
| 1228 | 1366 | kfree(request); |
| 1229 | | - return -EFAULT; |
| | 1367 | + return new_id; |
| 1230 | 1368 | } |
| 1231 | 1369 | |
| | 1370 | + request->id = new_id; |
| 1232 | 1371 | request_manager->request_count++; |
| 1233 | 1372 | |
| 1234 | 1373 | mutex_unlock(&request_manager->lock); |
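The final hunk switches request ID allocation from `idr_alloc()` to `idr_alloc_cyclic()`, so IDs rotate through the ID space instead of immediately reusing a just-released value, and the allocation performed under `idr_preload()` now uses `GFP_NOWAIT`. A hedged sketch of that allocation pattern (the `idr_preload()`/`idr_alloc_cyclic()` calls mirror what the diff uses; the IDR, lock, and function names here are illustrative):

```c
#include <linux/idr.h>
#include <linux/mutex.h>

static DEFINE_IDR(example_idr);
static DEFINE_MUTEX(example_lock);

/* Allocate a cyclic ID >= 1 for @ptr; returns the ID or a negative errno. */
static int example_alloc_id(void *ptr)
{
	int id;

	mutex_lock(&example_lock);

	/* Preload with GFP_KERNEL so the allocation itself can be GFP_NOWAIT. */
	idr_preload(GFP_KERNEL);
	id = idr_alloc_cyclic(&example_idr, ptr, 1, 0, GFP_NOWAIT);
	idr_preload_end();

	mutex_unlock(&example_lock);

	return id;
}
```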
|---|