| .. | .. |
|---|
| 1 | +// SPDX-License-Identifier: GPL-2.0-or-later |
|---|
| 1 | 2 | /* |
|---|
| 2 | 3 | * Memory-to-memory device framework for Video for Linux 2 and videobuf. |
|---|
| 3 | 4 | * |
|---|
| .. | .. |
|---|
| 7 | 8 | * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd. |
|---|
| 8 | 9 | * Pawel Osciak, <pawel@osciak.com> |
|---|
| 9 | 10 | * Marek Szyprowski, <m.szyprowski@samsung.com> |
|---|
| 10 | | - * |
|---|
| 11 | | - * This program is free software; you can redistribute it and/or modify |
|---|
| 12 | | - * it under the terms of the GNU General Public License as published by the |
|---|
| 13 | | - * Free Software Foundation; either version 2 of the License, or (at your |
|---|
| 14 | | - * option) any later version. |
|---|
| 15 | 11 | */ |
|---|
| 16 | 12 | #include <linux/module.h> |
|---|
| 17 | 13 | #include <linux/sched.h> |
|---|
| .. | .. |
|---|
| 45 | 41 | #define TRANS_RUNNING (1 << 1) |
|---|
| 46 | 42 | /* Instance is currently aborting */ |
|---|
| 47 | 43 | #define TRANS_ABORT (1 << 2) |
|---|
| 44 | + |
|---|
| 45 | + |
|---|
| 46 | +/* The job queue is not running new jobs */ |
|---|
| 47 | +#define QUEUE_PAUSED (1 << 0) |
|---|
| 48 | 48 | |
|---|
| 49 | 49 | |
|---|
| 50 | 50 | /* Offset base for buffers on the destination queue - used to distinguish |
|---|
| .. | .. |
|---|
| 87 | 87 | * @curr_ctx: currently running instance |
|---|
| 88 | 88 | * @job_queue: instances queued to run |
|---|
| 89 | 89 | * @job_spinlock: protects job_queue |
|---|
| 90 | + * @job_work: worker to run queued jobs. |
|---|
| 91 | + * @job_queue_flags: flags of the queue status, %QUEUE_PAUSED. |
|---|
| 90 | 92 | * @m2m_ops: driver callbacks |
|---|
| 91 | 93 | */ |
|---|
| 92 | 94 | struct v4l2_m2m_dev { |
|---|
| .. | .. |
|---|
| 103 | 105 | |
|---|
| 104 | 106 | struct list_head job_queue; |
|---|
| 105 | 107 | spinlock_t job_spinlock; |
|---|
| 108 | + struct work_struct job_work; |
|---|
| 109 | + unsigned long job_queue_flags; |
|---|
| 106 | 110 | |
|---|
| 107 | 111 | const struct v4l2_m2m_ops *m2m_ops; |
|---|
| 108 | 112 | }; |
|---|
| .. | .. |
|---|
| 129 | 133 | } |
|---|
| 130 | 134 | EXPORT_SYMBOL(v4l2_m2m_get_vq); |
|---|
| 131 | 135 | |
|---|
| 132 | | -void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx) |
|---|
| 136 | +struct vb2_v4l2_buffer *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx) |
|---|
| 133 | 137 | { |
|---|
| 134 | 138 | struct v4l2_m2m_buffer *b; |
|---|
| 135 | 139 | unsigned long flags; |
|---|
| .. | .. |
|---|
| 147 | 151 | } |
|---|
| 148 | 152 | EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf); |
|---|
| 149 | 153 | |
|---|
| 150 | | -void *v4l2_m2m_last_buf(struct v4l2_m2m_queue_ctx *q_ctx) |
|---|
| 154 | +struct vb2_v4l2_buffer *v4l2_m2m_last_buf(struct v4l2_m2m_queue_ctx *q_ctx) |
|---|
| 151 | 155 | { |
|---|
| 152 | 156 | struct v4l2_m2m_buffer *b; |
|---|
| 153 | 157 | unsigned long flags; |
|---|
| .. | .. |
|---|
| 165 | 169 | } |
|---|
| 166 | 170 | EXPORT_SYMBOL_GPL(v4l2_m2m_last_buf); |
|---|
| 167 | 171 | |
|---|
| 168 | | -void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx) |
|---|
| 172 | +struct vb2_v4l2_buffer *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx) |
|---|
| 169 | 173 | { |
|---|
| 170 | 174 | struct v4l2_m2m_buffer *b; |
|---|
| 171 | 175 | unsigned long flags; |
|---|
| .. | .. |
|---|
| 244 | 248 | * @m2m_dev: per-device context |
|---|
| 245 | 249 | * |
|---|
| 246 | 250 | * Get next transaction (if present) from the waiting jobs list and run it. |
|---|
| 251 | + * |
|---|
| 252 | + * Note that this function can run on a given v4l2_m2m_ctx context, |
|---|
| 253 | + * but call .device_run for another context. |
|---|
| 247 | 254 | */ |
|---|
| 248 | 255 | static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev) |
|---|
| 249 | 256 | { |
|---|
| .. | .. |
|---|
| 259 | 266 | if (list_empty(&m2m_dev->job_queue)) { |
|---|
| 260 | 267 | spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); |
|---|
| 261 | 268 | dprintk("No job pending\n"); |
|---|
| 269 | + return; |
|---|
| 270 | + } |
|---|
| 271 | + |
|---|
| 272 | + if (m2m_dev->job_queue_flags & QUEUE_PAUSED) { |
|---|
| 273 | + spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); |
|---|
| 274 | + dprintk("Running new jobs is paused\n"); |
|---|
| 262 | 275 | return; |
|---|
| 263 | 276 | } |
|---|
| 264 | 277 | |
|---|
| .. | .. |
|---|
| 283 | 296 | static void __v4l2_m2m_try_queue(struct v4l2_m2m_dev *m2m_dev, |
|---|
| 284 | 297 | struct v4l2_m2m_ctx *m2m_ctx) |
|---|
| 285 | 298 | { |
|---|
| 286 | | - unsigned long flags_job, flags_out, flags_cap; |
|---|
| 299 | + unsigned long flags_job; |
|---|
| 300 | + struct vb2_v4l2_buffer *dst, *src; |
|---|
| 287 | 301 | |
|---|
| 288 | 302 | dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx); |
|---|
| 289 | 303 | |
|---|
| .. | .. |
|---|
| 297 | 311 | |
|---|
| 298 | 312 | /* If the context is aborted then don't schedule it */ |
|---|
| 299 | 313 | if (m2m_ctx->job_flags & TRANS_ABORT) { |
|---|
| 300 | | - spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job); |
|---|
| 301 | 314 | dprintk("Aborted context\n"); |
|---|
| 302 | | - return; |
|---|
| 315 | + goto job_unlock; |
|---|
| 303 | 316 | } |
|---|
| 304 | 317 | |
|---|
| 305 | 318 | if (m2m_ctx->job_flags & TRANS_QUEUED) { |
|---|
| 306 | | - spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job); |
|---|
| 307 | 319 | dprintk("On job queue already\n"); |
|---|
| 308 | | - return; |
|---|
| 320 | + goto job_unlock; |
|---|
| 309 | 321 | } |
|---|
| 310 | 322 | |
|---|
| 311 | | - spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out); |
|---|
| 312 | | - if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue) |
|---|
| 313 | | - && !m2m_ctx->out_q_ctx.buffered) { |
|---|
| 314 | | - spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, |
|---|
| 315 | | - flags_out); |
|---|
| 316 | | - spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job); |
|---|
| 323 | + src = v4l2_m2m_next_src_buf(m2m_ctx); |
|---|
| 324 | + dst = v4l2_m2m_next_dst_buf(m2m_ctx); |
|---|
| 325 | + if (!src && !m2m_ctx->out_q_ctx.buffered) { |
|---|
| 317 | 326 | dprintk("No input buffers available\n"); |
|---|
| 318 | | - return; |
|---|
| 327 | + goto job_unlock; |
|---|
| 319 | 328 | } |
|---|
| 320 | | - spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap); |
|---|
| 321 | | - if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue) |
|---|
| 322 | | - && !m2m_ctx->cap_q_ctx.buffered) { |
|---|
| 323 | | - spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, |
|---|
| 324 | | - flags_cap); |
|---|
| 325 | | - spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, |
|---|
| 326 | | - flags_out); |
|---|
| 327 | | - spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job); |
|---|
| 329 | + if (!dst && !m2m_ctx->cap_q_ctx.buffered) { |
|---|
| 328 | 330 | dprintk("No output buffers available\n"); |
|---|
| 329 | | - return; |
|---|
| 331 | + goto job_unlock; |
|---|
| 330 | 332 | } |
|---|
| 331 | | - spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap); |
|---|
| 332 | | - spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out); |
|---|
| 333 | + |
|---|
| 334 | + m2m_ctx->new_frame = true; |
|---|
| 335 | + |
|---|
| 336 | + if (src && dst && dst->is_held && |
|---|
| 337 | + dst->vb2_buf.copied_timestamp && |
|---|
| 338 | + dst->vb2_buf.timestamp != src->vb2_buf.timestamp) { |
|---|
| 339 | + dst->is_held = false; |
|---|
| 340 | + v4l2_m2m_dst_buf_remove(m2m_ctx); |
|---|
| 341 | + v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE); |
|---|
| 342 | + dst = v4l2_m2m_next_dst_buf(m2m_ctx); |
|---|
| 343 | + |
|---|
| 344 | + if (!dst && !m2m_ctx->cap_q_ctx.buffered) { |
|---|
| 345 | + dprintk("No output buffers available after returning held buffer\n"); |
|---|
| 346 | + goto job_unlock; |
|---|
| 347 | + } |
|---|
| 348 | + } |
|---|
| 349 | + |
|---|
| 350 | + if (src && dst && (m2m_ctx->out_q_ctx.q.subsystem_flags & |
|---|
| 351 | + VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF)) |
|---|
| 352 | + m2m_ctx->new_frame = !dst->vb2_buf.copied_timestamp || |
|---|
| 353 | + dst->vb2_buf.timestamp != src->vb2_buf.timestamp; |
|---|
| 354 | + |
|---|
| 355 | + if (m2m_ctx->has_stopped) { |
|---|
| 356 | + dprintk("Device has stopped\n"); |
|---|
| 357 | + goto job_unlock; |
|---|
| 358 | + } |
|---|
| 333 | 359 | |
|---|
| 334 | 360 | if (m2m_dev->m2m_ops->job_ready |
|---|
| 335 | 361 | && (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) { |
|---|
| 336 | | - spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job); |
|---|
| 337 | 362 | dprintk("Driver not ready\n"); |
|---|
| 338 | | - return; |
|---|
| 363 | + goto job_unlock; |
|---|
| 339 | 364 | } |
|---|
| 340 | 365 | |
|---|
| 341 | 366 | list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue); |
|---|
| 342 | 367 | m2m_ctx->job_flags |= TRANS_QUEUED; |
|---|
| 343 | 368 | |
|---|
| 369 | +job_unlock: |
|---|
| 344 | 370 | spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job); |
|---|
| 345 | 371 | } |
|---|
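
The held-destination-buffer handling above is only exercised when the driver declares support on its OUTPUT queue. Below is a minimal sketch of that opt-in, assuming a hypothetical stateless decoder (the `mydrv_*` names and the elided queue setup are invented for illustration; the subsystem flag and helpers are the ones used by the code above):

```c
#include <media/videobuf2-v4l2.h>

/*
 * Hypothetical queue_init callback: setting the subsystem flag on the
 * OUTPUT (source) queue tells the m2m core that this driver understands
 * V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF, so a capture buffer may stay "held"
 * across several output buffers carrying slices of the same frame
 * (matched by their copied timestamps, as in __v4l2_m2m_try_queue()).
 */
static int mydrv_queue_init(void *priv, struct vb2_queue *src_vq,
			    struct vb2_queue *dst_vq)
{
	int ret;

	/* ... type, io_modes, ops, mem_ops, lock, etc. set up here ... */

	src_vq->subsystem_flags |= VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF;

	ret = vb2_queue_init(src_vq);
	if (ret)
		return ret;
	return vb2_queue_init(dst_vq);
}
```
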
| 346 | 372 | |
|---|
| .. | .. |
|---|
| 366 | 392 | EXPORT_SYMBOL_GPL(v4l2_m2m_try_schedule); |
|---|
| 367 | 393 | |
|---|
| 368 | 394 | /** |
|---|
| 395 | + * v4l2_m2m_device_run_work() - run pending jobs for the context |
|---|
| 396 | + * @work: Work structure used for scheduling the execution of this function. |
|---|
| 397 | + */ |
|---|
| 398 | +static void v4l2_m2m_device_run_work(struct work_struct *work) |
|---|
| 399 | +{ |
|---|
| 400 | + struct v4l2_m2m_dev *m2m_dev = |
|---|
| 401 | + container_of(work, struct v4l2_m2m_dev, job_work); |
|---|
| 402 | + |
|---|
| 403 | + v4l2_m2m_try_run(m2m_dev); |
|---|
| 404 | +} |
|---|
| 405 | + |
|---|
| 406 | +/** |
|---|
| 369 | 407 | * v4l2_m2m_cancel_job() - cancel pending jobs for the context |
|---|
| 370 | 408 | * @m2m_ctx: m2m context with jobs to be canceled |
|---|
| 371 | 409 | * |
|---|
| .. | .. |
|---|
| 387 | 425 | spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); |
|---|
| 388 | 426 | if (m2m_dev->m2m_ops->job_abort) |
|---|
| 389 | 427 | m2m_dev->m2m_ops->job_abort(m2m_ctx->priv); |
|---|
| 390 | | - dprintk("m2m_ctx %p running, will wait to complete", m2m_ctx); |
|---|
| 428 | + dprintk("m2m_ctx %p running, will wait to complete\n", m2m_ctx); |
|---|
| 391 | 429 | wait_event(m2m_ctx->finished, |
|---|
| 392 | 430 | !(m2m_ctx->job_flags & TRANS_RUNNING)); |
|---|
| 393 | 431 | } else if (m2m_ctx->job_flags & TRANS_QUEUED) { |
|---|
| .. | .. |
|---|
| 402 | 440 | } |
|---|
| 403 | 441 | } |
|---|
| 404 | 442 | |
|---|
| 405 | | -void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev, |
|---|
| 406 | | - struct v4l2_m2m_ctx *m2m_ctx) |
|---|
| 443 | +/* |
|---|
| 444 | + * Schedule the next job, called from v4l2_m2m_job_finish() or |
|---|
| 445 | + * v4l2_m2m_buf_done_and_job_finish(). |
|---|
| 446 | + */ |
|---|
| 447 | +static void v4l2_m2m_schedule_next_job(struct v4l2_m2m_dev *m2m_dev, |
|---|
| 448 | + struct v4l2_m2m_ctx *m2m_ctx) |
|---|
| 407 | 449 | { |
|---|
| 408 | | - unsigned long flags; |
|---|
| 450 | + /* |
|---|
| 451 | + * This instance might have more buffers ready, but since we do not |
|---|
| 452 | + * allow more than one job on the job_queue per instance, each has |
|---|
| 453 | + * to be scheduled separately after the previous one finishes. |
|---|
| 454 | + */ |
|---|
| 455 | + __v4l2_m2m_try_queue(m2m_dev, m2m_ctx); |
|---|
| 409 | 456 | |
|---|
| 410 | | - spin_lock_irqsave(&m2m_dev->job_spinlock, flags); |
|---|
| 457 | + /* |
|---|
| 458 | + * We might be running in atomic context, |
|---|
| 459 | + * but the job must be run in non-atomic context. |
|---|
| 460 | + */ |
|---|
| 461 | + schedule_work(&m2m_dev->job_work); |
|---|
| 462 | +} |
|---|
| 463 | + |
|---|
| 464 | +/* |
|---|
| 465 | + * Assumes job_spinlock is held, called from v4l2_m2m_job_finish() or |
|---|
| 466 | + * v4l2_m2m_buf_done_and_job_finish(). |
|---|
| 467 | + */ |
|---|
| 468 | +static bool _v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev, |
|---|
| 469 | + struct v4l2_m2m_ctx *m2m_ctx) |
|---|
| 470 | +{ |
|---|
| 411 | 471 | if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) { |
|---|
| 412 | | - spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); |
|---|
| 413 | 472 | dprintk("Called by an instance not currently running\n"); |
|---|
| 414 | | - return; |
|---|
| 473 | + return false; |
|---|
| 415 | 474 | } |
|---|
| 416 | 475 | |
|---|
| 417 | 476 | list_del(&m2m_dev->curr_ctx->queue); |
|---|
| 418 | 477 | m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING); |
|---|
| 419 | 478 | wake_up(&m2m_dev->curr_ctx->finished); |
|---|
| 420 | 479 | m2m_dev->curr_ctx = NULL; |
|---|
| 480 | + return true; |
|---|
| 481 | +} |
|---|
| 421 | 482 | |
|---|
| 483 | +void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev, |
|---|
| 484 | + struct v4l2_m2m_ctx *m2m_ctx) |
|---|
| 485 | +{ |
|---|
| 486 | + unsigned long flags; |
|---|
| 487 | + bool schedule_next; |
|---|
| 488 | + |
|---|
| 489 | + /* |
|---|
| 490 | + * This function should not be used for drivers that support |
|---|
| 491 | + * holding capture buffers. Those should use |
|---|
| 492 | + * v4l2_m2m_buf_done_and_job_finish() instead. |
|---|
| 493 | + */ |
|---|
| 494 | + WARN_ON(m2m_ctx->out_q_ctx.q.subsystem_flags & |
|---|
| 495 | + VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF); |
|---|
| 496 | + spin_lock_irqsave(&m2m_dev->job_spinlock, flags); |
|---|
| 497 | + schedule_next = _v4l2_m2m_job_finish(m2m_dev, m2m_ctx); |
|---|
| 422 | 498 | spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); |
|---|
| 423 | 499 | |
|---|
| 424 | | - /* This instance might have more buffers ready, but since we do not |
|---|
| 425 | | - * allow more than one job on the job_queue per instance, each has |
|---|
| 426 | | - * to be scheduled separately after the previous one finishes. */ |
|---|
| 427 | | - v4l2_m2m_try_schedule(m2m_ctx); |
|---|
| 500 | + if (schedule_next) |
|---|
| 501 | + v4l2_m2m_schedule_next_job(m2m_dev, m2m_ctx); |
|---|
| 428 | 502 | } |
|---|
| 429 | 503 | EXPORT_SYMBOL(v4l2_m2m_job_finish); |
|---|
| 504 | + |
|---|
| 505 | +void v4l2_m2m_buf_done_and_job_finish(struct v4l2_m2m_dev *m2m_dev, |
|---|
| 506 | + struct v4l2_m2m_ctx *m2m_ctx, |
|---|
| 507 | + enum vb2_buffer_state state) |
|---|
| 508 | +{ |
|---|
| 509 | + struct vb2_v4l2_buffer *src_buf, *dst_buf; |
|---|
| 510 | + bool schedule_next = false; |
|---|
| 511 | + unsigned long flags; |
|---|
| 512 | + |
|---|
| 513 | + spin_lock_irqsave(&m2m_dev->job_spinlock, flags); |
|---|
| 514 | + src_buf = v4l2_m2m_src_buf_remove(m2m_ctx); |
|---|
| 515 | + dst_buf = v4l2_m2m_next_dst_buf(m2m_ctx); |
|---|
| 516 | + |
|---|
| 517 | + if (WARN_ON(!src_buf || !dst_buf)) |
|---|
| 518 | + goto unlock; |
|---|
| 519 | + dst_buf->is_held = src_buf->flags & V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF; |
|---|
| 520 | + if (!dst_buf->is_held) { |
|---|
| 521 | + v4l2_m2m_dst_buf_remove(m2m_ctx); |
|---|
| 522 | + v4l2_m2m_buf_done(dst_buf, state); |
|---|
| 523 | + } |
|---|
| 524 | + /* |
|---|
| 525 | + * If the request API is being used, returning the OUTPUT |
|---|
| 526 | + * (src) buffer will wake-up any process waiting on the |
|---|
| 527 | + * request file descriptor. |
|---|
| 528 | + * |
|---|
| 529 | + * Therefore, return the CAPTURE (dst) buffer first, |
|---|
| 530 | + * to avoid signalling the request file descriptor |
|---|
| 531 | + * before the CAPTURE buffer is done. |
|---|
| 532 | + */ |
|---|
| 533 | + v4l2_m2m_buf_done(src_buf, state); |
|---|
| 534 | + schedule_next = _v4l2_m2m_job_finish(m2m_dev, m2m_ctx); |
|---|
| 535 | +unlock: |
|---|
| 536 | + spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); |
|---|
| 537 | + |
|---|
| 538 | + if (schedule_next) |
|---|
| 539 | + v4l2_m2m_schedule_next_job(m2m_dev, m2m_ctx); |
|---|
| 540 | +} |
|---|
| 541 | +EXPORT_SYMBOL(v4l2_m2m_buf_done_and_job_finish); |
|---|
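
A sketch of the intended caller side, assuming a hypothetical driver (the `mydrv_*` types are invented here) whose interrupt handler completes the current job. v4l2_m2m_buf_done_and_job_finish() replaces the manual src/dst dequeue, v4l2_m2m_buf_done() and v4l2_m2m_job_finish() sequence for drivers that support held capture buffers, and it is safe in atomic context because the next job is started from the framework's work item:

```c
#include <linux/interrupt.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-mem2mem.h>

struct mydrv_dev {			/* hypothetical device state */
	struct v4l2_m2m_dev *m2m_dev;
};

struct mydrv_ctx {			/* hypothetical per-file context */
	struct v4l2_fh fh;
	struct mydrv_dev *dev;
};

static irqreturn_t mydrv_irq(int irq, void *data)
{
	struct mydrv_dev *dev = data;
	struct mydrv_ctx *ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);

	if (!ctx)
		return IRQ_NONE;

	/*
	 * Dequeue the OUTPUT buffer, return the CAPTURE buffer unless it is
	 * still held for further slices, mark both DONE, and let the core
	 * schedule the next job from its workqueue.
	 */
	v4l2_m2m_buf_done_and_job_finish(dev->m2m_dev, ctx->fh.m2m_ctx,
					 VB2_BUF_STATE_DONE);
	return IRQ_HANDLED;
}
```
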
| 542 | + |
|---|
| 543 | +void v4l2_m2m_suspend(struct v4l2_m2m_dev *m2m_dev) |
|---|
| 544 | +{ |
|---|
| 545 | + unsigned long flags; |
|---|
| 546 | + struct v4l2_m2m_ctx *curr_ctx; |
|---|
| 547 | + |
|---|
| 548 | + spin_lock_irqsave(&m2m_dev->job_spinlock, flags); |
|---|
| 549 | + m2m_dev->job_queue_flags |= QUEUE_PAUSED; |
|---|
| 550 | + curr_ctx = m2m_dev->curr_ctx; |
|---|
| 551 | + spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); |
|---|
| 552 | + |
|---|
| 553 | + if (curr_ctx) |
|---|
| 554 | + wait_event(curr_ctx->finished, |
|---|
| 555 | + !(curr_ctx->job_flags & TRANS_RUNNING)); |
|---|
| 556 | +} |
|---|
| 557 | +EXPORT_SYMBOL(v4l2_m2m_suspend); |
|---|
| 558 | + |
|---|
| 559 | +void v4l2_m2m_resume(struct v4l2_m2m_dev *m2m_dev) |
|---|
| 560 | +{ |
|---|
| 561 | + unsigned long flags; |
|---|
| 562 | + |
|---|
| 563 | + spin_lock_irqsave(&m2m_dev->job_spinlock, flags); |
|---|
| 564 | + m2m_dev->job_queue_flags &= ~QUEUE_PAUSED; |
|---|
| 565 | + spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); |
|---|
| 566 | + |
|---|
| 567 | + v4l2_m2m_try_run(m2m_dev); |
|---|
| 568 | +} |
|---|
| 569 | +EXPORT_SYMBOL(v4l2_m2m_resume); |
|---|
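
How a driver might hook the new pause/resume helpers into system sleep, reusing the hypothetical `struct mydrv_dev` from the previous sketch: v4l2_m2m_suspend() sets QUEUE_PAUSED and waits for a job that is already running, v4l2_m2m_resume() clears the flag and retries the job queue.

```c
#include <linux/device.h>
#include <linux/pm.h>
#include <media/v4l2-mem2mem.h>

static int __maybe_unused mydrv_suspend(struct device *dev)
{
	struct mydrv_dev *mydrv = dev_get_drvdata(dev);

	/* Stop scheduling new jobs and wait for the current one to finish. */
	v4l2_m2m_suspend(mydrv->m2m_dev);
	/* ... gate clocks / power the IP down here ... */
	return 0;
}

static int __maybe_unused mydrv_resume(struct device *dev)
{
	struct mydrv_dev *mydrv = dev_get_drvdata(dev);

	/* ... restore clocks / power here ... */
	v4l2_m2m_resume(mydrv->m2m_dev);
	return 0;
}

static SIMPLE_DEV_PM_OPS(mydrv_pm_ops, mydrv_suspend, mydrv_resume);
```
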
| 430 | 570 | |
|---|
| 431 | 571 | int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, |
|---|
| 432 | 572 | struct v4l2_requestbuffers *reqbufs) |
|---|
| .. | .. |
|---|
| 445 | 585 | } |
|---|
| 446 | 586 | EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs); |
|---|
| 447 | 587 | |
|---|
| 448 | | -int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, |
|---|
| 449 | | - struct v4l2_buffer *buf) |
|---|
| 588 | +static void v4l2_m2m_adjust_mem_offset(struct vb2_queue *vq, |
|---|
| 589 | + struct v4l2_buffer *buf) |
|---|
| 450 | 590 | { |
|---|
| 451 | | - struct vb2_queue *vq; |
|---|
| 452 | | - int ret = 0; |
|---|
| 453 | | - unsigned int i; |
|---|
| 454 | | - |
|---|
| 455 | | - vq = v4l2_m2m_get_vq(m2m_ctx, buf->type); |
|---|
| 456 | | - ret = vb2_querybuf(vq, buf); |
|---|
| 457 | | - |
|---|
| 458 | 591 | /* Adjust MMAP memory offsets for the CAPTURE queue */ |
|---|
| 459 | | - if (buf->memory == V4L2_MEMORY_MMAP && !V4L2_TYPE_IS_OUTPUT(vq->type)) { |
|---|
| 592 | + if (buf->memory == V4L2_MEMORY_MMAP && V4L2_TYPE_IS_CAPTURE(vq->type)) { |
|---|
| 460 | 593 | if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) { |
|---|
| 594 | + unsigned int i; |
|---|
| 595 | + |
|---|
| 461 | 596 | for (i = 0; i < buf->length; ++i) |
|---|
| 462 | 597 | buf->m.planes[i].m.mem_offset |
|---|
| 463 | 598 | += DST_QUEUE_OFF_BASE; |
|---|
| .. | .. |
|---|
| 465 | 600 | buf->m.offset += DST_QUEUE_OFF_BASE; |
|---|
| 466 | 601 | } |
|---|
| 467 | 602 | } |
|---|
| 468 | | - |
|---|
| 469 | | - return ret; |
|---|
| 470 | 603 | } |
|---|
| 471 | | -EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf); |
|---|
| 472 | 604 | |
|---|
| 473 | | -int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, |
|---|
| 474 | | - struct v4l2_buffer *buf) |
|---|
| 605 | +int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, |
|---|
| 606 | + struct v4l2_buffer *buf) |
|---|
| 475 | 607 | { |
|---|
| 476 | 608 | struct vb2_queue *vq; |
|---|
| 477 | 609 | int ret; |
|---|
| 478 | 610 | |
|---|
| 479 | 611 | vq = v4l2_m2m_get_vq(m2m_ctx, buf->type); |
|---|
| 480 | | - ret = vb2_qbuf(vq, buf); |
|---|
| 481 | | - if (!ret) |
|---|
| 612 | + ret = vb2_querybuf(vq, buf); |
|---|
| 613 | + if (ret) |
|---|
| 614 | + return ret; |
|---|
| 615 | + |
|---|
| 616 | + /* Adjust MMAP memory offsets for the CAPTURE queue */ |
|---|
| 617 | + v4l2_m2m_adjust_mem_offset(vq, buf); |
|---|
| 618 | + |
|---|
| 619 | + return 0; |
|---|
| 620 | +} |
|---|
| 621 | +EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf); |
|---|
| 622 | + |
|---|
| 623 | +/* |
|---|
| 624 | + * This will add the LAST flag and mark the buffer management |
|---|
| 625 | + * state as stopped. |
|---|
| 626 | + * This is called when the last capture buffer must be flagged as LAST |
|---|
| 627 | + * in draining mode from the encoder/decoder driver buf_queue() callback |
|---|
| 628 | + * or from v4l2_update_last_buf_state() when a capture buffer is available. |
|---|
| 629 | + */ |
|---|
| 630 | +void v4l2_m2m_last_buffer_done(struct v4l2_m2m_ctx *m2m_ctx, |
|---|
| 631 | + struct vb2_v4l2_buffer *vbuf) |
|---|
| 632 | +{ |
|---|
| 633 | + vbuf->flags |= V4L2_BUF_FLAG_LAST; |
|---|
| 634 | + vb2_buffer_done(&vbuf->vb2_buf, VB2_BUF_STATE_DONE); |
|---|
| 635 | + |
|---|
| 636 | + v4l2_m2m_mark_stopped(m2m_ctx); |
|---|
| 637 | +} |
|---|
| 638 | +EXPORT_SYMBOL_GPL(v4l2_m2m_last_buffer_done); |
|---|
| 639 | + |
|---|
| 640 | +/* When stop command is issued, update buffer management state */ |
|---|
| 641 | +static int v4l2_update_last_buf_state(struct v4l2_m2m_ctx *m2m_ctx) |
|---|
| 642 | +{ |
|---|
| 643 | + struct vb2_v4l2_buffer *next_dst_buf; |
|---|
| 644 | + |
|---|
| 645 | + if (m2m_ctx->is_draining) |
|---|
| 646 | + return -EBUSY; |
|---|
| 647 | + |
|---|
| 648 | + if (m2m_ctx->has_stopped) |
|---|
| 649 | + return 0; |
|---|
| 650 | + |
|---|
| 651 | + m2m_ctx->last_src_buf = v4l2_m2m_last_src_buf(m2m_ctx); |
|---|
| 652 | + m2m_ctx->is_draining = true; |
|---|
| 653 | + |
|---|
| 654 | + /* |
|---|
| 655 | + * The processing of the last output buffer queued before |
|---|
| 656 | + * the STOP command is expected to mark the buffer management |
|---|
| 657 | + * state as stopped with v4l2_m2m_mark_stopped(). |
|---|
| 658 | + */ |
|---|
| 659 | + if (m2m_ctx->last_src_buf) |
|---|
| 660 | + return 0; |
|---|
| 661 | + |
|---|
| 662 | + /* |
|---|
| 663 | + * In case the output queue is empty, try to mark the last capture |
|---|
| 664 | + * buffer as LAST. |
|---|
| 665 | + */ |
|---|
| 666 | + next_dst_buf = v4l2_m2m_dst_buf_remove(m2m_ctx); |
|---|
| 667 | + if (!next_dst_buf) { |
|---|
| 668 | + /* |
|---|
| 669 | + * Wait for the next queued one in encoder/decoder driver |
|---|
| 670 | + * buf_queue() callback using the v4l2_m2m_dst_buf_is_last() |
|---|
| 671 | + * helper or in v4l2_m2m_qbuf() if encoder/decoder is not yet |
|---|
| 672 | + * streaming. |
|---|
| 673 | + */ |
|---|
| 674 | + m2m_ctx->next_buf_last = true; |
|---|
| 675 | + return 0; |
|---|
| 676 | + } |
|---|
| 677 | + |
|---|
| 678 | + v4l2_m2m_last_buffer_done(m2m_ctx, next_dst_buf); |
|---|
| 679 | + |
|---|
| 680 | + return 0; |
|---|
| 681 | +} |
|---|
| 682 | + |
|---|
| 683 | +/* |
|---|
| 684 | + * Updates the encoding/decoding buffer management state, should |
|---|
| 685 | + * be called from encoder/decoder drivers start_streaming() |
|---|
| 686 | + */ |
|---|
| 687 | +void v4l2_m2m_update_start_streaming_state(struct v4l2_m2m_ctx *m2m_ctx, |
|---|
| 688 | + struct vb2_queue *q) |
|---|
| 689 | +{ |
|---|
| 690 | + /* If start streaming again, untag the last output buffer */ |
|---|
| 691 | + if (V4L2_TYPE_IS_OUTPUT(q->type)) |
|---|
| 692 | + m2m_ctx->last_src_buf = NULL; |
|---|
| 693 | +} |
|---|
| 694 | +EXPORT_SYMBOL_GPL(v4l2_m2m_update_start_streaming_state); |
|---|
| 695 | + |
|---|
| 696 | +/* |
|---|
| 697 | + * Updates the encoding/decoding buffer management state, should |
|---|
| 698 | + * be called from encoder/decoder driver stop_streaming() |
|---|
| 699 | + */ |
|---|
| 700 | +void v4l2_m2m_update_stop_streaming_state(struct v4l2_m2m_ctx *m2m_ctx, |
|---|
| 701 | + struct vb2_queue *q) |
|---|
| 702 | +{ |
|---|
| 703 | + if (V4L2_TYPE_IS_OUTPUT(q->type)) { |
|---|
| 704 | + /* |
|---|
| 705 | + * If in draining state, either mark next dst buffer as |
|---|
| 706 | + * done or flag next one to be marked as done either |
|---|
| 707 | + * in encoder/decoder driver buf_queue() callback using |
|---|
| 708 | + * the v4l2_m2m_dst_buf_is_last() helper or in v4l2_m2m_qbuf() |
|---|
| 709 | + * if encoder/decoder is not yet streaming |
|---|
| 710 | + */ |
|---|
| 711 | + if (m2m_ctx->is_draining) { |
|---|
| 712 | + struct vb2_v4l2_buffer *next_dst_buf; |
|---|
| 713 | + |
|---|
| 714 | + m2m_ctx->last_src_buf = NULL; |
|---|
| 715 | + next_dst_buf = v4l2_m2m_dst_buf_remove(m2m_ctx); |
|---|
| 716 | + if (!next_dst_buf) |
|---|
| 717 | + m2m_ctx->next_buf_last = true; |
|---|
| 718 | + else |
|---|
| 719 | + v4l2_m2m_last_buffer_done(m2m_ctx, |
|---|
| 720 | + next_dst_buf); |
|---|
| 721 | + } |
|---|
| 722 | + } else { |
|---|
| 723 | + v4l2_m2m_clear_state(m2m_ctx); |
|---|
| 724 | + } |
|---|
| 725 | +} |
|---|
| 726 | +EXPORT_SYMBOL_GPL(v4l2_m2m_update_stop_streaming_state); |
|---|
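
A sketch of where the two state helpers are meant to be called from, assuming the hypothetical `mydrv_ctx` from the earlier interrupt-handler sketch and the conventional vb2 `drv_priv` setup; everything except the v4l2_m2m_* calls is illustrative:

```c
#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-v4l2.h>

static int mydrv_start_streaming(struct vb2_queue *q, unsigned int count)
{
	struct mydrv_ctx *ctx = vb2_get_drv_priv(q);

	/* Re-arm the drain bookkeeping (clears last_src_buf for OUTPUT). */
	v4l2_m2m_update_start_streaming_state(ctx->fh.m2m_ctx, q);
	return 0;
}

static void mydrv_stop_streaming(struct vb2_queue *q)
{
	struct mydrv_ctx *ctx = vb2_get_drv_priv(q);
	struct vb2_v4l2_buffer *vbuf;

	/* Give back every buffer the driver still owns on this queue. */
	for (;;) {
		if (V4L2_TYPE_IS_OUTPUT(q->type))
			vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
		else
			vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
		if (!vbuf)
			break;
		v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
	}

	/* Completes or flags the pending LAST buffer when draining. */
	v4l2_m2m_update_stop_streaming_state(ctx->fh.m2m_ctx, q);
}
```
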
| 727 | + |
|---|
| 728 | +static void v4l2_m2m_force_last_buf_done(struct v4l2_m2m_ctx *m2m_ctx, |
|---|
| 729 | + struct vb2_queue *q) |
|---|
| 730 | +{ |
|---|
| 731 | + struct vb2_buffer *vb; |
|---|
| 732 | + struct vb2_v4l2_buffer *vbuf; |
|---|
| 733 | + unsigned int i; |
|---|
| 734 | + |
|---|
| 735 | + if (WARN_ON(q->is_output)) |
|---|
| 736 | + return; |
|---|
| 737 | + if (list_empty(&q->queued_list)) |
|---|
| 738 | + return; |
|---|
| 739 | + |
|---|
| 740 | + vb = list_first_entry(&q->queued_list, struct vb2_buffer, queued_entry); |
|---|
| 741 | + for (i = 0; i < vb->num_planes; i++) |
|---|
| 742 | + vb2_set_plane_payload(vb, i, 0); |
|---|
| 743 | + |
|---|
| 744 | + /* |
|---|
| 745 | + * Since the buffer hasn't been queued to the ready queue, |
|---|
| 746 | + * mark it active and owned before marking it LAST and DONE |
|---|
| 747 | + */ |
|---|
| 748 | + vb->state = VB2_BUF_STATE_ACTIVE; |
|---|
| 749 | + atomic_inc(&q->owned_by_drv_count); |
|---|
| 750 | + |
|---|
| 751 | + vbuf = to_vb2_v4l2_buffer(vb); |
|---|
| 752 | + vbuf->field = V4L2_FIELD_NONE; |
|---|
| 753 | + |
|---|
| 754 | + v4l2_m2m_last_buffer_done(m2m_ctx, vbuf); |
|---|
| 755 | +} |
|---|
| 756 | + |
|---|
| 757 | +int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, |
|---|
| 758 | + struct v4l2_buffer *buf) |
|---|
| 759 | +{ |
|---|
| 760 | + struct video_device *vdev = video_devdata(file); |
|---|
| 761 | + struct vb2_queue *vq; |
|---|
| 762 | + int ret; |
|---|
| 763 | + |
|---|
| 764 | + vq = v4l2_m2m_get_vq(m2m_ctx, buf->type); |
|---|
| 765 | + if (V4L2_TYPE_IS_CAPTURE(vq->type) && |
|---|
| 766 | + (buf->flags & V4L2_BUF_FLAG_REQUEST_FD)) { |
|---|
| 767 | + dprintk("%s: requests cannot be used with capture buffers\n", |
|---|
| 768 | + __func__); |
|---|
| 769 | + return -EPERM; |
|---|
| 770 | + } |
|---|
| 771 | + |
|---|
| 772 | + ret = vb2_qbuf(vq, vdev->v4l2_dev->mdev, buf); |
|---|
| 773 | + if (ret) |
|---|
| 774 | + return ret; |
|---|
| 775 | + |
|---|
| 776 | + /* Adjust MMAP memory offsets for the CAPTURE queue */ |
|---|
| 777 | + v4l2_m2m_adjust_mem_offset(vq, buf); |
|---|
| 778 | + |
|---|
| 779 | + /* |
|---|
| 780 | + * If the capture queue is streaming, but streaming hasn't started |
|---|
| 781 | + * on the device and a stop was requested, mark the previously queued |
|---|
| 782 | + * buffer as DONE with LAST flag since it won't be queued on the |
|---|
| 783 | + * device. |
|---|
| 784 | + */ |
|---|
| 785 | + if (V4L2_TYPE_IS_CAPTURE(vq->type) && |
|---|
| 786 | + vb2_is_streaming(vq) && !vb2_start_streaming_called(vq) && |
|---|
| 787 | + (v4l2_m2m_has_stopped(m2m_ctx) || v4l2_m2m_dst_buf_is_last(m2m_ctx))) |
|---|
| 788 | + v4l2_m2m_force_last_buf_done(m2m_ctx, vq); |
|---|
| 789 | + else if (!(buf->flags & V4L2_BUF_FLAG_IN_REQUEST)) |
|---|
| 482 | 790 | v4l2_m2m_try_schedule(m2m_ctx); |
|---|
| 483 | 791 | |
|---|
| 484 | | - return ret; |
|---|
| 792 | + return 0; |
|---|
| 485 | 793 | } |
|---|
| 486 | 794 | EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf); |
|---|
| 487 | 795 | |
|---|
| .. | .. |
|---|
| 489 | 797 | struct v4l2_buffer *buf) |
|---|
| 490 | 798 | { |
|---|
| 491 | 799 | struct vb2_queue *vq; |
|---|
| 800 | + int ret; |
|---|
| 492 | 801 | |
|---|
| 493 | 802 | vq = v4l2_m2m_get_vq(m2m_ctx, buf->type); |
|---|
| 494 | | - return vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK); |
|---|
| 803 | + ret = vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK); |
|---|
| 804 | + if (ret) |
|---|
| 805 | + return ret; |
|---|
| 806 | + |
|---|
| 807 | + /* Adjust MMAP memory offsets for the CAPTURE queue */ |
|---|
| 808 | + v4l2_m2m_adjust_mem_offset(vq, buf); |
|---|
| 809 | + |
|---|
| 810 | + return 0; |
|---|
| 495 | 811 | } |
|---|
| 496 | 812 | EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf); |
|---|
| 497 | 813 | |
|---|
| 498 | 814 | int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, |
|---|
| 499 | 815 | struct v4l2_buffer *buf) |
|---|
| 500 | 816 | { |
|---|
| 817 | + struct video_device *vdev = video_devdata(file); |
|---|
| 501 | 818 | struct vb2_queue *vq; |
|---|
| 502 | 819 | int ret; |
|---|
| 503 | 820 | |
|---|
| 504 | 821 | vq = v4l2_m2m_get_vq(m2m_ctx, buf->type); |
|---|
| 505 | | - ret = vb2_prepare_buf(vq, buf); |
|---|
| 506 | | - if (!ret) |
|---|
| 507 | | - v4l2_m2m_try_schedule(m2m_ctx); |
|---|
| 822 | + ret = vb2_prepare_buf(vq, vdev->v4l2_dev->mdev, buf); |
|---|
| 823 | + if (ret) |
|---|
| 824 | + return ret; |
|---|
| 508 | 825 | |
|---|
| 509 | | - return ret; |
|---|
| 826 | + /* Adjust MMAP memory offsets for the CAPTURE queue */ |
|---|
| 827 | + v4l2_m2m_adjust_mem_offset(vq, buf); |
|---|
| 828 | + |
|---|
| 829 | + return 0; |
|---|
| 510 | 830 | } |
|---|
| 511 | 831 | EXPORT_SYMBOL_GPL(v4l2_m2m_prepare_buf); |
|---|
| 512 | 832 | |
|---|
| .. | .. |
|---|
| 585 | 905 | } |
|---|
| 586 | 906 | EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff); |
|---|
| 587 | 907 | |
|---|
| 588 | | -__poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, |
|---|
| 589 | | - struct poll_table_struct *wait) |
|---|
| 908 | +static __poll_t v4l2_m2m_poll_for_data(struct file *file, |
|---|
| 909 | + struct v4l2_m2m_ctx *m2m_ctx, |
|---|
| 910 | + struct poll_table_struct *wait) |
|---|
| 590 | 911 | { |
|---|
| 591 | | - struct video_device *vfd = video_devdata(file); |
|---|
| 592 | | - __poll_t req_events = poll_requested_events(wait); |
|---|
| 593 | 912 | struct vb2_queue *src_q, *dst_q; |
|---|
| 594 | | - struct vb2_buffer *src_vb = NULL, *dst_vb = NULL; |
|---|
| 595 | 913 | __poll_t rc = 0; |
|---|
| 596 | 914 | unsigned long flags; |
|---|
| 597 | 915 | |
|---|
| 598 | | - if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) { |
|---|
| 599 | | - struct v4l2_fh *fh = file->private_data; |
|---|
| 600 | | - |
|---|
| 601 | | - if (v4l2_event_pending(fh)) |
|---|
| 602 | | - rc = EPOLLPRI; |
|---|
| 603 | | - else if (req_events & EPOLLPRI) |
|---|
| 604 | | - poll_wait(file, &fh->wait, wait); |
|---|
| 605 | | - if (!(req_events & (EPOLLOUT | EPOLLWRNORM | EPOLLIN | EPOLLRDNORM))) |
|---|
| 606 | | - return rc; |
|---|
| 607 | | - } |
|---|
| 608 | | - |
|---|
| 609 | 916 | src_q = v4l2_m2m_get_src_vq(m2m_ctx); |
|---|
| 610 | 917 | dst_q = v4l2_m2m_get_dst_vq(m2m_ctx); |
|---|
| 918 | + |
|---|
| 919 | + poll_wait(file, &src_q->done_wq, wait); |
|---|
| 920 | + poll_wait(file, &dst_q->done_wq, wait); |
|---|
| 611 | 921 | |
|---|
| 612 | 922 | /* |
|---|
| 613 | 923 | * There has to be at least one buffer queued on each queued_list, which |
|---|
| 614 | 924 | * means either in driver already or waiting for driver to claim it |
|---|
| 615 | 925 | * and start processing. |
|---|
| 616 | 926 | */ |
|---|
| 617 | | - if ((!src_q->streaming || list_empty(&src_q->queued_list)) |
|---|
| 618 | | - && (!dst_q->streaming || list_empty(&dst_q->queued_list))) { |
|---|
| 619 | | - rc |= EPOLLERR; |
|---|
| 620 | | - goto end; |
|---|
| 621 | | - } |
|---|
| 622 | | - |
|---|
| 623 | | - spin_lock_irqsave(&src_q->done_lock, flags); |
|---|
| 624 | | - if (list_empty(&src_q->done_list)) |
|---|
| 625 | | - poll_wait(file, &src_q->done_wq, wait); |
|---|
| 626 | | - spin_unlock_irqrestore(&src_q->done_lock, flags); |
|---|
| 627 | | - |
|---|
| 628 | | - spin_lock_irqsave(&dst_q->done_lock, flags); |
|---|
| 629 | | - if (list_empty(&dst_q->done_list)) { |
|---|
| 630 | | - /* |
|---|
| 631 | | - * If the last buffer was dequeued from the capture queue, |
|---|
| 632 | | - * return immediately. DQBUF will return -EPIPE. |
|---|
| 633 | | - */ |
|---|
| 634 | | - if (dst_q->last_buffer_dequeued) { |
|---|
| 635 | | - spin_unlock_irqrestore(&dst_q->done_lock, flags); |
|---|
| 636 | | - return rc | EPOLLIN | EPOLLRDNORM; |
|---|
| 637 | | - } |
|---|
| 638 | | - |
|---|
| 639 | | - poll_wait(file, &dst_q->done_wq, wait); |
|---|
| 640 | | - } |
|---|
| 641 | | - spin_unlock_irqrestore(&dst_q->done_lock, flags); |
|---|
| 927 | + if ((!src_q->streaming || src_q->error || |
|---|
| 928 | + list_empty(&src_q->queued_list)) && |
|---|
| 929 | + (!dst_q->streaming || dst_q->error || |
|---|
| 930 | + (list_empty(&dst_q->queued_list) && !dst_q->last_buffer_dequeued))) |
|---|
| 931 | + return EPOLLERR; |
|---|
| 642 | 932 | |
|---|
| 643 | 933 | spin_lock_irqsave(&src_q->done_lock, flags); |
|---|
| 644 | 934 | if (!list_empty(&src_q->done_list)) |
|---|
| 645 | | - src_vb = list_first_entry(&src_q->done_list, struct vb2_buffer, |
|---|
| 646 | | - done_entry); |
|---|
| 647 | | - if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE |
|---|
| 648 | | - || src_vb->state == VB2_BUF_STATE_ERROR)) |
|---|
| 649 | 935 | rc |= EPOLLOUT | EPOLLWRNORM; |
|---|
| 650 | 936 | spin_unlock_irqrestore(&src_q->done_lock, flags); |
|---|
| 651 | 937 | |
|---|
| 652 | 938 | spin_lock_irqsave(&dst_q->done_lock, flags); |
|---|
| 653 | | - if (!list_empty(&dst_q->done_list)) |
|---|
| 654 | | - dst_vb = list_first_entry(&dst_q->done_list, struct vb2_buffer, |
|---|
| 655 | | - done_entry); |
|---|
| 656 | | - if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE |
|---|
| 657 | | - || dst_vb->state == VB2_BUF_STATE_ERROR)) |
|---|
| 939 | + /* |
|---|
| 940 | + * If the last buffer was dequeued from the capture queue, signal |
|---|
| 941 | + * userspace. DQBUF(CAPTURE) will return -EPIPE. |
|---|
| 942 | + */ |
|---|
| 943 | + if (!list_empty(&dst_q->done_list) || dst_q->last_buffer_dequeued) |
|---|
| 658 | 944 | rc |= EPOLLIN | EPOLLRDNORM; |
|---|
| 659 | 945 | spin_unlock_irqrestore(&dst_q->done_lock, flags); |
|---|
| 660 | 946 | |
|---|
| 661 | | -end: |
|---|
| 947 | + return rc; |
|---|
| 948 | +} |
|---|
| 949 | + |
|---|
| 950 | +__poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, |
|---|
| 951 | + struct poll_table_struct *wait) |
|---|
| 952 | +{ |
|---|
| 953 | + struct video_device *vfd = video_devdata(file); |
|---|
| 954 | + __poll_t req_events = poll_requested_events(wait); |
|---|
| 955 | + __poll_t rc = 0; |
|---|
| 956 | + |
|---|
| 957 | + if (req_events & (EPOLLOUT | EPOLLWRNORM | EPOLLIN | EPOLLRDNORM)) |
|---|
| 958 | + rc = v4l2_m2m_poll_for_data(file, m2m_ctx, wait); |
|---|
| 959 | + |
|---|
| 960 | + if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) { |
|---|
| 961 | + struct v4l2_fh *fh = file->private_data; |
|---|
| 962 | + |
|---|
| 963 | + poll_wait(file, &fh->wait, wait); |
|---|
| 964 | + if (v4l2_event_pending(fh)) |
|---|
| 965 | + rc |= EPOLLPRI; |
|---|
| 966 | + } |
|---|
| 967 | + |
|---|
| 662 | 968 | return rc; |
|---|
| 663 | 969 | } |
|---|
| 664 | 970 | EXPORT_SYMBOL_GPL(v4l2_m2m_poll); |
|---|
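
From userspace the reworked poll handler behaves as follows, shown here as a small illustrative helper: POLLOUT/POLLWRNORM means an OUTPUT buffer can be dequeued, POLLIN/POLLRDNORM means a CAPTURE buffer can be dequeued (or the last buffer was already dequeued, in which case DQBUF returns -EPIPE), and POLLPRI signals a pending V4L2 event.

```c
#include <poll.h>

/* Wait until the m2m device has something to report on @fd (sketch). */
static int wait_for_m2m_fd(int fd)
{
	struct pollfd pfd = {
		.fd = fd,
		.events = POLLIN | POLLOUT | POLLPRI,
	};

	if (poll(&pfd, 1, -1) < 0)
		return -1;

	return pfd.revents;	/* caller checks POLLIN / POLLOUT / POLLPRI */
}
```
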
| .. | .. |
|---|
| 863 | 1169 | m2m_dev->m2m_ops = m2m_ops; |
|---|
| 864 | 1170 | INIT_LIST_HEAD(&m2m_dev->job_queue); |
|---|
| 865 | 1171 | spin_lock_init(&m2m_dev->job_spinlock); |
|---|
| 1172 | + INIT_WORK(&m2m_dev->job_work, v4l2_m2m_device_run_work); |
|---|
| 866 | 1173 | |
|---|
| 867 | 1174 | return m2m_dev; |
|---|
| 868 | 1175 | } |
|---|
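
With the job_work added above, .device_run is never entered from atomic context: completion paths defer the next job to the framework's worker. A sketch of the usual registration, with hypothetical `mydrv_*` names:

```c
#include <linux/err.h>
#include <media/v4l2-mem2mem.h>

static void mydrv_device_run(void *priv)
{
	/* ... look up the context from @priv and kick the hardware ... */
}

static const struct v4l2_m2m_ops mydrv_m2m_ops = {
	.device_run	= mydrv_device_run,
	/* .job_ready and .job_abort are optional */
};

static int mydrv_register_m2m(struct mydrv_dev *dev)
{
	dev->m2m_dev = v4l2_m2m_init(&mydrv_m2m_ops);
	return PTR_ERR_OR_ZERO(dev->m2m_dev);
}
```
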
| .. | .. |
|---|
| 905 | 1212 | if (ret) |
|---|
| 906 | 1213 | goto err; |
|---|
| 907 | 1214 | /* |
|---|
| 908 | | - * If both queues use same mutex assign it as the common buffer |
|---|
| 909 | | - * queues lock to the m2m context. This lock is used in the |
|---|
| 910 | | - * v4l2_m2m_ioctl_* helpers. |
|---|
| 1215 | + * Both queues should use the same mutex to lock the m2m context. |
|---|
| 1216 | + * This lock is used in some v4l2_m2m_* helpers. |
|---|
| 911 | 1217 | */ |
|---|
| 912 | | - if (out_q_ctx->q.lock == cap_q_ctx->q.lock) |
|---|
| 913 | | - m2m_ctx->q_lock = out_q_ctx->q.lock; |
|---|
| 1218 | + if (WARN_ON(out_q_ctx->q.lock != cap_q_ctx->q.lock)) { |
|---|
| 1219 | + ret = -EINVAL; |
|---|
| 1220 | + goto err; |
|---|
| 1221 | + } |
|---|
| 1222 | + m2m_ctx->q_lock = out_q_ctx->q.lock; |
|---|
| 914 | 1223 | |
|---|
| 915 | 1224 | return m2m_ctx; |
|---|
| 916 | 1225 | err: |
|---|
| .. | .. |
|---|
| 949 | 1258 | spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags); |
|---|
| 950 | 1259 | } |
|---|
| 951 | 1260 | EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue); |
|---|
| 1261 | + |
|---|
| 1262 | +void v4l2_m2m_buf_copy_metadata(const struct vb2_v4l2_buffer *out_vb, |
|---|
| 1263 | + struct vb2_v4l2_buffer *cap_vb, |
|---|
| 1264 | + bool copy_frame_flags) |
|---|
| 1265 | +{ |
|---|
| 1266 | + u32 mask = V4L2_BUF_FLAG_TIMECODE | V4L2_BUF_FLAG_TSTAMP_SRC_MASK; |
|---|
| 1267 | + |
|---|
| 1268 | + if (copy_frame_flags) |
|---|
| 1269 | + mask |= V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_PFRAME | |
|---|
| 1270 | + V4L2_BUF_FLAG_BFRAME; |
|---|
| 1271 | + |
|---|
| 1272 | + cap_vb->vb2_buf.timestamp = out_vb->vb2_buf.timestamp; |
|---|
| 1273 | + |
|---|
| 1274 | + if (out_vb->flags & V4L2_BUF_FLAG_TIMECODE) |
|---|
| 1275 | + cap_vb->timecode = out_vb->timecode; |
|---|
| 1276 | + cap_vb->field = out_vb->field; |
|---|
| 1277 | + cap_vb->flags &= ~mask; |
|---|
| 1278 | + cap_vb->flags |= out_vb->flags & mask; |
|---|
| 1279 | + cap_vb->vb2_buf.copied_timestamp = 1; |
|---|
| 1280 | +} |
|---|
| 1281 | +EXPORT_SYMBOL_GPL(v4l2_m2m_buf_copy_metadata); |
|---|
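
Expanding the device_run() stub from the earlier registration sketch: this is where v4l2_m2m_buf_copy_metadata() is meant to be used, so that timestamps, timecodes and (optionally) the KEY/P/B frame flags travel from the OUTPUT buffer to the CAPTURE buffer the hardware will fill. Still a hypothetical driver:

```c
#include <media/v4l2-mem2mem.h>

static void mydrv_device_run(void *priv)
{
	struct mydrv_ctx *ctx = priv;
	struct vb2_v4l2_buffer *src, *dst;

	src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
	dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);

	/* Decoder case: don't copy KEY/P/B frame flags, hence "false". */
	v4l2_m2m_buf_copy_metadata(src, dst, false);

	/* ... program DMA addresses from src/dst and start the hardware ... */
}
```
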
| 1282 | + |
|---|
| 1283 | +void v4l2_m2m_request_queue(struct media_request *req) |
|---|
| 1284 | +{ |
|---|
| 1285 | + struct media_request_object *obj, *obj_safe; |
|---|
| 1286 | + struct v4l2_m2m_ctx *m2m_ctx = NULL; |
|---|
| 1287 | + |
|---|
| 1288 | + /* |
|---|
| 1289 | + * Queue all objects. Note that buffer objects are at the end of the |
|---|
| 1290 | + * objects list, after all other object types. Once buffer objects |
|---|
| 1291 | + * are queued, the driver might delete them immediately (if the driver |
|---|
| 1292 | + * processes the buffer at once), so we have to use |
|---|
| 1293 | + * list_for_each_entry_safe() to handle the case where the object we |
|---|
| 1294 | + * queue is deleted. |
|---|
| 1295 | + */ |
|---|
| 1296 | + list_for_each_entry_safe(obj, obj_safe, &req->objects, list) { |
|---|
| 1297 | + struct v4l2_m2m_ctx *m2m_ctx_obj; |
|---|
| 1298 | + struct vb2_buffer *vb; |
|---|
| 1299 | + |
|---|
| 1300 | + if (!obj->ops->queue) |
|---|
| 1301 | + continue; |
|---|
| 1302 | + |
|---|
| 1303 | + if (vb2_request_object_is_buffer(obj)) { |
|---|
| 1304 | + /* Sanity checks */ |
|---|
| 1305 | + vb = container_of(obj, struct vb2_buffer, req_obj); |
|---|
| 1306 | + WARN_ON(!V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)); |
|---|
| 1307 | + m2m_ctx_obj = container_of(vb->vb2_queue, |
|---|
| 1308 | + struct v4l2_m2m_ctx, |
|---|
| 1309 | + out_q_ctx.q); |
|---|
| 1310 | + WARN_ON(m2m_ctx && m2m_ctx_obj != m2m_ctx); |
|---|
| 1311 | + m2m_ctx = m2m_ctx_obj; |
|---|
| 1312 | + } |
|---|
| 1313 | + |
|---|
| 1314 | + /* |
|---|
| 1315 | + * The buffer we queue here can in theory be immediately |
|---|
| 1316 | + * unbound, hence the use of list_for_each_entry_safe() |
|---|
| 1317 | + * above and why we call the queue op last. |
|---|
| 1318 | + */ |
|---|
| 1319 | + obj->ops->queue(obj); |
|---|
| 1320 | + } |
|---|
| 1321 | + |
|---|
| 1322 | + WARN_ON(!m2m_ctx); |
|---|
| 1323 | + |
|---|
| 1324 | + if (m2m_ctx) |
|---|
| 1325 | + v4l2_m2m_try_schedule(m2m_ctx); |
|---|
| 1326 | +} |
|---|
| 1327 | +EXPORT_SYMBOL_GPL(v4l2_m2m_request_queue); |
|---|
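
For stateless codecs using the Request API, v4l2_m2m_request_queue() is meant to be plugged into the media device's req_queue operation, with validation left to vb2; it queues all request objects and then tries to schedule the owning m2m context. A sketch (the ops name is hypothetical, both callbacks are real):

```c
#include <media/media-device.h>
#include <media/videobuf2-v4l2.h>
#include <media/v4l2-mem2mem.h>

static const struct media_device_ops mydrv_media_ops = {
	.req_validate	= vb2_request_validate,
	.req_queue	= v4l2_m2m_request_queue,
};
```

The media device is then given this ops structure (mdev->ops = &mydrv_media_ops) before registration.
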
| 952 | 1328 | |
|---|
| 953 | 1329 | /* Videobuf2 ioctl helpers */ |
|---|
| 954 | 1330 | |
|---|
| .. | .. |
|---|
| 1033 | 1409 | } |
|---|
| 1034 | 1410 | EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamoff); |
|---|
| 1035 | 1411 | |
|---|
| 1412 | +int v4l2_m2m_ioctl_try_encoder_cmd(struct file *file, void *fh, |
|---|
| 1413 | + struct v4l2_encoder_cmd *ec) |
|---|
| 1414 | +{ |
|---|
| 1415 | + if (ec->cmd != V4L2_ENC_CMD_STOP && ec->cmd != V4L2_ENC_CMD_START) |
|---|
| 1416 | + return -EINVAL; |
|---|
| 1417 | + |
|---|
| 1418 | + ec->flags = 0; |
|---|
| 1419 | + return 0; |
|---|
| 1420 | +} |
|---|
| 1421 | +EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_try_encoder_cmd); |
|---|
| 1422 | + |
|---|
| 1423 | +int v4l2_m2m_ioctl_try_decoder_cmd(struct file *file, void *fh, |
|---|
| 1424 | + struct v4l2_decoder_cmd *dc) |
|---|
| 1425 | +{ |
|---|
| 1426 | + if (dc->cmd != V4L2_DEC_CMD_STOP && dc->cmd != V4L2_DEC_CMD_START) |
|---|
| 1427 | + return -EINVAL; |
|---|
| 1428 | + |
|---|
| 1429 | + dc->flags = 0; |
|---|
| 1430 | + |
|---|
| 1431 | + if (dc->cmd == V4L2_DEC_CMD_STOP) { |
|---|
| 1432 | + dc->stop.pts = 0; |
|---|
| 1433 | + } else if (dc->cmd == V4L2_DEC_CMD_START) { |
|---|
| 1434 | + dc->start.speed = 0; |
|---|
| 1435 | + dc->start.format = V4L2_DEC_START_FMT_NONE; |
|---|
| 1436 | + } |
|---|
| 1437 | + return 0; |
|---|
| 1438 | +} |
|---|
| 1439 | +EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_try_decoder_cmd); |
|---|
| 1440 | + |
|---|
| 1441 | +/* |
|---|
| 1442 | + * Updates the encoding state on ENC_CMD_STOP/ENC_CMD_START |
|---|
| 1443 | + * Should be called from the encoder driver encoder_cmd() callback |
|---|
| 1444 | + */ |
|---|
| 1445 | +int v4l2_m2m_encoder_cmd(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, |
|---|
| 1446 | + struct v4l2_encoder_cmd *ec) |
|---|
| 1447 | +{ |
|---|
| 1448 | + if (ec->cmd != V4L2_ENC_CMD_STOP && ec->cmd != V4L2_ENC_CMD_START) |
|---|
| 1449 | + return -EINVAL; |
|---|
| 1450 | + |
|---|
| 1451 | + if (ec->cmd == V4L2_ENC_CMD_STOP) |
|---|
| 1452 | + return v4l2_update_last_buf_state(m2m_ctx); |
|---|
| 1453 | + |
|---|
| 1454 | + if (m2m_ctx->is_draining) |
|---|
| 1455 | + return -EBUSY; |
|---|
| 1456 | + |
|---|
| 1457 | + if (m2m_ctx->has_stopped) |
|---|
| 1458 | + m2m_ctx->has_stopped = false; |
|---|
| 1459 | + |
|---|
| 1460 | + return 0; |
|---|
| 1461 | +} |
|---|
| 1462 | +EXPORT_SYMBOL_GPL(v4l2_m2m_encoder_cmd); |
|---|
| 1463 | + |
|---|
| 1464 | +/* |
|---|
| 1465 | + * Updates the decoding state on DEC_CMD_STOP/DEC_CMD_START |
|---|
| 1466 | + * Should be called from the decoder driver decoder_cmd() callback |
|---|
| 1467 | + */ |
|---|
| 1468 | +int v4l2_m2m_decoder_cmd(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, |
|---|
| 1469 | + struct v4l2_decoder_cmd *dc) |
|---|
| 1470 | +{ |
|---|
| 1471 | + if (dc->cmd != V4L2_DEC_CMD_STOP && dc->cmd != V4L2_DEC_CMD_START) |
|---|
| 1472 | + return -EINVAL; |
|---|
| 1473 | + |
|---|
| 1474 | + if (dc->cmd == V4L2_DEC_CMD_STOP) |
|---|
| 1475 | + return v4l2_update_last_buf_state(m2m_ctx); |
|---|
| 1476 | + |
|---|
| 1477 | + if (m2m_ctx->is_draining) |
|---|
| 1478 | + return -EBUSY; |
|---|
| 1479 | + |
|---|
| 1480 | + if (m2m_ctx->has_stopped) |
|---|
| 1481 | + m2m_ctx->has_stopped = false; |
|---|
| 1482 | + |
|---|
| 1483 | + return 0; |
|---|
| 1484 | +} |
|---|
| 1485 | +EXPORT_SYMBOL_GPL(v4l2_m2m_decoder_cmd); |
|---|
| 1486 | + |
|---|
| 1487 | +int v4l2_m2m_ioctl_encoder_cmd(struct file *file, void *priv, |
|---|
| 1488 | + struct v4l2_encoder_cmd *ec) |
|---|
| 1489 | +{ |
|---|
| 1490 | + struct v4l2_fh *fh = file->private_data; |
|---|
| 1491 | + |
|---|
| 1492 | + return v4l2_m2m_encoder_cmd(file, fh->m2m_ctx, ec); |
|---|
| 1493 | +} |
|---|
| 1494 | +EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_encoder_cmd); |
|---|
| 1495 | + |
|---|
| 1496 | +int v4l2_m2m_ioctl_decoder_cmd(struct file *file, void *priv, |
|---|
| 1497 | + struct v4l2_decoder_cmd *dc) |
|---|
| 1498 | +{ |
|---|
| 1499 | + struct v4l2_fh *fh = file->private_data; |
|---|
| 1500 | + |
|---|
| 1501 | + return v4l2_m2m_decoder_cmd(file, fh->m2m_ctx, dc); |
|---|
| 1502 | +} |
|---|
| 1503 | +EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_decoder_cmd); |
|---|
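
These generic command handlers are designed to drop straight into a stateful codec's ioctl table; the only requirement is the usual m2m file-handle layout (file->private_data is a struct v4l2_fh with a valid m2m_ctx). A sketch for a decoder, with a hypothetical ops name; an encoder would use the v4l2_m2m_ioctl_(try_)encoder_cmd pair instead:

```c
#include <media/v4l2-ioctl.h>
#include <media/v4l2-mem2mem.h>

static const struct v4l2_ioctl_ops mydrv_ioctl_ops = {
	/* ... format, buffer and streaming ioctls elided ... */
	.vidioc_try_decoder_cmd	= v4l2_m2m_ioctl_try_decoder_cmd,
	.vidioc_decoder_cmd	= v4l2_m2m_ioctl_decoder_cmd,
};
```
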
| 1504 | + |
|---|
| 1505 | +int v4l2_m2m_ioctl_stateless_try_decoder_cmd(struct file *file, void *fh, |
|---|
| 1506 | + struct v4l2_decoder_cmd *dc) |
|---|
| 1507 | +{ |
|---|
| 1508 | + if (dc->cmd != V4L2_DEC_CMD_FLUSH) |
|---|
| 1509 | + return -EINVAL; |
|---|
| 1510 | + |
|---|
| 1511 | + dc->flags = 0; |
|---|
| 1512 | + |
|---|
| 1513 | + return 0; |
|---|
| 1514 | +} |
|---|
| 1515 | +EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_stateless_try_decoder_cmd); |
|---|
| 1516 | + |
|---|
| 1517 | +int v4l2_m2m_ioctl_stateless_decoder_cmd(struct file *file, void *priv, |
|---|
| 1518 | + struct v4l2_decoder_cmd *dc) |
|---|
| 1519 | +{ |
|---|
| 1520 | + struct v4l2_fh *fh = file->private_data; |
|---|
| 1521 | + struct vb2_v4l2_buffer *out_vb, *cap_vb; |
|---|
| 1522 | + struct v4l2_m2m_dev *m2m_dev = fh->m2m_ctx->m2m_dev; |
|---|
| 1523 | + unsigned long flags; |
|---|
| 1524 | + int ret; |
|---|
| 1525 | + |
|---|
| 1526 | + ret = v4l2_m2m_ioctl_stateless_try_decoder_cmd(file, priv, dc); |
|---|
| 1527 | + if (ret < 0) |
|---|
| 1528 | + return ret; |
|---|
| 1529 | + |
|---|
| 1530 | + spin_lock_irqsave(&m2m_dev->job_spinlock, flags); |
|---|
| 1531 | + out_vb = v4l2_m2m_last_src_buf(fh->m2m_ctx); |
|---|
| 1532 | + cap_vb = v4l2_m2m_last_dst_buf(fh->m2m_ctx); |
|---|
| 1533 | + |
|---|
| 1534 | + /* |
|---|
| 1535 | + * If there is an out buffer pending, then clear any HOLD flag. |
|---|
| 1536 | + * |
|---|
| 1537 | + * By clearing this flag we ensure that when this output |
|---|
| 1538 | + * buffer is processed any held capture buffer will be released. |
|---|
| 1539 | + */ |
|---|
| 1540 | + if (out_vb) { |
|---|
| 1541 | + out_vb->flags &= ~V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF; |
|---|
| 1542 | + } else if (cap_vb && cap_vb->is_held) { |
|---|
| 1543 | + /* |
|---|
| 1544 | + * If there were no output buffers, but there is a |
|---|
| 1545 | + * capture buffer that is held, then release that |
|---|
| 1546 | + * buffer. |
|---|
| 1547 | + */ |
|---|
| 1548 | + cap_vb->is_held = false; |
|---|
| 1549 | + v4l2_m2m_dst_buf_remove(fh->m2m_ctx); |
|---|
| 1550 | + v4l2_m2m_buf_done(cap_vb, VB2_BUF_STATE_DONE); |
|---|
| 1551 | + } |
|---|
| 1552 | + spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); |
|---|
| 1553 | + |
|---|
| 1554 | + return 0; |
|---|
| 1555 | +} |
|---|
| 1556 | +EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_stateless_decoder_cmd); |
|---|
| 1557 | + |
|---|
| 1036 | 1558 | /* |
|---|
| 1037 | 1559 | * v4l2_file_operations helpers. It is assumed here same lock is used |
|---|
| 1038 | 1560 | * for the output and the capture buffer queue. |
|---|