2023-12-09 b22da3d8526a935aa31e086e63f60ff3246cb61c
kernel/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -1,3 +1,4 @@
1
+// SPDX-License-Identifier: GPL-2.0-or-later
12 /*
23 * Memory-to-memory device framework for Video for Linux 2 and videobuf.
34 *
@@ -7,11 +8,6 @@
78 * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
89 * Pawel Osciak, <pawel@osciak.com>
910 * Marek Szyprowski, <m.szyprowski@samsung.com>
10
- *
11
- * This program is free software; you can redistribute it and/or modify
12
- * it under the terms of the GNU General Public License as published by the
13
- * Free Software Foundation; either version 2 of the License, or (at your
14
- * option) any later version.
1511 */
1612 #include <linux/module.h>
1713 #include <linux/sched.h>
@@ -45,6 +41,10 @@
4541 #define TRANS_RUNNING (1 << 1)
4642 /* Instance is currently aborting */
4743 #define TRANS_ABORT (1 << 2)
44
+
45
+
46
+/* The job queue is not running new jobs */
47
+#define QUEUE_PAUSED (1 << 0)
4848
4949
5050 /* Offset base for buffers on the destination queue - used to distinguish
@@ -87,6 +87,8 @@
8787 * @curr_ctx: currently running instance
8888 * @job_queue: instances queued to run
8989 * @job_spinlock: protects job_queue
90
+ * @job_work: worker to run queued jobs.
91
+ * @job_queue_flags: flags of the queue status, %QUEUE_PAUSED.
9092 * @m2m_ops: driver callbacks
9193 */
9294 struct v4l2_m2m_dev {
@@ -103,6 +105,8 @@
103105
104106 struct list_head job_queue;
105107 spinlock_t job_spinlock;
108
+ struct work_struct job_work;
109
+ unsigned long job_queue_flags;
106110
107111 const struct v4l2_m2m_ops *m2m_ops;
108112 };
@@ -129,7 +133,7 @@
129133 }
130134 EXPORT_SYMBOL(v4l2_m2m_get_vq);
131135
132
-void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
136
+struct vb2_v4l2_buffer *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
133137 {
134138 struct v4l2_m2m_buffer *b;
135139 unsigned long flags;
@@ -147,7 +151,7 @@
147151 }
148152 EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf);
149153
150
-void *v4l2_m2m_last_buf(struct v4l2_m2m_queue_ctx *q_ctx)
154
+struct vb2_v4l2_buffer *v4l2_m2m_last_buf(struct v4l2_m2m_queue_ctx *q_ctx)
151155 {
152156 struct v4l2_m2m_buffer *b;
153157 unsigned long flags;
@@ -165,7 +169,7 @@
165169 }
166170 EXPORT_SYMBOL_GPL(v4l2_m2m_last_buf);
167171
168
-void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx)
172
+struct vb2_v4l2_buffer *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx)
169173 {
170174 struct v4l2_m2m_buffer *b;
171175 unsigned long flags;
@@ -244,6 +248,9 @@
244248 * @m2m_dev: per-device context
245249 *
246250 * Get next transaction (if present) from the waiting jobs list and run it.
251
+ *
252
+ * Note that this function can run on a given v4l2_m2m_ctx context,
253
+ * but call .device_run for another context.
247254 */
248255 static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
249256 {
@@ -259,6 +266,12 @@
259266 if (list_empty(&m2m_dev->job_queue)) {
260267 spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
261268 dprintk("No job pending\n");
269
+ return;
270
+ }
271
+
272
+ if (m2m_dev->job_queue_flags & QUEUE_PAUSED) {
273
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
274
+ dprintk("Running new jobs is paused\n");
262275 return;
263276 }
264277
@@ -283,7 +296,8 @@
283296 static void __v4l2_m2m_try_queue(struct v4l2_m2m_dev *m2m_dev,
284297 struct v4l2_m2m_ctx *m2m_ctx)
285298 {
286
- unsigned long flags_job, flags_out, flags_cap;
299
+ unsigned long flags_job;
300
+ struct vb2_v4l2_buffer *dst, *src;
287301
288302 dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);
289303
@@ -297,50 +311,62 @@
297311
298312 /* If the context is aborted then don't schedule it */
299313 if (m2m_ctx->job_flags & TRANS_ABORT) {
300
- spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
301314 dprintk("Aborted context\n");
302
- return;
315
+ goto job_unlock;
303316 }
304317
305318 if (m2m_ctx->job_flags & TRANS_QUEUED) {
306
- spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
307319 dprintk("On job queue already\n");
308
- return;
320
+ goto job_unlock;
309321 }
310322
311
- spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
312
- if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)
313
- && !m2m_ctx->out_q_ctx.buffered) {
314
- spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
315
- flags_out);
316
- spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
323
+ src = v4l2_m2m_next_src_buf(m2m_ctx);
324
+ dst = v4l2_m2m_next_dst_buf(m2m_ctx);
325
+ if (!src && !m2m_ctx->out_q_ctx.buffered) {
317326 dprintk("No input buffers available\n");
318
- return;
327
+ goto job_unlock;
319328 }
320
- spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
321
- if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)
322
- && !m2m_ctx->cap_q_ctx.buffered) {
323
- spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock,
324
- flags_cap);
325
- spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
326
- flags_out);
327
- spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
329
+ if (!dst && !m2m_ctx->cap_q_ctx.buffered) {
328330 dprintk("No output buffers available\n");
329
- return;
331
+ goto job_unlock;
330332 }
331
- spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
332
- spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
333
+
334
+ m2m_ctx->new_frame = true;
335
+
336
+ if (src && dst && dst->is_held &&
337
+ dst->vb2_buf.copied_timestamp &&
338
+ dst->vb2_buf.timestamp != src->vb2_buf.timestamp) {
339
+ dst->is_held = false;
340
+ v4l2_m2m_dst_buf_remove(m2m_ctx);
341
+ v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
342
+ dst = v4l2_m2m_next_dst_buf(m2m_ctx);
343
+
344
+ if (!dst && !m2m_ctx->cap_q_ctx.buffered) {
345
+ dprintk("No output buffers available after returning held buffer\n");
346
+ goto job_unlock;
347
+ }
348
+ }
349
+
350
+ if (src && dst && (m2m_ctx->out_q_ctx.q.subsystem_flags &
351
+ VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF))
352
+ m2m_ctx->new_frame = !dst->vb2_buf.copied_timestamp ||
353
+ dst->vb2_buf.timestamp != src->vb2_buf.timestamp;
354
+
355
+ if (m2m_ctx->has_stopped) {
356
+ dprintk("Device has stopped\n");
357
+ goto job_unlock;
358
+ }
333359
334360 if (m2m_dev->m2m_ops->job_ready
335361 && (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
336
- spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
337362 dprintk("Driver not ready\n");
338
- return;
363
+ goto job_unlock;
339364 }
340365
341366 list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue);
342367 m2m_ctx->job_flags |= TRANS_QUEUED;
343368
369
+job_unlock:
344370 spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
345371 }
346372
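The rework above only changes behaviour for drivers that declare support for held capture buffers on the OUTPUT queue. A minimal sketch of how a stateless decoder might opt in from its queue_init callback (the mydrv_ prefix and driver context are hypothetical, and the usual ops/mem_ops/lock setup is elided):

#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-v4l2.h>

static int mydrv_queue_init(void *priv, struct vb2_queue *src_vq,
                            struct vb2_queue *dst_vq)
{
        /*
         * Allow OUTPUT buffers to carry V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF,
         * so __v4l2_m2m_try_queue() keeps the CAPTURE buffer held until an
         * OUTPUT buffer with a new timestamp arrives.
         */
        src_vq->subsystem_flags |= VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF;

        /* ... remaining src_vq/dst_vq configuration elided ... */
        return vb2_queue_init(src_vq) ?: vb2_queue_init(dst_vq);
}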
@@ -366,6 +392,18 @@
366392 EXPORT_SYMBOL_GPL(v4l2_m2m_try_schedule);
367393
368394 /**
395
+ * v4l2_m2m_device_run_work() - run pending jobs for the context
396
+ * @work: Work structure used for scheduling the execution of this function.
397
+ */
398
+static void v4l2_m2m_device_run_work(struct work_struct *work)
399
+{
400
+ struct v4l2_m2m_dev *m2m_dev =
401
+ container_of(work, struct v4l2_m2m_dev, job_work);
402
+
403
+ v4l2_m2m_try_run(m2m_dev);
404
+}
405
+
406
+/**
369407 * v4l2_m2m_cancel_job() - cancel pending jobs for the context
370408 * @m2m_ctx: m2m context with jobs to be canceled
371409 *
@@ -387,7 +425,7 @@
387425 spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
388426 if (m2m_dev->m2m_ops->job_abort)
389427 m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
390
- dprintk("m2m_ctx %p running, will wait to complete", m2m_ctx);
428
+ dprintk("m2m_ctx %p running, will wait to complete\n", m2m_ctx);
391429 wait_event(m2m_ctx->finished,
392430 !(m2m_ctx->job_flags & TRANS_RUNNING));
393431 } else if (m2m_ctx->job_flags & TRANS_QUEUED) {
@@ -402,31 +440,133 @@
402440 }
403441 }
404442
405
-void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
406
- struct v4l2_m2m_ctx *m2m_ctx)
443
+/*
444
+ * Schedule the next job, called from v4l2_m2m_job_finish() or
445
+ * v4l2_m2m_buf_done_and_job_finish().
446
+ */
447
+static void v4l2_m2m_schedule_next_job(struct v4l2_m2m_dev *m2m_dev,
448
+ struct v4l2_m2m_ctx *m2m_ctx)
407449 {
408
- unsigned long flags;
450
+ /*
451
+ * This instance might have more buffers ready, but since we do not
452
+ * allow more than one job on the job_queue per instance, each has
453
+ * to be scheduled separately after the previous one finishes.
454
+ */
455
+ __v4l2_m2m_try_queue(m2m_dev, m2m_ctx);
409456
410
- spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
457
+ /*
458
+ * We might be running in atomic context,
459
+ * but the job must be run in non-atomic context.
460
+ */
461
+ schedule_work(&m2m_dev->job_work);
462
+}
463
+
464
+/*
465
+ * Assumes job_spinlock is held, called from v4l2_m2m_job_finish() or
466
+ * v4l2_m2m_buf_done_and_job_finish().
467
+ */
468
+static bool _v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
469
+ struct v4l2_m2m_ctx *m2m_ctx)
470
+{
411471 if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) {
412
- spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
413472 dprintk("Called by an instance not currently running\n");
414
- return;
473
+ return false;
415474 }
416475
417476 list_del(&m2m_dev->curr_ctx->queue);
418477 m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
419478 wake_up(&m2m_dev->curr_ctx->finished);
420479 m2m_dev->curr_ctx = NULL;
480
+ return true;
481
+}
421482
483
+void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
484
+ struct v4l2_m2m_ctx *m2m_ctx)
485
+{
486
+ unsigned long flags;
487
+ bool schedule_next;
488
+
489
+ /*
490
+ * This function should not be used for drivers that support
491
+ * holding capture buffers. Those should use
492
+ * v4l2_m2m_buf_done_and_job_finish() instead.
493
+ */
494
+ WARN_ON(m2m_ctx->out_q_ctx.q.subsystem_flags &
495
+ VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF);
496
+ spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
497
+ schedule_next = _v4l2_m2m_job_finish(m2m_dev, m2m_ctx);
422498 spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
423499
424
- /* This instance might have more buffers ready, but since we do not
425
- * allow more than one job on the job_queue per instance, each has
426
- * to be scheduled separately after the previous one finishes. */
427
- v4l2_m2m_try_schedule(m2m_ctx);
500
+ if (schedule_next)
501
+ v4l2_m2m_schedule_next_job(m2m_dev, m2m_ctx);
428502 }
429503 EXPORT_SYMBOL(v4l2_m2m_job_finish);
504
+
505
+void v4l2_m2m_buf_done_and_job_finish(struct v4l2_m2m_dev *m2m_dev,
506
+ struct v4l2_m2m_ctx *m2m_ctx,
507
+ enum vb2_buffer_state state)
508
+{
509
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
510
+ bool schedule_next = false;
511
+ unsigned long flags;
512
+
513
+ spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
514
+ src_buf = v4l2_m2m_src_buf_remove(m2m_ctx);
515
+ dst_buf = v4l2_m2m_next_dst_buf(m2m_ctx);
516
+
517
+ if (WARN_ON(!src_buf || !dst_buf))
518
+ goto unlock;
519
+ dst_buf->is_held = src_buf->flags & V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF;
520
+ if (!dst_buf->is_held) {
521
+ v4l2_m2m_dst_buf_remove(m2m_ctx);
522
+ v4l2_m2m_buf_done(dst_buf, state);
523
+ }
524
+ /*
525
+ * If the request API is being used, returning the OUTPUT
526
+ * (src) buffer will wake-up any process waiting on the
527
+ * request file descriptor.
528
+ *
529
+ * Therefore, return the CAPTURE (dst) buffer first,
530
+ * to avoid signalling the request file descriptor
531
+ * before the CAPTURE buffer is done.
532
+ */
533
+ v4l2_m2m_buf_done(src_buf, state);
534
+ schedule_next = _v4l2_m2m_job_finish(m2m_dev, m2m_ctx);
535
+unlock:
536
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
537
+
538
+ if (schedule_next)
539
+ v4l2_m2m_schedule_next_job(m2m_dev, m2m_ctx);
540
+}
541
+EXPORT_SYMBOL(v4l2_m2m_buf_done_and_job_finish);
542
+
543
+void v4l2_m2m_suspend(struct v4l2_m2m_dev *m2m_dev)
544
+{
545
+ unsigned long flags;
546
+ struct v4l2_m2m_ctx *curr_ctx;
547
+
548
+ spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
549
+ m2m_dev->job_queue_flags |= QUEUE_PAUSED;
550
+ curr_ctx = m2m_dev->curr_ctx;
551
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
552
+
553
+ if (curr_ctx)
554
+ wait_event(curr_ctx->finished,
555
+ !(curr_ctx->job_flags & TRANS_RUNNING));
556
+}
557
+EXPORT_SYMBOL(v4l2_m2m_suspend);
558
+
559
+void v4l2_m2m_resume(struct v4l2_m2m_dev *m2m_dev)
560
+{
561
+ unsigned long flags;
562
+
563
+ spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
564
+ m2m_dev->job_queue_flags &= ~QUEUE_PAUSED;
565
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
566
+
567
+ v4l2_m2m_try_run(m2m_dev);
568
+}
569
+EXPORT_SYMBOL(v4l2_m2m_resume);
430570
431571 int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
432572 struct v4l2_requestbuffers *reqbufs)
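A hedged sketch of the driver side of the two additions above: the completion (IRQ) path returns both buffers through v4l2_m2m_buf_done_and_job_finish(), which honours a held CAPTURE buffer, and system suspend/resume brackets the hardware with v4l2_m2m_suspend()/v4l2_m2m_resume(). The mydrv_ structures and the fh.m2m_ctx layout are the usual m2m driver pattern, not something this patch mandates:

#include <linux/interrupt.h>
#include <media/v4l2-mem2mem.h>

static irqreturn_t mydrv_irq(int irq, void *priv)
{
        struct mydrv_dev *dev = priv;   /* hypothetical per-device state */
        struct mydrv_ctx *ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);

        /* Return src and dst buffers; the core schedules the next job. */
        v4l2_m2m_buf_done_and_job_finish(dev->m2m_dev, ctx->fh.m2m_ctx,
                                         VB2_BUF_STATE_DONE);
        return IRQ_HANDLED;
}

static int mydrv_suspend(struct device *d)
{
        struct mydrv_dev *dev = dev_get_drvdata(d);

        /* Pause the job queue and wait for a running job to finish. */
        v4l2_m2m_suspend(dev->m2m_dev);
        return 0;
}

static int mydrv_resume(struct device *d)
{
        struct mydrv_dev *dev = dev_get_drvdata(d);

        v4l2_m2m_resume(dev->m2m_dev);
        return 0;
}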
@@ -445,19 +585,14 @@
445585 }
446586 EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);
447587
448
-int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
449
- struct v4l2_buffer *buf)
588
+static void v4l2_m2m_adjust_mem_offset(struct vb2_queue *vq,
589
+ struct v4l2_buffer *buf)
450590 {
451
- struct vb2_queue *vq;
452
- int ret = 0;
453
- unsigned int i;
454
-
455
- vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
456
- ret = vb2_querybuf(vq, buf);
457
-
458591 /* Adjust MMAP memory offsets for the CAPTURE queue */
459
- if (buf->memory == V4L2_MEMORY_MMAP && !V4L2_TYPE_IS_OUTPUT(vq->type)) {
592
+ if (buf->memory == V4L2_MEMORY_MMAP && V4L2_TYPE_IS_CAPTURE(vq->type)) {
460593 if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) {
594
+ unsigned int i;
595
+
461596 for (i = 0; i < buf->length; ++i)
462597 buf->m.planes[i].m.mem_offset
463598 += DST_QUEUE_OFF_BASE;
@@ -465,23 +600,196 @@
465600 buf->m.offset += DST_QUEUE_OFF_BASE;
466601 }
467602 }
468
-
469
- return ret;
470603 }
471
-EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);
472604
473
-int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
474
- struct v4l2_buffer *buf)
605
+int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
606
+ struct v4l2_buffer *buf)
475607 {
476608 struct vb2_queue *vq;
477609 int ret;
478610
479611 vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
480
- ret = vb2_qbuf(vq, buf);
481
- if (!ret)
612
+ ret = vb2_querybuf(vq, buf);
613
+ if (ret)
614
+ return ret;
615
+
616
+ /* Adjust MMAP memory offsets for the CAPTURE queue */
617
+ v4l2_m2m_adjust_mem_offset(vq, buf);
618
+
619
+ return 0;
620
+}
621
+EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);
622
+
623
+/*
624
+ * This will add the LAST flag and mark the buffer management
625
+ * state as stopped.
626
+ * This is called when the last capture buffer must be flagged as LAST
627
+ * in draining mode from the encoder/decoder driver buf_queue() callback
628
+ * or from v4l2_update_last_buf_state() when a capture buffer is available.
629
+ */
630
+void v4l2_m2m_last_buffer_done(struct v4l2_m2m_ctx *m2m_ctx,
631
+ struct vb2_v4l2_buffer *vbuf)
632
+{
633
+ vbuf->flags |= V4L2_BUF_FLAG_LAST;
634
+ vb2_buffer_done(&vbuf->vb2_buf, VB2_BUF_STATE_DONE);
635
+
636
+ v4l2_m2m_mark_stopped(m2m_ctx);
637
+}
638
+EXPORT_SYMBOL_GPL(v4l2_m2m_last_buffer_done);
639
+
640
+/* When stop command is issued, update buffer management state */
641
+static int v4l2_update_last_buf_state(struct v4l2_m2m_ctx *m2m_ctx)
642
+{
643
+ struct vb2_v4l2_buffer *next_dst_buf;
644
+
645
+ if (m2m_ctx->is_draining)
646
+ return -EBUSY;
647
+
648
+ if (m2m_ctx->has_stopped)
649
+ return 0;
650
+
651
+ m2m_ctx->last_src_buf = v4l2_m2m_last_src_buf(m2m_ctx);
652
+ m2m_ctx->is_draining = true;
653
+
654
+ /*
655
+ * The processing of the last output buffer queued before
656
+ * the STOP command is expected to mark the buffer management
657
+ * state as stopped with v4l2_m2m_mark_stopped().
658
+ */
659
+ if (m2m_ctx->last_src_buf)
660
+ return 0;
661
+
662
+ /*
663
+ * In case the output queue is empty, try to mark the last capture
664
+ * buffer as LAST.
665
+ */
666
+ next_dst_buf = v4l2_m2m_dst_buf_remove(m2m_ctx);
667
+ if (!next_dst_buf) {
668
+ /*
669
+ * Wait for the next queued one in encoder/decoder driver
670
+ * buf_queue() callback using the v4l2_m2m_dst_buf_is_last()
671
+ * helper or in v4l2_m2m_qbuf() if encoder/decoder is not yet
672
+ * streaming.
673
+ */
674
+ m2m_ctx->next_buf_last = true;
675
+ return 0;
676
+ }
677
+
678
+ v4l2_m2m_last_buffer_done(m2m_ctx, next_dst_buf);
679
+
680
+ return 0;
681
+}
682
+
683
+/*
684
+ * Updates the encoding/decoding buffer management state, should
685
+ * be called from encoder/decoder drivers start_streaming()
686
+ */
687
+void v4l2_m2m_update_start_streaming_state(struct v4l2_m2m_ctx *m2m_ctx,
688
+ struct vb2_queue *q)
689
+{
690
+ /* If start streaming again, untag the last output buffer */
691
+ if (V4L2_TYPE_IS_OUTPUT(q->type))
692
+ m2m_ctx->last_src_buf = NULL;
693
+}
694
+EXPORT_SYMBOL_GPL(v4l2_m2m_update_start_streaming_state);
695
+
696
+/*
697
+ * Updates the encoding/decoding buffer management state, should
698
+ * be called from encoder/decoder driver stop_streaming()
699
+ */
700
+void v4l2_m2m_update_stop_streaming_state(struct v4l2_m2m_ctx *m2m_ctx,
701
+ struct vb2_queue *q)
702
+{
703
+ if (V4L2_TYPE_IS_OUTPUT(q->type)) {
704
+ /*
705
+ * If in draining state, either mark the next dst buffer as
706
+ * done or flag the next one to be marked as done, either in the
707
+ * encoder/decoder driver buf_queue() callback using the
708
+ * v4l2_m2m_dst_buf_is_last() helper or in v4l2_m2m_qbuf() if the
709
+ * encoder/decoder is not yet streaming.
710
+ */
711
+ if (m2m_ctx->is_draining) {
712
+ struct vb2_v4l2_buffer *next_dst_buf;
713
+
714
+ m2m_ctx->last_src_buf = NULL;
715
+ next_dst_buf = v4l2_m2m_dst_buf_remove(m2m_ctx);
716
+ if (!next_dst_buf)
717
+ m2m_ctx->next_buf_last = true;
718
+ else
719
+ v4l2_m2m_last_buffer_done(m2m_ctx,
720
+ next_dst_buf);
721
+ }
722
+ } else {
723
+ v4l2_m2m_clear_state(m2m_ctx);
724
+ }
725
+}
726
+EXPORT_SYMBOL_GPL(v4l2_m2m_update_stop_streaming_state);
727
+
728
+static void v4l2_m2m_force_last_buf_done(struct v4l2_m2m_ctx *m2m_ctx,
729
+ struct vb2_queue *q)
730
+{
731
+ struct vb2_buffer *vb;
732
+ struct vb2_v4l2_buffer *vbuf;
733
+ unsigned int i;
734
+
735
+ if (WARN_ON(q->is_output))
736
+ return;
737
+ if (list_empty(&q->queued_list))
738
+ return;
739
+
740
+ vb = list_first_entry(&q->queued_list, struct vb2_buffer, queued_entry);
741
+ for (i = 0; i < vb->num_planes; i++)
742
+ vb2_set_plane_payload(vb, i, 0);
743
+
744
+ /*
745
+ * Since the buffer hasn't been queued to the ready queue,
746
+ * mark it active and owned before marking it LAST and DONE
747
+ */
748
+ vb->state = VB2_BUF_STATE_ACTIVE;
749
+ atomic_inc(&q->owned_by_drv_count);
750
+
751
+ vbuf = to_vb2_v4l2_buffer(vb);
752
+ vbuf->field = V4L2_FIELD_NONE;
753
+
754
+ v4l2_m2m_last_buffer_done(m2m_ctx, vbuf);
755
+}
756
+
757
+int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
758
+ struct v4l2_buffer *buf)
759
+{
760
+ struct video_device *vdev = video_devdata(file);
761
+ struct vb2_queue *vq;
762
+ int ret;
763
+
764
+ vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
765
+ if (V4L2_TYPE_IS_CAPTURE(vq->type) &&
766
+ (buf->flags & V4L2_BUF_FLAG_REQUEST_FD)) {
767
+ dprintk("%s: requests cannot be used with capture buffers\n",
768
+ __func__);
769
+ return -EPERM;
770
+ }
771
+
772
+ ret = vb2_qbuf(vq, vdev->v4l2_dev->mdev, buf);
773
+ if (ret)
774
+ return ret;
775
+
776
+ /* Adjust MMAP memory offsets for the CAPTURE queue */
777
+ v4l2_m2m_adjust_mem_offset(vq, buf);
778
+
779
+ /*
780
+ * If the capture queue is streaming, but streaming hasn't started
781
+ * on the device and the device was asked to stop, mark the previously queued
782
+ * buffer as DONE with LAST flag since it won't be queued on the
783
+ * device.
784
+ */
785
+ if (V4L2_TYPE_IS_CAPTURE(vq->type) &&
786
+ vb2_is_streaming(vq) && !vb2_start_streaming_called(vq) &&
787
+ (v4l2_m2m_has_stopped(m2m_ctx) || v4l2_m2m_dst_buf_is_last(m2m_ctx)))
788
+ v4l2_m2m_force_last_buf_done(m2m_ctx, vq);
789
+ else if (!(buf->flags & V4L2_BUF_FLAG_IN_REQUEST))
482790 v4l2_m2m_try_schedule(m2m_ctx);
483791
484
- return ret;
792
+ return 0;
485793 }
486794 EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf);
487795
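The draining helpers above rely on the codec driver flagging the last CAPTURE buffer itself once the OUTPUT queue runs dry, exactly as the comments describe. A rough sketch of a CAPTURE-side buf_queue callback doing that (mydrv_ names and the fh.m2m_ctx layout are assumptions):

static void mydrv_buf_queue(struct vb2_buffer *vb)
{
        struct mydrv_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
        struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);

        if (V4L2_TYPE_IS_CAPTURE(vb->vb2_queue->type) &&
            vb2_is_streaming(vb->vb2_queue) &&
            v4l2_m2m_dst_buf_is_last(ctx->fh.m2m_ctx)) {
                /* Draining and no OUTPUT left: this buffer becomes LAST. */
                v4l2_m2m_last_buffer_done(ctx->fh.m2m_ctx, vbuf);
                return;
        }

        v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
}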
@@ -489,24 +797,36 @@
489797 struct v4l2_buffer *buf)
490798 {
491799 struct vb2_queue *vq;
800
+ int ret;
492801
493802 vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
494
- return vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
803
+ ret = vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
804
+ if (ret)
805
+ return ret;
806
+
807
+ /* Adjust MMAP memory offsets for the CAPTURE queue */
808
+ v4l2_m2m_adjust_mem_offset(vq, buf);
809
+
810
+ return 0;
495811 }
496812 EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);
497813
498814 int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
499815 struct v4l2_buffer *buf)
500816 {
817
+ struct video_device *vdev = video_devdata(file);
501818 struct vb2_queue *vq;
502819 int ret;
503820
504821 vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
505
- ret = vb2_prepare_buf(vq, buf);
506
- if (!ret)
507
- v4l2_m2m_try_schedule(m2m_ctx);
822
+ ret = vb2_prepare_buf(vq, vdev->v4l2_dev->mdev, buf);
823
+ if (ret)
824
+ return ret;
508825
509
- return ret;
826
+ /* Adjust MMAP memory offsets for the CAPTURE queue */
827
+ v4l2_m2m_adjust_mem_offset(vq, buf);
828
+
829
+ return 0;
510830 }
511831 EXPORT_SYMBOL_GPL(v4l2_m2m_prepare_buf);
512832
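For reference, the offset adjustment applied by the *buf helpers above is what lets one video node expose both queues through a single mmap() range. A hedged userspace sketch (error handling reduced to the minimum) of mapping a CAPTURE buffer; the DST_QUEUE_OFF_BASE bias is internal and simply comes back in buf.m.offset:

#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/videodev2.h>

static void *map_capture_buffer(int fd, unsigned int index, size_t *len)
{
        struct v4l2_buffer buf = {
                .type   = V4L2_BUF_TYPE_VIDEO_CAPTURE,
                .memory = V4L2_MEMORY_MMAP,
                .index  = index,
        };

        if (ioctl(fd, VIDIOC_QUERYBUF, &buf) < 0)
                return MAP_FAILED;

        /*
         * buf.m.offset already includes the DST_QUEUE_OFF_BASE bias added by
         * v4l2_m2m_adjust_mem_offset(), so one mmap() path serves both queues.
         */
        *len = buf.length;
        return mmap(NULL, buf.length, PROT_READ | PROT_WRITE, MAP_SHARED,
                    fd, buf.m.offset);
}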
@@ -585,80 +905,66 @@
585905 }
586906 EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff);
587907
588
-__poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
589
- struct poll_table_struct *wait)
908
+static __poll_t v4l2_m2m_poll_for_data(struct file *file,
909
+ struct v4l2_m2m_ctx *m2m_ctx,
910
+ struct poll_table_struct *wait)
590911 {
591
- struct video_device *vfd = video_devdata(file);
592
- __poll_t req_events = poll_requested_events(wait);
593912 struct vb2_queue *src_q, *dst_q;
594
- struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
595913 __poll_t rc = 0;
596914 unsigned long flags;
597915
598
- if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
599
- struct v4l2_fh *fh = file->private_data;
600
-
601
- if (v4l2_event_pending(fh))
602
- rc = EPOLLPRI;
603
- else if (req_events & EPOLLPRI)
604
- poll_wait(file, &fh->wait, wait);
605
- if (!(req_events & (EPOLLOUT | EPOLLWRNORM | EPOLLIN | EPOLLRDNORM)))
606
- return rc;
607
- }
608
-
609916 src_q = v4l2_m2m_get_src_vq(m2m_ctx);
610917 dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);
918
+
919
+ poll_wait(file, &src_q->done_wq, wait);
920
+ poll_wait(file, &dst_q->done_wq, wait);
611921
612922 /*
613923 * There has to be at least one buffer queued on each queued_list, which
614924 * means either in driver already or waiting for driver to claim it
615925 * and start processing.
616926 */
617
- if ((!src_q->streaming || list_empty(&src_q->queued_list))
618
- && (!dst_q->streaming || list_empty(&dst_q->queued_list))) {
619
- rc |= EPOLLERR;
620
- goto end;
621
- }
622
-
623
- spin_lock_irqsave(&src_q->done_lock, flags);
624
- if (list_empty(&src_q->done_list))
625
- poll_wait(file, &src_q->done_wq, wait);
626
- spin_unlock_irqrestore(&src_q->done_lock, flags);
627
-
628
- spin_lock_irqsave(&dst_q->done_lock, flags);
629
- if (list_empty(&dst_q->done_list)) {
630
- /*
631
- * If the last buffer was dequeued from the capture queue,
632
- * return immediately. DQBUF will return -EPIPE.
633
- */
634
- if (dst_q->last_buffer_dequeued) {
635
- spin_unlock_irqrestore(&dst_q->done_lock, flags);
636
- return rc | EPOLLIN | EPOLLRDNORM;
637
- }
638
-
639
- poll_wait(file, &dst_q->done_wq, wait);
640
- }
641
- spin_unlock_irqrestore(&dst_q->done_lock, flags);
927
+ if ((!src_q->streaming || src_q->error ||
928
+ list_empty(&src_q->queued_list)) &&
929
+ (!dst_q->streaming || dst_q->error ||
930
+ (list_empty(&dst_q->queued_list) && !dst_q->last_buffer_dequeued)))
931
+ return EPOLLERR;
642932
643933 spin_lock_irqsave(&src_q->done_lock, flags);
644934 if (!list_empty(&src_q->done_list))
645
- src_vb = list_first_entry(&src_q->done_list, struct vb2_buffer,
646
- done_entry);
647
- if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE
648
- || src_vb->state == VB2_BUF_STATE_ERROR))
649935 rc |= EPOLLOUT | EPOLLWRNORM;
650936 spin_unlock_irqrestore(&src_q->done_lock, flags);
651937
652938 spin_lock_irqsave(&dst_q->done_lock, flags);
653
- if (!list_empty(&dst_q->done_list))
654
- dst_vb = list_first_entry(&dst_q->done_list, struct vb2_buffer,
655
- done_entry);
656
- if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE
657
- || dst_vb->state == VB2_BUF_STATE_ERROR))
939
+ /*
940
+ * If the last buffer was dequeued from the capture queue, signal
941
+ * userspace. DQBUF(CAPTURE) will return -EPIPE.
942
+ */
943
+ if (!list_empty(&dst_q->done_list) || dst_q->last_buffer_dequeued)
658944 rc |= EPOLLIN | EPOLLRDNORM;
659945 spin_unlock_irqrestore(&dst_q->done_lock, flags);
660946
661
-end:
947
+ return rc;
948
+}
949
+
950
+__poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
951
+ struct poll_table_struct *wait)
952
+{
953
+ struct video_device *vfd = video_devdata(file);
954
+ __poll_t req_events = poll_requested_events(wait);
955
+ __poll_t rc = 0;
956
+
957
+ if (req_events & (EPOLLOUT | EPOLLWRNORM | EPOLLIN | EPOLLRDNORM))
958
+ rc = v4l2_m2m_poll_for_data(file, m2m_ctx, wait);
959
+
960
+ if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
961
+ struct v4l2_fh *fh = file->private_data;
962
+
963
+ poll_wait(file, &fh->wait, wait);
964
+ if (v4l2_event_pending(fh))
965
+ rc |= EPOLLPRI;
966
+ }
967
+
662968 return rc;
663969 }
664970 EXPORT_SYMBOL_GPL(v4l2_m2m_poll);
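In userspace terms, the rework above means: EPOLLOUT/EPOLLWRNORM when an OUTPUT buffer can be dequeued, EPOLLIN/EPOLLRDNORM when a CAPTURE buffer is done (or the last buffer was already dequeued, in which case DQBUF returns -EPIPE), EPOLLERR only when neither queue has buffers queued, and EPOLLPRI for pending events. A hedged sketch with hypothetical handler helpers:

#include <poll.h>

static void wait_for_io(int fd)
{
        struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLOUT | POLLPRI };

        if (poll(&pfd, 1, -1) <= 0)
                return;
        if (pfd.revents & POLLOUT)
                dequeue_output_buffer(fd);      /* hypothetical helper */
        if (pfd.revents & POLLIN)
                dequeue_capture_buffer(fd);     /* hypothetical; -EPIPE ends the drain */
        if (pfd.revents & POLLPRI)
                dequeue_event(fd);              /* hypothetical helper */
}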
@@ -863,6 +1169,7 @@
8631169 m2m_dev->m2m_ops = m2m_ops;
8641170 INIT_LIST_HEAD(&m2m_dev->job_queue);
8651171 spin_lock_init(&m2m_dev->job_spinlock);
1172
+ INIT_WORK(&m2m_dev->job_work, v4l2_m2m_device_run_work);
8661173
8671174 return m2m_dev;
8681175 }
@@ -905,12 +1212,14 @@
9051212 if (ret)
9061213 goto err;
9071214 /*
908
- * If both queues use same mutex assign it as the common buffer
909
- * queues lock to the m2m context. This lock is used in the
910
- * v4l2_m2m_ioctl_* helpers.
1215
+ * Both queues should use the same mutex to lock the m2m context.
1216
+ * This lock is used in some v4l2_m2m_* helpers.
9111217 */
912
- if (out_q_ctx->q.lock == cap_q_ctx->q.lock)
913
- m2m_ctx->q_lock = out_q_ctx->q.lock;
1218
+ if (WARN_ON(out_q_ctx->q.lock != cap_q_ctx->q.lock)) {
1219
+ ret = -EINVAL;
1220
+ goto err;
1221
+ }
1222
+ m2m_ctx->q_lock = out_q_ctx->q.lock;
9141223
9151224 return m2m_ctx;
9161225 err:
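Since v4l2_m2m_ctx_init() now warns and bails out on split locks, a driver's queue_init callback has to point both vb2 queues at one mutex; a minimal sketch (the dev_mutex field is a hypothetical per-device lock):

        /* In the driver's queue_init callback: both queues share one lock. */
        src_vq->lock = &ctx->dev->dev_mutex;
        dst_vq->lock = &ctx->dev->dev_mutex;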
@@ -949,6 +1258,73 @@
9491258 spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
9501259 }
9511260 EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);
1261
+
1262
+void v4l2_m2m_buf_copy_metadata(const struct vb2_v4l2_buffer *out_vb,
1263
+ struct vb2_v4l2_buffer *cap_vb,
1264
+ bool copy_frame_flags)
1265
+{
1266
+ u32 mask = V4L2_BUF_FLAG_TIMECODE | V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
1267
+
1268
+ if (copy_frame_flags)
1269
+ mask |= V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_PFRAME |
1270
+ V4L2_BUF_FLAG_BFRAME;
1271
+
1272
+ cap_vb->vb2_buf.timestamp = out_vb->vb2_buf.timestamp;
1273
+
1274
+ if (out_vb->flags & V4L2_BUF_FLAG_TIMECODE)
1275
+ cap_vb->timecode = out_vb->timecode;
1276
+ cap_vb->field = out_vb->field;
1277
+ cap_vb->flags &= ~mask;
1278
+ cap_vb->flags |= out_vb->flags & mask;
1279
+ cap_vb->vb2_buf.copied_timestamp = 1;
1280
+}
1281
+EXPORT_SYMBOL_GPL(v4l2_m2m_buf_copy_metadata);
1282
+
1283
+void v4l2_m2m_request_queue(struct media_request *req)
1284
+{
1285
+ struct media_request_object *obj, *obj_safe;
1286
+ struct v4l2_m2m_ctx *m2m_ctx = NULL;
1287
+
1288
+ /*
1289
+ * Queue all objects. Note that buffer objects are at the end of the
1290
+ * objects list, after all other object types. Once buffer objects
1291
+ * are queued, the driver might delete them immediately (if the driver
1292
+ * processes the buffer at once), so we have to use
1293
+ * list_for_each_entry_safe() to handle the case where the object we
1294
+ * queue is deleted.
1295
+ */
1296
+ list_for_each_entry_safe(obj, obj_safe, &req->objects, list) {
1297
+ struct v4l2_m2m_ctx *m2m_ctx_obj;
1298
+ struct vb2_buffer *vb;
1299
+
1300
+ if (!obj->ops->queue)
1301
+ continue;
1302
+
1303
+ if (vb2_request_object_is_buffer(obj)) {
1304
+ /* Sanity checks */
1305
+ vb = container_of(obj, struct vb2_buffer, req_obj);
1306
+ WARN_ON(!V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type));
1307
+ m2m_ctx_obj = container_of(vb->vb2_queue,
1308
+ struct v4l2_m2m_ctx,
1309
+ out_q_ctx.q);
1310
+ WARN_ON(m2m_ctx && m2m_ctx_obj != m2m_ctx);
1311
+ m2m_ctx = m2m_ctx_obj;
1312
+ }
1313
+
1314
+ /*
1315
+ * The buffer we queue here can in theory be immediately
1316
+ * unbound, hence the use of list_for_each_entry_safe()
1317
+ * above and why we call the queue op last.
1318
+ */
1319
+ obj->ops->queue(obj);
1320
+ }
1321
+
1322
+ WARN_ON(!m2m_ctx);
1323
+
1324
+ if (m2m_ctx)
1325
+ v4l2_m2m_try_schedule(m2m_ctx);
1326
+}
1327
+EXPORT_SYMBOL_GPL(v4l2_m2m_request_queue);
9521328
9531329 /* Videobuf2 ioctl helpers */
9541330
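v4l2_m2m_buf_copy_metadata() is intended to be called by the driver once it picks up a job, so the CAPTURE buffer inherits the OUTPUT buffer's timestamp, timecode and (optionally) frame flags. A rough device_run sketch with hypothetical mydrv_ names:

static void mydrv_device_run(void *priv)
{
        struct mydrv_ctx *ctx = priv;   /* hypothetical driver context */
        struct vb2_v4l2_buffer *src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
        struct vb2_v4l2_buffer *dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);

        /* Copy timestamp/timecode; pass true to also copy KEY/P/B frame flags. */
        v4l2_m2m_buf_copy_metadata(src, dst, true);

        /* ... program and kick the hardware, finish via the IRQ path ... */
}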
@@ -1033,6 +1409,152 @@
10331409 }
10341410 EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamoff);
10351411
1412
+int v4l2_m2m_ioctl_try_encoder_cmd(struct file *file, void *fh,
1413
+ struct v4l2_encoder_cmd *ec)
1414
+{
1415
+ if (ec->cmd != V4L2_ENC_CMD_STOP && ec->cmd != V4L2_ENC_CMD_START)
1416
+ return -EINVAL;
1417
+
1418
+ ec->flags = 0;
1419
+ return 0;
1420
+}
1421
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_try_encoder_cmd);
1422
+
1423
+int v4l2_m2m_ioctl_try_decoder_cmd(struct file *file, void *fh,
1424
+ struct v4l2_decoder_cmd *dc)
1425
+{
1426
+ if (dc->cmd != V4L2_DEC_CMD_STOP && dc->cmd != V4L2_DEC_CMD_START)
1427
+ return -EINVAL;
1428
+
1429
+ dc->flags = 0;
1430
+
1431
+ if (dc->cmd == V4L2_DEC_CMD_STOP) {
1432
+ dc->stop.pts = 0;
1433
+ } else if (dc->cmd == V4L2_DEC_CMD_START) {
1434
+ dc->start.speed = 0;
1435
+ dc->start.format = V4L2_DEC_START_FMT_NONE;
1436
+ }
1437
+ return 0;
1438
+}
1439
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_try_decoder_cmd);
1440
+
1441
+/*
1442
+ * Updates the encoding state on ENC_CMD_STOP/ENC_CMD_START
1443
+ * Should be called from the encoder driver encoder_cmd() callback
1444
+ */
1445
+int v4l2_m2m_encoder_cmd(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
1446
+ struct v4l2_encoder_cmd *ec)
1447
+{
1448
+ if (ec->cmd != V4L2_ENC_CMD_STOP && ec->cmd != V4L2_ENC_CMD_START)
1449
+ return -EINVAL;
1450
+
1451
+ if (ec->cmd == V4L2_ENC_CMD_STOP)
1452
+ return v4l2_update_last_buf_state(m2m_ctx);
1453
+
1454
+ if (m2m_ctx->is_draining)
1455
+ return -EBUSY;
1456
+
1457
+ if (m2m_ctx->has_stopped)
1458
+ m2m_ctx->has_stopped = false;
1459
+
1460
+ return 0;
1461
+}
1462
+EXPORT_SYMBOL_GPL(v4l2_m2m_encoder_cmd);
1463
+
1464
+/*
1465
+ * Updates the decoding state on DEC_CMD_STOP/DEC_CMD_START
1466
+ * Should be called from the decoder driver decoder_cmd() callback
1467
+ */
1468
+int v4l2_m2m_decoder_cmd(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
1469
+ struct v4l2_decoder_cmd *dc)
1470
+{
1471
+ if (dc->cmd != V4L2_DEC_CMD_STOP && dc->cmd != V4L2_DEC_CMD_START)
1472
+ return -EINVAL;
1473
+
1474
+ if (dc->cmd == V4L2_DEC_CMD_STOP)
1475
+ return v4l2_update_last_buf_state(m2m_ctx);
1476
+
1477
+ if (m2m_ctx->is_draining)
1478
+ return -EBUSY;
1479
+
1480
+ if (m2m_ctx->has_stopped)
1481
+ m2m_ctx->has_stopped = false;
1482
+
1483
+ return 0;
1484
+}
1485
+EXPORT_SYMBOL_GPL(v4l2_m2m_decoder_cmd);
1486
+
1487
+int v4l2_m2m_ioctl_encoder_cmd(struct file *file, void *priv,
1488
+ struct v4l2_encoder_cmd *ec)
1489
+{
1490
+ struct v4l2_fh *fh = file->private_data;
1491
+
1492
+ return v4l2_m2m_encoder_cmd(file, fh->m2m_ctx, ec);
1493
+}
1494
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_encoder_cmd);
1495
+
1496
+int v4l2_m2m_ioctl_decoder_cmd(struct file *file, void *priv,
1497
+ struct v4l2_decoder_cmd *dc)
1498
+{
1499
+ struct v4l2_fh *fh = file->private_data;
1500
+
1501
+ return v4l2_m2m_decoder_cmd(file, fh->m2m_ctx, dc);
1502
+}
1503
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_decoder_cmd);
1504
+
1505
+int v4l2_m2m_ioctl_stateless_try_decoder_cmd(struct file *file, void *fh,
1506
+ struct v4l2_decoder_cmd *dc)
1507
+{
1508
+ if (dc->cmd != V4L2_DEC_CMD_FLUSH)
1509
+ return -EINVAL;
1510
+
1511
+ dc->flags = 0;
1512
+
1513
+ return 0;
1514
+}
1515
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_stateless_try_decoder_cmd);
1516
+
1517
+int v4l2_m2m_ioctl_stateless_decoder_cmd(struct file *file, void *priv,
1518
+ struct v4l2_decoder_cmd *dc)
1519
+{
1520
+ struct v4l2_fh *fh = file->private_data;
1521
+ struct vb2_v4l2_buffer *out_vb, *cap_vb;
1522
+ struct v4l2_m2m_dev *m2m_dev = fh->m2m_ctx->m2m_dev;
1523
+ unsigned long flags;
1524
+ int ret;
1525
+
1526
+ ret = v4l2_m2m_ioctl_stateless_try_decoder_cmd(file, priv, dc);
1527
+ if (ret < 0)
1528
+ return ret;
1529
+
1530
+ spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
1531
+ out_vb = v4l2_m2m_last_src_buf(fh->m2m_ctx);
1532
+ cap_vb = v4l2_m2m_last_dst_buf(fh->m2m_ctx);
1533
+
1534
+ /*
1535
+ * If there is an out buffer pending, then clear any HOLD flag.
1536
+ *
1537
+ * By clearing this flag we ensure that when this output
1538
+ * buffer is processed any held capture buffer will be released.
1539
+ */
1540
+ if (out_vb) {
1541
+ out_vb->flags &= ~V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF;
1542
+ } else if (cap_vb && cap_vb->is_held) {
1543
+ /*
1544
+ * If there were no output buffers, but there is a
1545
+ * capture buffer that is held, then release that
1546
+ * buffer.
1547
+ */
1548
+ cap_vb->is_held = false;
1549
+ v4l2_m2m_dst_buf_remove(fh->m2m_ctx);
1550
+ v4l2_m2m_buf_done(cap_vb, VB2_BUF_STATE_DONE);
1551
+ }
1552
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
1553
+
1554
+ return 0;
1555
+}
1556
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_stateless_decoder_cmd);
1557
+
10361558 /*
10371559 * v4l2_file_operations helpers. It is assumed here same lock is used
10381560 * for the output and the capture buffer queue.
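The new command helpers plug directly into a codec driver's v4l2_ioctl_ops; a hedged sketch for a stateful decoder (all other ops elided):

static const struct v4l2_ioctl_ops mydrv_ioctl_ops = {
        /* ... format and buffer ops elided ... */
        .vidioc_try_decoder_cmd = v4l2_m2m_ioctl_try_decoder_cmd,
        .vidioc_decoder_cmd     = v4l2_m2m_ioctl_decoder_cmd,
        .vidioc_streamon        = v4l2_m2m_ioctl_streamon,
        .vidioc_streamoff       = v4l2_m2m_ioctl_streamoff,
};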