2024-01-03 2f7c68cb55ecb7331f2381deb497c27155f32faf
kernel/drivers/media/platform/rockchip/hdmirx/rk_hdmirx.c
@@ -5,11 +5,11 @@
  * Author: Dingxian Wen <shawn.wen@rock-chips.com>
  */
 
-#include <dt-bindings/soc/rockchip-system-status.h>
 #include <linux/clk.h>
 #include <linux/cpufreq.h>
 #include <linux/debugfs.h>
 #include <linux/delay.h>
+#include <linux/dma-fence.h>
 #include <linux/dma-mapping.h>
 #include <linux/extcon-provider.h>
 #include <linux/fs.h>
@@ -30,11 +30,13 @@
 #include <linux/rk_hdmirx_config.h>
 #include <linux/rockchip/rockchip_sip.h>
 #include <linux/seq_file.h>
+#include <linux/sync_file.h>
 #include <linux/v4l2-dv-timings.h>
 #include <linux/workqueue.h>
 #include <media/cec.h>
 #include <media/cec-notifier.h>
 #include <media/v4l2-common.h>
+#include <media/v4l2-controls_rockchip.h>
 #include <media/v4l2-ctrls.h>
 #include <media/v4l2-device.h>
 #include <media/v4l2-dv-timings.h>
@@ -52,7 +54,11 @@
 
 static int debug;
 module_param(debug, int, 0644);
-MODULE_PARM_DESC(debug, "debug level (0-3)");
+MODULE_PARM_DESC(debug, "debug level (0-4)");
+
+static bool low_latency;
+module_param(low_latency, bool, 0644);
+MODULE_PARM_DESC(low_latency, "enable low latency mode (0-1)");
 
 #define RK_HDMIRX_DRVNAME		"rk_hdmirx"
 #define EDID_NUM_BLOCKS_MAX		2
@@ -143,6 +149,12 @@
 	enum hdmirx_reg_attr attr;
 };
 
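+/* Per-device dma-fence timeline: 'context' identifies it, 'seqno' advances with each fence allocated on it. */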
+struct hdmirx_fence_context {
+	u64 context;
+	u64 seqno;
+	spinlock_t spinlock;
+};
+
 struct hdmirx_buffer {
 	struct vb2_v4l2_buffer vb;
 	struct list_head queue;
@@ -177,6 +189,12 @@
 	u32 irq_stat;
 };
 
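+/* One entry per fence exported to userspace: the dma_fence plus its installed fd, linked on the qbuf/done lists. */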
+struct hdmirx_fence {
+	struct list_head fence_list;
+	struct dma_fence *fence;
+	int fence_fd;
+};
+
 struct rk_hdmirx_dev {
 	struct cec_notifier *cec_notifier;
 	struct cpufreq_policy *policy;
@@ -188,6 +206,8 @@
 	struct v4l2_device v4l2_dev;
 	struct v4l2_ctrl_handler hdl;
 	struct v4l2_ctrl *detect_tx_5v_ctrl;
+	struct v4l2_ctrl *audio_sampling_rate_ctrl;
+	struct v4l2_ctrl *audio_present_ctrl;
 	struct v4l2_dv_timings timings;
 	struct gpio_desc *hdmirx_det_gpio;
 	struct work_struct work_wdt_config;
@@ -201,6 +221,7 @@
 	struct hdmirx_audiostate audio_state;
 	struct extcon_dev *extcon;
 	struct hdmirx_cec *cec;
+	struct hdmirx_fence_context fence_ctx;
 	struct mutex stream_lock;
 	struct mutex work_lock;
 	struct pm_qos_request pm_qos;
@@ -212,6 +233,9 @@
 	struct regmap *grf;
 	struct regmap *vo1_grf;
 	struct rk_hdmirx_hdcp *hdcp;
+	struct hdmirx_fence *hdmirx_fence;
+	struct list_head qbuf_fence_list_head;
+	struct list_head done_fence_list_head;
 	void __iomem *regs;
 	int edid_version;
 	int audio_present;
@@ -243,11 +267,13 @@
 	u32 color_depth;
 	u32 cpu_freq_khz;
 	u32 bound_cpu;
+	u32 phy_cpuid;
 	u32 fps;
 	u32 wdt_cfg_bound_cpu;
 	u8 edid[EDID_BLOCK_SIZE * 2];
 	hdmi_codec_plugged_cb plugged_cb;
 	spinlock_t rst_lock;
+	spinlock_t fence_lock;
 };
 
 static const unsigned int hdmirx_extcon_cable[] = {
@@ -410,6 +436,64 @@
 	return val;
 }
 
+static const char *hdmirx_fence_get_name(struct dma_fence *fence)
+{
+	return RK_HDMIRX_DRVNAME;
+}
+
+static const struct dma_fence_ops hdmirx_fence_ops = {
+	.get_driver_name = hdmirx_fence_get_name,
+	.get_timeline_name = hdmirx_fence_get_name,
+};
+
+static void hdmirx_fence_context_init(struct hdmirx_fence_context *fence_ctx)
+{
+	fence_ctx->context = dma_fence_context_alloc(1);
+	spin_lock_init(&fence_ctx->spinlock);
+}
+
+static struct dma_fence *hdmirx_dma_fence_alloc(struct hdmirx_fence_context *fence_ctx)
+{
+	struct dma_fence *fence = NULL;
+
+	if (fence_ctx == NULL) {
+		pr_err("fence_context is NULL!\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+	if (!fence)
+		return ERR_PTR(-ENOMEM);
+
+	dma_fence_init(fence, &hdmirx_fence_ops, &fence_ctx->spinlock,
+		       fence_ctx->context, ++fence_ctx->seqno);
+
+	return fence;
+}
+
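+/*
+ * Wrap a fence in a sync_file and install an fd for userspace;
+ * the sync_file takes its own reference on the fence.
+ */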
+static int hdmirx_dma_fence_get_fd(struct dma_fence *fence)
+{
+	struct sync_file *sync_file = NULL;
+	int fence_fd = -1;
+
+	if (!fence)
+		return -EINVAL;
+
+	fence_fd = get_unused_fd_flags(O_CLOEXEC);
+	if (fence_fd < 0)
+		return fence_fd;
+
+	sync_file = sync_file_create(fence);
+	if (!sync_file) {
+		put_unused_fd(fence_fd);
+		return -ENOMEM;
+	}
+
+	fd_install(fence_fd, sync_file->file);
+
+	return fence_fd;
+}
+
 static void hdmirx_reset_dma(struct rk_hdmirx_dev *hdmirx_dev)
 {
 	unsigned long lock_flags = 0;
@@ -471,6 +555,7 @@
 	case V4L2_EVENT_CTRL:
 		return v4l2_ctrl_subscribe_event(fh, sub);
 	case RK_HDMIRX_V4L2_EVENT_SIGNAL_LOST:
+	case RK_HDMIRX_V4L2_EVENT_AUDIOINFO:
 		return v4l2_event_subscribe(fh, sub, 0, NULL);
 
 	default:
@@ -1931,6 +2016,39 @@
 	return 0;
 }
 
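+/*
+ * Allocate one fence per queued buffer, export it as an fd, and park it on
+ * qbuf_fence_list until the buffer is handed back to userspace.
+ */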
+static void hdmirx_qbuf_alloc_fence(struct rk_hdmirx_dev *hdmirx_dev)
+{
+	struct dma_fence *fence;
+	int fence_fd;
+	struct hdmirx_fence *hdmirx_fence;
+	unsigned long lock_flags = 0;
+	struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
+
+	fence = hdmirx_dma_fence_alloc(&hdmirx_dev->fence_ctx);
+	if (!IS_ERR(fence)) {
+		fence_fd = hdmirx_dma_fence_get_fd(fence);
+		if (fence_fd >= 0) {
+			hdmirx_fence = kzalloc(sizeof(struct hdmirx_fence), GFP_KERNEL);
+			if (!hdmirx_fence) {
+				v4l2_err(v4l2_dev, "%s: failed to alloc hdmirx_fence!\n", __func__);
+				return;
+			}
+			hdmirx_fence->fence = fence;
+			hdmirx_fence->fence_fd = fence_fd;
+			spin_lock_irqsave(&hdmirx_dev->fence_lock, lock_flags);
+			list_add_tail(&hdmirx_fence->fence_list, &hdmirx_dev->qbuf_fence_list_head);
+			spin_unlock_irqrestore(&hdmirx_dev->fence_lock, lock_flags);
+			v4l2_dbg(3, debug, v4l2_dev, "%s: fence:%p, fence_fd:%d\n",
+				 __func__, fence, fence_fd);
+		} else {
+			dma_fence_put(fence);
+			v4l2_err(v4l2_dev, "%s: failed to get fence fd!\n", __func__);
+		}
+	} else {
+		v4l2_err(v4l2_dev, "%s: alloc fence failed!\n", __func__);
+	}
+}
+
 /*
  * The vb2_buffer are stored in hdmirx_buffer, in order to unify
  * mplane buffer and none-mplane buffer.
@@ -1945,6 +2063,8 @@
 	const struct hdmirx_output_fmt *out_fmt;
 	unsigned long lock_flags = 0;
 	int i;
+	struct rk_hdmirx_dev *hdmirx_dev;
+	struct v4l2_device *v4l2_dev;
 
 	if (vb == NULL) {
 		pr_err("%s: vb null pointer err!\n", __func__);
@@ -1957,6 +2077,9 @@
 	stream = vb2_get_drv_priv(queue);
 	pixm = &stream->pixm;
 	out_fmt = stream->out_fmt;
+
+	hdmirx_dev = stream->hdmirx_dev;
+	v4l2_dev = &hdmirx_dev->v4l2_dev;
 
 	memset(hdmirx_buf->buff_addr, 0, sizeof(hdmirx_buf->buff_addr));
 	/*
@@ -1979,9 +2102,58 @@
 		}
 	}
 
+	v4l2_dbg(4, debug, v4l2_dev, "qbuf fd:%d\n", vb->planes[0].m.fd);
+
 	spin_lock_irqsave(&stream->vbq_lock, lock_flags);
 	list_add_tail(&hdmirx_buf->queue, &stream->buf_head);
 	spin_unlock_irqrestore(&stream->vbq_lock, lock_flags);
+
+	if (low_latency)
+		hdmirx_qbuf_alloc_fence(hdmirx_dev);
+}
+
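+/*
+ * Signal and drop the fence currently held for the last dequeued buffer,
+ * then release every fence still pending on the qbuf and done lists.
+ * Called when all buffers are returned (stream stop / error).
+ */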
+static void hdmirx_free_fence(struct rk_hdmirx_dev *hdmirx_dev)
+{
+	unsigned long lock_flags = 0;
+	struct hdmirx_fence *vb_fence, *done_fence;
+	struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
+	LIST_HEAD(local_list);
+
+	spin_lock_irqsave(&hdmirx_dev->fence_lock, lock_flags);
+	if (hdmirx_dev->hdmirx_fence) {
+		v4l2_dbg(2, debug, v4l2_dev, "%s: signal hdmirx_fence fd:%d\n",
+			 __func__, hdmirx_dev->hdmirx_fence->fence_fd);
+		dma_fence_signal(hdmirx_dev->hdmirx_fence->fence);
+		dma_fence_put(hdmirx_dev->hdmirx_fence->fence);
+		kfree(hdmirx_dev->hdmirx_fence);
+		hdmirx_dev->hdmirx_fence = NULL;
+	}
+
+	list_replace_init(&hdmirx_dev->qbuf_fence_list_head, &local_list);
+	spin_unlock_irqrestore(&hdmirx_dev->fence_lock, lock_flags);
+
+	while (!list_empty(&local_list)) {
+		vb_fence = list_first_entry(&local_list, struct hdmirx_fence, fence_list);
+		list_del(&vb_fence->fence_list);
+		v4l2_dbg(2, debug, v4l2_dev, "%s: free qbuf_fence fd:%d\n",
+			 __func__, vb_fence->fence_fd);
+		dma_fence_put(vb_fence->fence);
+		put_unused_fd(vb_fence->fence_fd);
+		kfree(vb_fence);
+	}
+
+	spin_lock_irqsave(&hdmirx_dev->fence_lock, lock_flags);
+	list_replace_init(&hdmirx_dev->done_fence_list_head, &local_list);
+	spin_unlock_irqrestore(&hdmirx_dev->fence_lock, lock_flags);
+	while (!list_empty(&local_list)) {
+		done_fence = list_first_entry(&local_list, struct hdmirx_fence, fence_list);
+		list_del(&done_fence->fence_list);
+		v4l2_dbg(2, debug, v4l2_dev, "%s: free done_fence fd:%d\n",
+			 __func__, done_fence->fence_fd);
+		dma_fence_put(done_fence->fence);
+		put_unused_fd(done_fence->fence_fd);
+		kfree(done_fence);
+	}
 }
 
 static void return_all_buffers(struct hdmirx_stream *stream,
@@ -1989,6 +2161,7 @@
 {
 	struct hdmirx_buffer *buf;
 	unsigned long flags;
+	struct rk_hdmirx_dev *hdmirx_dev = stream->hdmirx_dev;
 
 	spin_lock_irqsave(&stream->vbq_lock, flags);
 	if (stream->curr_buf)
@@ -2007,6 +2180,8 @@
 		spin_lock_irqsave(&stream->vbq_lock, flags);
 	}
 	spin_unlock_irqrestore(&stream->vbq_lock, flags);
+
+	hdmirx_free_fence(hdmirx_dev);
 }
 
 static void hdmirx_stop_streaming(struct vb2_queue *queue)
@@ -2055,6 +2230,7 @@
 	struct v4l2_dv_timings timings = hdmirx_dev->timings;
 	struct v4l2_bt_timings *bt = &timings.bt;
 	int line_flag;
+	int delay_line;
 	uint32_t touch_flag;
 
 	if (!hdmirx_dev->get_timing) {
@@ -2069,7 +2245,7 @@
 	}
 
 	mutex_lock(&hdmirx_dev->stream_lock);
-	touch_flag = (hdmirx_dev->bound_cpu << 1) | 0x1;
+	touch_flag = (hdmirx_dev->phy_cpuid << 1) | 0x1;
 	sip_hdmirx_config(HDMIRX_AUTO_TOUCH_EN, 0, touch_flag, 100);
 	stream->frame_idx = 0;
 	stream->line_flag_int_cnt = 0;
@@ -2106,12 +2282,19 @@
 
 	if (bt->height) {
 		if (bt->interlaced == V4L2_DV_INTERLACED)
-			line_flag = bt->height / 4;
-		else
 			line_flag = bt->height / 2;
+		else
+			line_flag = bt->height;
+
+		if (low_latency && hdmirx_dev->fps >= 59)
+			delay_line = 10;
+		else
+			delay_line = line_flag * 2 / 3;
+
+		v4l2_info(v4l2_dev, "%s: delay_line:%d\n", __func__, delay_line);
 		hdmirx_update_bits(hdmirx_dev, DMA_CONFIG7,
 				   LINE_FLAG_NUM_MASK,
-				   LINE_FLAG_NUM(line_flag));
+				   LINE_FLAG_NUM(delay_line));
 	} else {
 		v4l2_err(v4l2_dev, "height err: %d\n", bt->height);
 	}
@@ -2170,6 +2353,57 @@
 	val = hdmirx_readl(hdmirx_dev, HDCP_INT_STATUS) & 0x40;
 
 	return val ? 1 : 0;
+}
+
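+/*
+ * On DQBUF, take the oldest fence from the done list and hold it as the
+ * in-flight fence; it is signalled from the next line-flag/DMA-idle interrupt,
+ * once the early-dequeued frame has been fully written.
+ */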
+static void hdmirx_dqbuf_get_done_fence(struct rk_hdmirx_dev *hdmirx_dev)
+{
+	unsigned long lock_flags = 0;
+	struct hdmirx_fence *done_fence;
+	struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
+
+	spin_lock_irqsave(&hdmirx_dev->fence_lock, lock_flags);
+	if (!list_empty(&hdmirx_dev->done_fence_list_head)) {
+		done_fence = list_first_entry(&hdmirx_dev->done_fence_list_head,
+					      struct hdmirx_fence, fence_list);
+		list_del(&done_fence->fence_list);
+	} else {
+		done_fence = NULL;
+	}
+	spin_unlock_irqrestore(&hdmirx_dev->fence_lock, lock_flags);
+
+	if (done_fence) {
+		spin_lock_irqsave(&hdmirx_dev->fence_lock, lock_flags);
+		if (hdmirx_dev->hdmirx_fence) {
+			v4l2_err(v4l2_dev, "%s: last fence not signal, signal now!\n", __func__);
+			dma_fence_signal(hdmirx_dev->hdmirx_fence->fence);
+			dma_fence_put(hdmirx_dev->hdmirx_fence->fence);
+			v4l2_dbg(2, debug, v4l2_dev, "%s: signal fence:%p, old_fd:%d\n",
+				 __func__,
+				 hdmirx_dev->hdmirx_fence->fence,
+				 hdmirx_dev->hdmirx_fence->fence_fd);
+			kfree(hdmirx_dev->hdmirx_fence);
+			hdmirx_dev->hdmirx_fence = NULL;
+		}
+		hdmirx_dev->hdmirx_fence = done_fence;
+		spin_unlock_irqrestore(&hdmirx_dev->fence_lock, lock_flags);
+		v4l2_dbg(3, debug, v4l2_dev, "%s: fence:%p, fence_fd:%d\n",
+			 __func__, done_fence->fence, done_fence->fence_fd);
+	}
+}
+
+static int hdmirx_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
+{
+	int ret;
+	struct hdmirx_stream *stream = video_drvdata(file);
+	struct rk_hdmirx_dev *hdmirx_dev = stream->hdmirx_dev;
+
+	if (!hdmirx_dev->get_timing)
+		return -EINVAL;
+
+	ret = vb2_ioctl_dqbuf(file, priv, p);
+	hdmirx_dqbuf_get_done_fence(hdmirx_dev);
+
+	return ret;
 }
 
 static long hdmirx_ioctl_default(struct file *file, void *fh,
@@ -2290,7 +2524,7 @@
 	.vidioc_create_bufs = vb2_ioctl_create_bufs,
 	.vidioc_qbuf = vb2_ioctl_qbuf,
 	.vidioc_expbuf = vb2_ioctl_expbuf,
-	.vidioc_dqbuf = vb2_ioctl_dqbuf,
+	.vidioc_dqbuf = hdmirx_dqbuf,
 	.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
 	.vidioc_streamon = vb2_ioctl_streamon,
 	.vidioc_streamoff = vb2_ioctl_streamoff,
@@ -2353,12 +2587,23 @@
 	return 0;
 }
 
+static void process_audio_change(struct rk_hdmirx_dev *hdmirx_dev)
+{
+	struct hdmirx_stream *stream = &hdmirx_dev->stream;
+	const struct v4l2_event evt_audio_info = {
+		.type = RK_HDMIRX_V4L2_EVENT_AUDIOINFO,
+	};
+	v4l2_event_queue(&stream->vdev, &evt_audio_info);
+}
+
 static void process_signal_change(struct rk_hdmirx_dev *hdmirx_dev)
 {
+	unsigned long lock_flags = 0;
 	struct hdmirx_stream *stream = &hdmirx_dev->stream;
 	const struct v4l2_event evt_signal_lost = {
 		.type = RK_HDMIRX_V4L2_EVENT_SIGNAL_LOST,
 	};
+	struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
 
 	hdmirx_dev->get_timing = false;
 	sip_hdmirx_config(HDMIRX_INFO_NOTIFY, 0, DMA_CONFIG6, 0);
@@ -2376,6 +2621,18 @@
 	v4l2_event_queue(&stream->vdev, &evt_signal_lost);
 	if (hdmirx_dev->hdcp && hdmirx_dev->hdcp->hdcp_stop)
 		hdmirx_dev->hdcp->hdcp_stop(hdmirx_dev->hdcp);
+	spin_lock_irqsave(&hdmirx_dev->fence_lock, lock_flags);
+	if (hdmirx_dev->hdmirx_fence) {
+		dma_fence_signal(hdmirx_dev->hdmirx_fence->fence);
+		dma_fence_put(hdmirx_dev->hdmirx_fence->fence);
+		v4l2_dbg(2, debug, v4l2_dev, "%s: signal fence:%p, old_fd:%d\n",
+			 __func__,
+			 hdmirx_dev->hdmirx_fence->fence,
+			 hdmirx_dev->hdmirx_fence->fence_fd);
+		kfree(hdmirx_dev->hdmirx_fence);
+		hdmirx_dev->hdmirx_fence = NULL;
+	}
+	spin_unlock_irqrestore(&hdmirx_dev->fence_lock, lock_flags);
 	schedule_delayed_work_on(hdmirx_dev->bound_cpu,
 				 &hdmirx_dev->delayed_work_res_change,
 				 msecs_to_jiffies(1000));
@@ -2608,6 +2865,8 @@
 {
 	const struct hdmirx_output_fmt *fmt = stream->out_fmt;
 	u32 i;
+	struct rk_hdmirx_dev *hdmirx_dev = stream->hdmirx_dev;
+	struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
 
 	/* Dequeue a filled buffer */
 	for (i = 0; i < fmt->mplanes; i++) {
@@ -2617,10 +2876,12 @@
 
 	vb_done->vb2_buf.timestamp = ktime_get_ns();
 	vb2_buffer_done(&vb_done->vb2_buf, VB2_BUF_STATE_DONE);
+	v4l2_dbg(4, debug, v4l2_dev, "vb_done fd:%d", vb_done->vb2_buf.planes[0].m.fd);
 }
 
 static void dma_idle_int_handler(struct rk_hdmirx_dev *hdmirx_dev, bool *handled)
 {
+	unsigned long lock_flags = 0;
 	struct hdmirx_stream *stream = &hdmirx_dev->stream;
 	struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
 	struct v4l2_dv_timings timings = hdmirx_dev->timings;
@@ -2630,6 +2891,22 @@
 	if (!(stream->irq_stat) && !(stream->irq_stat & LINE_FLAG_INT_EN))
 		v4l2_dbg(1, debug, v4l2_dev,
 			 "%s: last time have no line_flag_irq\n", __func__);
+
+	if (low_latency) {
+		spin_lock_irqsave(&hdmirx_dev->fence_lock, lock_flags);
+		if (hdmirx_dev->hdmirx_fence) {
+			dma_fence_signal(hdmirx_dev->hdmirx_fence->fence);
+			dma_fence_put(hdmirx_dev->hdmirx_fence->fence);
+			v4l2_dbg(3, debug, v4l2_dev, "%s: signal fence:%p, old_fd:%d\n",
+				 __func__,
+				 hdmirx_dev->hdmirx_fence->fence,
+				 hdmirx_dev->hdmirx_fence->fence_fd);
+			kfree(hdmirx_dev->hdmirx_fence);
+			hdmirx_dev->hdmirx_fence = NULL;
+		}
+		spin_unlock_irqrestore(&hdmirx_dev->fence_lock, lock_flags);
+		goto DMA_IDLE_OUT;
+	}
 
 	if (stream->line_flag_int_cnt <= FILTER_FRAME_CNT)
 		goto DMA_IDLE_OUT;
@@ -2643,6 +2920,9 @@
 	if (vb_done) {
 		vb_done->vb2_buf.timestamp = ktime_get_ns();
 		vb_done->sequence = stream->frame_idx;
+		/* userbits of 0 or 0xffffffff denote an invalid fence_fd */
+		memset(vb_done->timecode.userbits, 0xff,
+		       sizeof(vb_done->timecode.userbits));
 		hdmirx_vb_done(stream, vb_done);
 		stream->frame_idx++;
 		if (stream->frame_idx == 30)
@@ -2664,13 +2944,50 @@
 	*handled = true;
 }
 
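+/*
+ * Move the oldest fence allocated at QBUF time onto the done list and hand its
+ * fd to userspace through the buffer's timecode.userbits.
+ */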
+static void hdmirx_add_fence_to_vb_done(struct hdmirx_stream *stream,
+					struct vb2_v4l2_buffer *vb_done)
+{
+	unsigned long lock_flags = 0;
+	struct hdmirx_fence *vb_fence;
+	struct rk_hdmirx_dev *hdmirx_dev = stream->hdmirx_dev;
+	struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
+
+	spin_lock_irqsave(&hdmirx_dev->fence_lock, lock_flags);
+	if (!list_empty(&hdmirx_dev->qbuf_fence_list_head)) {
+		vb_fence = list_first_entry(&hdmirx_dev->qbuf_fence_list_head,
+					    struct hdmirx_fence, fence_list);
+		list_del(&vb_fence->fence_list);
+	} else {
+		vb_fence = NULL;
+	}
+
+	if (vb_fence)
+		list_add_tail(&vb_fence->fence_list, &hdmirx_dev->done_fence_list_head);
+	spin_unlock_irqrestore(&hdmirx_dev->fence_lock, lock_flags);
+
+	if (vb_fence) {
+		/* pass the fence_fd to userspace through timecode.userbits */
+		if (put_user(vb_fence->fence_fd, vb_done->timecode.userbits))
+			v4l2_err(v4l2_dev, "%s: failed to trans fence fd!\n", __func__);
+
+		v4l2_dbg(3, debug, v4l2_dev, "%s: fence:%p, fence_fd:%d\n",
+			 __func__, vb_fence->fence, vb_fence->fence_fd);
+	} else {
+		/* userbits of 0 or 0xffffffff denote an invalid fence_fd */
+		memset(vb_done->timecode.userbits, 0xff, sizeof(vb_done->timecode.userbits));
+		v4l2_err(v4l2_dev, "%s: failed to get fence fd!\n", __func__);
+	}
+}
+
 static void line_flag_int_handler(struct rk_hdmirx_dev *hdmirx_dev, bool *handled)
 {
+	unsigned long lock_flags = 0;
 	struct hdmirx_stream *stream = &hdmirx_dev->stream;
 	struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
 	struct v4l2_dv_timings timings = hdmirx_dev->timings;
 	struct v4l2_bt_timings *bt = &timings.bt;
 	u32 dma_cfg6;
+	struct vb2_v4l2_buffer *vb_done = NULL;
 
 	stream->line_flag_int_cnt++;
 	if (!(stream->irq_stat) && !(stream->irq_stat & HDMIRX_DMA_IDLE_INT))
@@ -2687,6 +3004,19 @@
 
 	if ((bt->interlaced != V4L2_DV_INTERLACED) ||
 	    (stream->line_flag_int_cnt % 2 == 0)) {
+		spin_lock_irqsave(&hdmirx_dev->fence_lock, lock_flags);
+		if (hdmirx_dev->hdmirx_fence) {
+			dma_fence_signal(hdmirx_dev->hdmirx_fence->fence);
+			dma_fence_put(hdmirx_dev->hdmirx_fence->fence);
+			v4l2_dbg(2, debug, v4l2_dev, "%s: signal last fence:%p, old_fd:%d\n",
+				 __func__,
+				 hdmirx_dev->hdmirx_fence->fence,
+				 hdmirx_dev->hdmirx_fence->fence_fd);
+			kfree(hdmirx_dev->hdmirx_fence);
+			hdmirx_dev->hdmirx_fence = NULL;
+		}
+		spin_unlock_irqrestore(&hdmirx_dev->fence_lock, lock_flags);
+
 		if (!stream->next_buf) {
 			spin_lock(&stream->vbq_lock);
 			if (!list_empty(&stream->buf_head)) {
@@ -2698,15 +3028,44 @@
 			}
 		}
 		spin_unlock(&stream->vbq_lock);
-		if (stream->next_buf) {
-			hdmirx_writel(hdmirx_dev, DMA_CONFIG2,
-				      stream->next_buf->buff_addr[HDMIRX_PLANE_Y]);
-			hdmirx_writel(hdmirx_dev, DMA_CONFIG3,
-				      stream->next_buf->buff_addr[HDMIRX_PLANE_CBCR]);
-		} else {
-			v4l2_dbg(3, debug, v4l2_dev,
-				 "%s: No buffer is available\n", __func__);
+		}
+
+		if (stream->next_buf) {
+			hdmirx_writel(hdmirx_dev, DMA_CONFIG2,
+				      stream->next_buf->buff_addr[HDMIRX_PLANE_Y]);
+			hdmirx_writel(hdmirx_dev, DMA_CONFIG3,
+				      stream->next_buf->buff_addr[HDMIRX_PLANE_CBCR]);
+
+			if (low_latency) {
+				if (stream->curr_buf)
+					vb_done = &stream->curr_buf->vb;
+
+				if (vb_done) {
+					hdmirx_add_fence_to_vb_done(stream, vb_done);
+					vb_done->vb2_buf.timestamp = ktime_get_ns();
+					vb_done->sequence = stream->frame_idx;
+					hdmirx_vb_done(stream, vb_done);
+					stream->frame_idx++;
+					if (stream->frame_idx == 30)
+						v4l2_info(v4l2_dev, "rcv frames\n");
+				}
+
+				stream->curr_buf = stream->next_buf;
+				stream->next_buf = NULL;
 			}
+		} else {
+			v4l2_dbg(3, debug, v4l2_dev,
+				 "%s: next_buf NULL, drop the frame!\n", __func__);
+		}
+
+		if (stream->curr_buf) {
+			v4l2_dbg(4, debug, v4l2_dev, "%s: curr_fd:%d\n",
+				 __func__, stream->curr_buf->vb.vb2_buf.planes[0].m.fd);
+		}
+
+		if (stream->next_buf) {
+			v4l2_dbg(4, debug, v4l2_dev, "%s: next_fd:%d\n",
+				 __func__, stream->next_buf->vb.vb2_buf.planes[0].m.fd);
 		}
 	} else {
 		v4l2_dbg(3, debug, v4l2_dev, "%s: interlace:%d, line_flag_int_cnt:%d\n",
@@ -3248,6 +3607,7 @@
 	if (!hdmirx_dev->audio_present) {
 		dev_info(hdmirx_dev->dev, "audio on");
 		hdmirx_audio_handle_plugged_change(hdmirx_dev, 1);
+		process_audio_change(hdmirx_dev);
 		hdmirx_dev->audio_present = true;
 	}
 	if (cur_state - init_state > 16 && cur_state - pre_state > 0)
@@ -3258,6 +3618,7 @@
 	if (hdmirx_dev->audio_present) {
 		dev_info(hdmirx_dev->dev, "audio off");
 		hdmirx_audio_handle_plugged_change(hdmirx_dev, 0);
+		process_audio_change(hdmirx_dev);
 		hdmirx_dev->audio_present = false;
 	}
 }
@@ -4175,6 +4536,50 @@
 		dev_err(hdmirx_dev->dev, "%s freq qos nod add\n", __func__);
 }
 
+static int hdmirx_get_custom_ctrl(struct v4l2_ctrl *ctrl)
+{
+	struct rk_hdmirx_dev *hdmirx_dev = container_of(ctrl->handler, struct rk_hdmirx_dev, hdl);
+	int ret = 0;
+
+	if (ctrl->id == RK_V4L2_CID_AUDIO_SAMPLING_RATE) {
+		*ctrl->p_new.p_s32 = hdmirx_dev->audio_state.fs_audio;
+	} else if (ctrl->id == RK_V4L2_CID_AUDIO_PRESENT) {
+		*ctrl->p_new.p_s32 = tx_5v_power_present(hdmirx_dev) ?
+				     hdmirx_dev->audio_present : 0;
+	} else {
+		ret = -EINVAL;
+	}
+	return ret;
+}
+
+static const struct v4l2_ctrl_ops hdmirx_custom_ctrl_ops = {
+	.g_volatile_ctrl = hdmirx_get_custom_ctrl,
+};
+
+static const struct v4l2_ctrl_config hdmirx_ctrl_audio_sampling_rate = {
+	.ops = &hdmirx_custom_ctrl_ops,
+	.id = RK_V4L2_CID_AUDIO_SAMPLING_RATE,
+	.name = "Audio sampling rate",
+	.type = V4L2_CTRL_TYPE_INTEGER,
+	.min = 0,
+	.max = 768000,
+	.step = 1,
+	.def = 0,
+	.flags = V4L2_CTRL_FLAG_READ_ONLY,
+};
+
+static const struct v4l2_ctrl_config hdmirx_ctrl_audio_present = {
+	.ops = &hdmirx_custom_ctrl_ops,
+	.id = RK_V4L2_CID_AUDIO_PRESENT,
+	.name = "Audio present",
+	.type = V4L2_CTRL_TYPE_BOOLEAN,
+	.min = 0,
+	.max = 1,
+	.step = 1,
+	.def = 0,
+	.flags = V4L2_CTRL_FLAG_READ_ONLY,
+};
+
 static int hdmirx_probe(struct platform_device *pdev)
 {
 	const struct v4l2_dv_timings timings_def = HDMIRX_DEFAULT_TIMING;
@@ -4208,23 +4613,35 @@
 		return PTR_ERR(hdmirx_dev->regs);
 	}
 
-	if (sip_cpu_logical_map_mpidr(0) == 0)
-		cpu_aff = sip_cpu_logical_map_mpidr(4); // big cpu0
-	else
-		cpu_aff = sip_cpu_logical_map_mpidr(1); // big cpu1
+	/*
+	 * Bind HDMIRX's FIQ and driver interrupt processing to big cpu1
+	 * in order to quickly respond to FIQ and prevent them from affecting
+	 * each other.
+	 */
+	if (sip_cpu_logical_map_mpidr(0) == 0) {
+		cpu_aff = sip_cpu_logical_map_mpidr(5);
+		hdmirx_dev->bound_cpu = 5;
+	} else {
+		cpu_aff = sip_cpu_logical_map_mpidr(1);
+		hdmirx_dev->bound_cpu = 1;
+	}
 
 	sip_fiq_control(RK_SIP_FIQ_CTRL_SET_AFF, RK_IRQ_HDMIRX_HDMI, cpu_aff);
-	hdmirx_dev->bound_cpu = (cpu_aff >> 8) & 0xf;
+	hdmirx_dev->phy_cpuid = (cpu_aff >> 8) & 0xf;
 	hdmirx_dev->wdt_cfg_bound_cpu = hdmirx_dev->bound_cpu + 1;
-	dev_info(dev, "%s: cpu_aff:%#x, Bound_cpu:%d, wdt_cfg_bound_cpu:%d\n",
+	dev_info(dev, "%s: cpu_aff:%#x, Bound_cpu:%d, wdt_cfg_bound_cpu:%d, phy_cpuid:%d\n",
 		 __func__, cpu_aff,
 		 hdmirx_dev->bound_cpu,
-		 hdmirx_dev->wdt_cfg_bound_cpu);
+		 hdmirx_dev->wdt_cfg_bound_cpu,
+		 hdmirx_dev->phy_cpuid);
 	cpu_latency_qos_add_request(&hdmirx_dev->pm_qos, PM_QOS_DEFAULT_VALUE);
 
 	mutex_init(&hdmirx_dev->stream_lock);
 	mutex_init(&hdmirx_dev->work_lock);
 	spin_lock_init(&hdmirx_dev->rst_lock);
+	spin_lock_init(&hdmirx_dev->fence_lock);
+	INIT_LIST_HEAD(&hdmirx_dev->qbuf_fence_list_head);
+	INIT_LIST_HEAD(&hdmirx_dev->done_fence_list_head);
 	INIT_WORK(&hdmirx_dev->work_wdt_config,
 		  hdmirx_work_wdt_config);
 	INIT_DELAYED_WORK(&hdmirx_dev->delayed_work_hotplug,
@@ -4288,10 +4705,19 @@
 	strscpy(v4l2_dev->name, dev_name(dev), sizeof(v4l2_dev->name));
 
 	hdl = &hdmirx_dev->hdl;
-	v4l2_ctrl_handler_init(hdl, 1);
+	v4l2_ctrl_handler_init(hdl, 3);
 	hdmirx_dev->detect_tx_5v_ctrl = v4l2_ctrl_new_std(hdl,
 			NULL, V4L2_CID_DV_RX_POWER_PRESENT,
 			0, 1, 0, 0);
+	/* custom controls */
+	hdmirx_dev->audio_sampling_rate_ctrl = v4l2_ctrl_new_custom(hdl,
+			&hdmirx_ctrl_audio_sampling_rate, NULL);
+	if (hdmirx_dev->audio_sampling_rate_ctrl)
+		hdmirx_dev->audio_sampling_rate_ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
+	hdmirx_dev->audio_present_ctrl = v4l2_ctrl_new_custom(hdl,
+			&hdmirx_ctrl_audio_present, NULL);
+	if (hdmirx_dev->audio_present_ctrl)
+		hdmirx_dev->audio_present_ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
 	if (hdl->error) {
 		dev_err(dev, "v4l2 ctrl handler init failed!\n");
 		ret = hdl->error;
@@ -4399,7 +4825,8 @@
 	hdmirx_register_hdcp(dev, hdmirx_dev, hdmirx_dev->hdcp_enable);
 
 	hdmirx_register_debugfs(hdmirx_dev->dev, hdmirx_dev);
-
+	hdmirx_fence_context_init(&hdmirx_dev->fence_ctx);
+	hdmirx_dev->hdmirx_fence = NULL;
 	hdmirx_dev->initialized = true;
 	dev_info(dev, "%s driver probe ok!\n", dev_name(dev));
 
@@ -4430,7 +4857,6 @@
 	struct rk_hdmirx_dev *hdmirx_dev = dev_get_drvdata(dev);
 
 	debugfs_remove_recursive(hdmirx_dev->debugfs_dir);
-
 	cpu_latency_qos_remove_request(&hdmirx_dev->pm_qos);
 	cancel_delayed_work(&hdmirx_dev->delayed_work_hotplug);
 	cancel_delayed_work(&hdmirx_dev->delayed_work_res_change);