From 37f49e37ab4cb5d0bc4c60eb5c6d4dd57db767bb Mon Sep 17 00:00:00 2001 From: hc <hc@nodka.com> Date: Fri, 10 May 2024 07:44:59 +0000 Subject: [PATCH] gmac get mac form eeprom --- kernel/drivers/media/platform/ti-vpe/vpe.c | 384 ++++++++++++++++++++++++++++++------------------------ 1 files changed, 210 insertions(+), 174 deletions(-) diff --git a/kernel/drivers/media/platform/ti-vpe/vpe.c b/kernel/drivers/media/platform/ti-vpe/vpe.c index 70a8371..779dd74 100644 --- a/kernel/drivers/media/platform/ti-vpe/vpe.c +++ b/kernel/drivers/media/platform/ti-vpe/vpe.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * TI VPE mem2mem driver, based on the virtual v4l2-mem2mem example driver * @@ -11,10 +12,6 @@ * Marek Szyprowski, <m.szyprowski@samsung.com> * * Based on the virtual v4l2-mem2mem example device - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation */ #include <linux/delay.h> @@ -55,7 +52,7 @@ #define MIN_W 32 #define MIN_H 32 #define MAX_W 2048 -#define MAX_H 1184 +#define MAX_H 2048 /* required alignments */ #define S_ALIGN 0 /* multiple of 1 */ @@ -227,7 +224,6 @@ /* driver info for each of the supported video formats */ struct vpe_fmt { - char *name; /* human-readable name */ u32 fourcc; /* standard format identifier */ u8 types; /* CAPTURE and/or OUTPUT */ u8 coplanar; /* set for unpacked Luma and Chroma */ @@ -237,7 +233,6 @@ static struct vpe_fmt vpe_formats[] = { { - .name = "NV16 YUV 422 co-planar", .fourcc = V4L2_PIX_FMT_NV16, .types = VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT, .coplanar = 1, @@ -246,7 +241,6 @@ }, }, { - .name = "NV12 YUV 420 co-planar", .fourcc = V4L2_PIX_FMT_NV12, .types = VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT, .coplanar = 1, @@ -255,7 +249,14 @@ }, }, { - .name = "YUYV 422 packed", + .fourcc = V4L2_PIX_FMT_NV21, + .types = VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT, + .coplanar = 1, + .vpdma_fmt = { &vpdma_yuv_fmts[VPDMA_DATA_FMT_Y420], + &vpdma_yuv_fmts[VPDMA_DATA_FMT_CB420], + }, + }, + { .fourcc = V4L2_PIX_FMT_YUYV, .types = VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT, .coplanar = 0, @@ -263,7 +264,6 @@ }, }, { - .name = "UYVY 422 packed", .fourcc = V4L2_PIX_FMT_UYVY, .types = VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT, .coplanar = 0, @@ -271,7 +271,6 @@ }, }, { - .name = "RGB888 packed", .fourcc = V4L2_PIX_FMT_RGB24, .types = VPE_FMT_TYPE_CAPTURE, .coplanar = 0, @@ -279,7 +278,6 @@ }, }, { - .name = "ARGB32", .fourcc = V4L2_PIX_FMT_RGB32, .types = VPE_FMT_TYPE_CAPTURE, .coplanar = 0, @@ -287,7 +285,6 @@ }, }, { - .name = "BGR888 packed", .fourcc = V4L2_PIX_FMT_BGR24, .types = VPE_FMT_TYPE_CAPTURE, .coplanar = 0, @@ -295,7 +292,6 @@ }, }, { - .name = "ABGR32", .fourcc = V4L2_PIX_FMT_BGR32, .types = VPE_FMT_TYPE_CAPTURE, .coplanar = 0, @@ -303,7 +299,6 @@ }, }, { - .name = "RGB565", .fourcc = V4L2_PIX_FMT_RGB565, .types = VPE_FMT_TYPE_CAPTURE, .coplanar = 0, @@ -311,7 +306,6 @@ }, }, { - .name = "RGB5551", .fourcc = V4L2_PIX_FMT_RGB555, .types = VPE_FMT_TYPE_CAPTURE, .coplanar = 0, @@ -325,14 +319,9 @@ * there is one source queue and one destination queue for each m2m context. 
*/ struct vpe_q_data { - unsigned int width; /* frame width */ - unsigned int height; /* frame height */ - unsigned int nplanes; /* Current number of planes */ - unsigned int bytesperline[VPE_MAX_PLANES]; /* bytes per line in memory */ - enum v4l2_colorspace colorspace; - enum v4l2_field field; /* supported field value */ + /* current v4l2 format info */ + struct v4l2_format format; unsigned int flags; - unsigned int sizeimage[VPE_MAX_PLANES]; /* image size in memory */ struct v4l2_rect c_rect; /* crop/compose rectangle */ struct vpe_fmt *fmt; /* format info */ }; @@ -342,9 +331,14 @@ #define Q_DATA_MODE_TILED BIT(1) #define Q_DATA_INTERLACED_ALTERNATE BIT(2) #define Q_DATA_INTERLACED_SEQ_TB BIT(3) +#define Q_DATA_INTERLACED_SEQ_BT BIT(4) + +#define Q_IS_SEQ_XX (Q_DATA_INTERLACED_SEQ_TB | \ + Q_DATA_INTERLACED_SEQ_BT) #define Q_IS_INTERLACED (Q_DATA_INTERLACED_ALTERNATE | \ - Q_DATA_INTERLACED_SEQ_TB) + Q_DATA_INTERLACED_SEQ_TB | \ + Q_DATA_INTERLACED_SEQ_BT) enum { Q_DATA_SRC = 0, @@ -700,7 +694,8 @@ * Cfg Mode 1: YUV422 source, disable upsampler, DEI is de-interlacing. */ - if (fmt->fourcc == V4L2_PIX_FMT_NV12) + if (fmt->fourcc == V4L2_PIX_FMT_NV12 || + fmt->fourcc == V4L2_PIX_FMT_NV21) cfg_mode = 0; write_field(us1_reg0, cfg_mode, VPE_US_MODE_MASK, VPE_US_MODE_SHIFT); @@ -715,7 +710,8 @@ struct vpe_fmt *fmt = ctx->q_data[Q_DATA_SRC].fmt; int line_mode = 1; - if (fmt->fourcc == V4L2_PIX_FMT_NV12) + if (fmt->fourcc == V4L2_PIX_FMT_NV12 || + fmt->fourcc == V4L2_PIX_FMT_NV21) line_mode = 0; /* double lines to line buffer */ /* regs for now */ @@ -760,11 +756,12 @@ static void set_dst_registers(struct vpe_ctx *ctx) { struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr; - enum v4l2_colorspace clrspc = ctx->q_data[Q_DATA_DST].colorspace; struct vpe_fmt *fmt = ctx->q_data[Q_DATA_DST].fmt; + const struct v4l2_format_info *finfo; u32 val = 0; - if (clrspc == V4L2_COLORSPACE_SRGB) { + finfo = v4l2_format_info(fmt->fourcc); + if (v4l2_is_format_rgb(finfo)) { val |= VPE_RGB_OUT_SELECT; vpdma_set_bg_color(ctx->dev->vpdma, (struct vpdma_data_format *)fmt->vpdma_fmt[0], 0xff); @@ -777,7 +774,8 @@ */ val |= VPE_DS_SRC_DEI_SCALER | VPE_CSC_SRC_DEI_SCALER; - if (fmt->fourcc != V4L2_PIX_FMT_NV12) + if (fmt->fourcc != V4L2_PIX_FMT_NV12 && + fmt->fourcc != V4L2_PIX_FMT_NV21) val |= VPE_DS_BYPASS; mmr_adb->out_fmt_reg[0] = val; @@ -866,11 +864,13 @@ unsigned int src_h = s_q_data->c_rect.height; unsigned int dst_w = d_q_data->c_rect.width; unsigned int dst_h = d_q_data->c_rect.height; + struct v4l2_pix_format_mplane *spix; size_t mv_buf_size; int ret; ctx->sequence = 0; ctx->field = V4L2_FIELD_TOP; + spix = &s_q_data->format.fmt.pix_mp; if ((s_q_data->flags & Q_IS_INTERLACED) && !(d_q_data->flags & Q_IS_INTERLACED)) { @@ -881,13 +881,13 @@ /* * we make sure that the source image has a 16 byte aligned * stride, we need to do the same for the motion vector buffer - * by aligning it's stride to the next 16 byte boundry. this + * by aligning it's stride to the next 16 byte boundary. 
this * extra space will not be used by the de-interlacer, but will * ensure that vpdma operates correctly */ - bytes_per_line = ALIGN((s_q_data->width * mv->depth) >> 3, - VPDMA_STRIDE_ALIGN); - mv_buf_size = bytes_per_line * s_q_data->height; + bytes_per_line = ALIGN((spix->width * mv->depth) >> 3, + VPDMA_STRIDE_ALIGN); + mv_buf_size = bytes_per_line * spix->height; ctx->deinterlacing = true; src_h <<= 1; @@ -907,7 +907,7 @@ set_dei_regs(ctx); csc_set_coeff(ctx->dev->csc, &mmr_adb->csc_regs[0], - s_q_data->colorspace, d_q_data->colorspace); + &s_q_data->format, &d_q_data->format); sc_set_hs_coeffs(ctx->dev->sc, ctx->sc_coeff_h.addr, src_w, dst_w); sc_set_vs_coeffs(ctx->dev->sc, ctx->sc_coeff_v.addr, src_h, dst_h); @@ -917,14 +917,6 @@ src_w, src_h, dst_w, dst_h); return 0; -} - -/* - * Return the vpe_ctx structure for a given struct file - */ -static struct vpe_ctx *file2ctx(struct file *file) -{ - return container_of(file->private_data, struct vpe_ctx, fh); } /* @@ -1029,6 +1021,7 @@ struct vpe_fmt *fmt = q_data->fmt; const struct vpdma_data_format *vpdma_fmt; int mv_buf_selector = !ctx->src_mv_buf_selector; + struct v4l2_pix_format_mplane *pix; dma_addr_t dma_addr; u32 flags = 0; u32 offset = 0; @@ -1038,21 +1031,23 @@ vpdma_fmt = &vpdma_misc_fmts[VPDMA_DATA_FMT_MV]; dma_addr = ctx->mv_buf_dma[mv_buf_selector]; q_data = &ctx->q_data[Q_DATA_SRC]; - stride = ALIGN((q_data->width * vpdma_fmt->depth) >> 3, + pix = &q_data->format.fmt.pix_mp; + stride = ALIGN((pix->width * vpdma_fmt->depth) >> 3, VPDMA_STRIDE_ALIGN); } else { /* to incorporate interleaved formats */ int plane = fmt->coplanar ? p_data->vb_part : 0; + pix = &q_data->format.fmt.pix_mp; vpdma_fmt = fmt->vpdma_fmt[plane]; /* * If we are using a single plane buffer and * we need to set a separate vpdma chroma channel. */ - if (q_data->nplanes == 1 && plane) { + if (pix->num_planes == 1 && plane) { dma_addr = vb2_dma_contig_plane_dma_addr(vb, 0); /* Compute required offset */ - offset = q_data->bytesperline[0] * q_data->height; + offset = pix->plane_fmt[0].bytesperline * pix->height; } else { dma_addr = vb2_dma_contig_plane_dma_addr(vb, plane); /* Use address as is, no offset */ @@ -1066,7 +1061,7 @@ } /* Apply the offset */ dma_addr += offset; - stride = q_data->bytesperline[VPE_LUMA]; + stride = pix->plane_fmt[VPE_LUMA].bytesperline; } if (q_data->flags & Q_DATA_FRAME_1D) @@ -1077,7 +1072,7 @@ vpdma_set_max_size(ctx->dev->vpdma, VPDMA_MAX_SIZE1, MAX_W, MAX_H); - vpdma_add_out_dtd(&ctx->desc_list, q_data->width, + vpdma_add_out_dtd(&ctx->desc_list, pix->width, stride, &q_data->c_rect, vpdma_fmt, dma_addr, MAX_OUT_WIDTH_REG1, MAX_OUT_HEIGHT_REG1, p_data->channel, flags); @@ -1090,6 +1085,7 @@ struct vb2_buffer *vb = &ctx->src_vbs[p_data->vb_index]->vb2_buf; struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); struct vpe_fmt *fmt = q_data->fmt; + struct v4l2_pix_format_mplane *pix; const struct vpdma_data_format *vpdma_fmt; int mv_buf_selector = ctx->src_mv_buf_selector; int field = vbuf->field == V4L2_FIELD_BOTTOM; @@ -1099,10 +1095,11 @@ u32 offset = 0; u32 stride; + pix = &q_data->format.fmt.pix_mp; if (port == VPE_PORT_MV_IN) { vpdma_fmt = &vpdma_misc_fmts[VPDMA_DATA_FMT_MV]; dma_addr = ctx->mv_buf_dma[mv_buf_selector]; - stride = ALIGN((q_data->width * vpdma_fmt->depth) >> 3, + stride = ALIGN((pix->width * vpdma_fmt->depth) >> 3, VPDMA_STRIDE_ALIGN); } else { /* to incorporate interleaved formats */ @@ -1113,10 +1110,10 @@ * If we are using a single plane buffer and * we need to set a separate vpdma chroma channel. 
*/ - if (q_data->nplanes == 1 && plane) { + if (pix->num_planes == 1 && plane) { dma_addr = vb2_dma_contig_plane_dma_addr(vb, 0); /* Compute required offset */ - offset = q_data->bytesperline[0] * q_data->height; + offset = pix->plane_fmt[0].bytesperline * pix->height; } else { dma_addr = vb2_dma_contig_plane_dma_addr(vb, plane); /* Use address as is, no offset */ @@ -1130,27 +1127,39 @@ } /* Apply the offset */ dma_addr += offset; - stride = q_data->bytesperline[VPE_LUMA]; + stride = pix->plane_fmt[VPE_LUMA].bytesperline; - if (q_data->flags & Q_DATA_INTERLACED_SEQ_TB) { - /* - * Use top or bottom field from same vb alternately - * f,f-1,f-2 = TBT when seq is even - * f,f-1,f-2 = BTB when seq is odd - */ - field = (p_data->vb_index + (ctx->sequence % 2)) % 2; + /* + * field used in VPDMA desc = 0 (top) / 1 (bottom) + * Use top or bottom field from same vb alternately + * For each de-interlacing operation, f,f-1,f-2 should be one + * of TBT or BTB + */ + if (q_data->flags & Q_DATA_INTERLACED_SEQ_TB || + q_data->flags & Q_DATA_INTERLACED_SEQ_BT) { + /* Select initial value based on format */ + if (q_data->flags & Q_DATA_INTERLACED_SEQ_BT) + field = 1; + else + field = 0; + + /* Toggle for each vb_index and each operation */ + field = (field + p_data->vb_index + ctx->sequence) % 2; if (field) { - /* - * bottom field of a SEQ_TB buffer - * Skip the top field data by - */ - int height = q_data->height / 2; - int bpp = fmt->fourcc == V4L2_PIX_FMT_NV12 ? - 1 : (vpdma_fmt->depth >> 3); + int height = pix->height / 2; + int bpp; + + if (fmt->fourcc == V4L2_PIX_FMT_NV12 || + fmt->fourcc == V4L2_PIX_FMT_NV21) + bpp = 1; + else + bpp = vpdma_fmt->depth >> 3; + if (plane) height /= 2; - dma_addr += q_data->width * height * bpp; + + dma_addr += pix->width * height * bpp; } } } @@ -1163,10 +1172,11 @@ frame_width = q_data->c_rect.width; frame_height = q_data->c_rect.height; - if (p_data->vb_part && fmt->fourcc == V4L2_PIX_FMT_NV12) + if (p_data->vb_part && (fmt->fourcc == V4L2_PIX_FMT_NV12 || + fmt->fourcc == V4L2_PIX_FMT_NV21)) frame_height /= 2; - vpdma_add_in_dtd(&ctx->desc_list, q_data->width, stride, + vpdma_add_in_dtd(&ctx->desc_list, pix->width, stride, &q_data->c_rect, vpdma_fmt, dma_addr, p_data->channel, field, flags, frame_width, frame_height, 0, 0); @@ -1203,13 +1213,18 @@ struct sc_data *sc = ctx->dev->sc; struct vpe_q_data *d_q_data = &ctx->q_data[Q_DATA_DST]; struct vpe_q_data *s_q_data = &ctx->q_data[Q_DATA_SRC]; + const struct v4l2_format_info *d_finfo; - if (ctx->deinterlacing && s_q_data->flags & Q_DATA_INTERLACED_SEQ_TB && - ctx->sequence % 2 == 0) { - /* When using SEQ_TB buffers, When using it first time, - * No need to remove the buffer as the next field is present - * in the same buffer. 
(so that job_ready won't fail) - * It will be removed when using bottom field + d_finfo = v4l2_format_info(d_q_data->fmt->fourcc); + + if (ctx->deinterlacing && s_q_data->flags & Q_IS_SEQ_XX && + ctx->sequence % 2 == 0) { + /* When using SEQ_XX type buffers, each buffer has two fields + * each buffer has two fields (top & bottom) + * Removing one buffer is actually getting two fields + * Alternate between two operations:- + * Even : consume one field but DO NOT REMOVE from queue + * Odd : consume other field and REMOVE from queue */ ctx->src_vbs[0] = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); WARN_ON(ctx->src_vbs[0] == NULL); @@ -1273,7 +1288,7 @@ if (ctx->deinterlacing) add_out_dtd(ctx, VPE_PORT_MV_OUT); - if (d_q_data->colorspace == V4L2_COLORSPACE_SRGB) { + if (v4l2_is_format_rgb(d_finfo)) { add_out_dtd(ctx, VPE_PORT_RGB_OUT); } else { add_out_dtd(ctx, VPE_PORT_LUMA_OUT); @@ -1315,7 +1330,7 @@ } /* sync on channel control descriptors for output ports */ - if (d_q_data->colorspace == V4L2_COLORSPACE_SRGB) { + if (v4l2_is_format_rgb(d_finfo)) { vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_RGB_OUT); } else { @@ -1505,12 +1520,10 @@ static int vpe_querycap(struct file *file, void *priv, struct v4l2_capability *cap) { - strncpy(cap->driver, VPE_MODULE_NAME, sizeof(cap->driver) - 1); - strncpy(cap->card, VPE_MODULE_NAME, sizeof(cap->card) - 1); + strscpy(cap->driver, VPE_MODULE_NAME, sizeof(cap->driver)); + strscpy(cap->card, VPE_MODULE_NAME, sizeof(cap->card)); snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s", VPE_MODULE_NAME); - cap->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING; - cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS; return 0; } @@ -1533,7 +1546,6 @@ if (!fmt) return -EINVAL; - strncpy(f->description, fmt->name, sizeof(f->description) - 1); f->pixelformat = fmt->fourcc; return 0; } @@ -1550,38 +1562,32 @@ static int vpe_g_fmt(struct file *file, void *priv, struct v4l2_format *f) { struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp; - struct vpe_ctx *ctx = file2ctx(file); + struct vpe_ctx *ctx = file->private_data; struct vb2_queue *vq; struct vpe_q_data *q_data; - int i; vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type); if (!vq) return -EINVAL; q_data = get_q_data(ctx, f->type); + if (!q_data) + return -EINVAL; - pix->width = q_data->width; - pix->height = q_data->height; - pix->pixelformat = q_data->fmt->fourcc; - pix->field = q_data->field; + *f = q_data->format; - if (V4L2_TYPE_IS_OUTPUT(f->type)) { - pix->colorspace = q_data->colorspace; - } else { + if (V4L2_TYPE_IS_CAPTURE(f->type)) { struct vpe_q_data *s_q_data; + struct v4l2_pix_format_mplane *spix; - /* get colorspace from the source queue */ + /* get colorimetry from the source queue */ s_q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE); + spix = &s_q_data->format.fmt.pix_mp; - pix->colorspace = s_q_data->colorspace; - } - - pix->num_planes = q_data->nplanes; - - for (i = 0; i < pix->num_planes; i++) { - pix->plane_fmt[i].bytesperline = q_data->bytesperline[i]; - pix->plane_fmt[i].sizeimage = q_data->sizeimage[i]; + pix->colorspace = spix->colorspace; + pix->xfer_func = spix->xfer_func; + pix->ycbcr_enc = spix->ycbcr_enc; + pix->quantization = spix->quantization; } return 0; @@ -1595,6 +1601,7 @@ unsigned int w_align; int i, depth, depth_bytes, height; unsigned int stride = 0; + const struct v4l2_format_info *finfo; if (!fmt || !(fmt->types & type)) { vpe_dbg(ctx->dev, "Fourcc format (0x%08x) invalid.\n", @@ -1602,8 +1609,10 @@ fmt = 
__find_format(V4L2_PIX_FMT_YUYV); } - if (pix->field != V4L2_FIELD_NONE && pix->field != V4L2_FIELD_ALTERNATE - && pix->field != V4L2_FIELD_SEQ_TB) + if (pix->field != V4L2_FIELD_NONE && + pix->field != V4L2_FIELD_ALTERNATE && + pix->field != V4L2_FIELD_SEQ_TB && + pix->field != V4L2_FIELD_SEQ_BT) pix->field = V4L2_FIELD_NONE; depth = fmt->vpdma_fmt[VPE_LUMA]->depth; @@ -1652,21 +1661,19 @@ pix->num_planes = 1; pix->pixelformat = fmt->fourcc; + finfo = v4l2_format_info(fmt->fourcc); /* * For the actual image parameters, we need to consider the field - * height of the image for SEQ_TB buffers. + * height of the image for SEQ_XX buffers. */ - if (pix->field == V4L2_FIELD_SEQ_TB) + if (pix->field == V4L2_FIELD_SEQ_TB || pix->field == V4L2_FIELD_SEQ_BT) height = pix->height / 2; else height = pix->height; if (!pix->colorspace) { - if (fmt->fourcc == V4L2_PIX_FMT_RGB24 || - fmt->fourcc == V4L2_PIX_FMT_BGR24 || - fmt->fourcc == V4L2_PIX_FMT_RGB32 || - fmt->fourcc == V4L2_PIX_FMT_BGR32) { + if (v4l2_is_format_rgb(finfo)) { pix->colorspace = V4L2_COLORSPACE_SRGB; } else { if (height > 1280) /* HD */ @@ -1714,7 +1721,7 @@ static int vpe_try_fmt(struct file *file, void *priv, struct v4l2_format *f) { - struct vpe_ctx *ctx = file2ctx(file); + struct vpe_ctx *ctx = file->private_data; struct vpe_fmt *fmt = find_format(f); if (V4L2_TYPE_IS_OUTPUT(f->type)) @@ -1726,10 +1733,9 @@ static int __vpe_s_fmt(struct vpe_ctx *ctx, struct v4l2_format *f) { struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp; - struct v4l2_plane_pix_format *plane_fmt; + struct v4l2_pix_format_mplane *qpix; struct vpe_q_data *q_data; struct vb2_queue *vq; - int i; vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type); if (!vq) @@ -1744,42 +1750,34 @@ if (!q_data) return -EINVAL; + qpix = &q_data->format.fmt.pix_mp; q_data->fmt = find_format(f); - q_data->width = pix->width; - q_data->height = pix->height; - q_data->colorspace = pix->colorspace; - q_data->field = pix->field; - q_data->nplanes = pix->num_planes; - - for (i = 0; i < pix->num_planes; i++) { - plane_fmt = &pix->plane_fmt[i]; - - q_data->bytesperline[i] = plane_fmt->bytesperline; - q_data->sizeimage[i] = plane_fmt->sizeimage; - } + q_data->format = *f; q_data->c_rect.left = 0; q_data->c_rect.top = 0; - q_data->c_rect.width = q_data->width; - q_data->c_rect.height = q_data->height; + q_data->c_rect.width = pix->width; + q_data->c_rect.height = pix->height; - if (q_data->field == V4L2_FIELD_ALTERNATE) + if (qpix->field == V4L2_FIELD_ALTERNATE) q_data->flags |= Q_DATA_INTERLACED_ALTERNATE; - else if (q_data->field == V4L2_FIELD_SEQ_TB) + else if (qpix->field == V4L2_FIELD_SEQ_TB) q_data->flags |= Q_DATA_INTERLACED_SEQ_TB; + else if (qpix->field == V4L2_FIELD_SEQ_BT) + q_data->flags |= Q_DATA_INTERLACED_SEQ_BT; else q_data->flags &= ~Q_IS_INTERLACED; - /* the crop height is halved for the case of SEQ_TB buffers */ - if (q_data->flags & Q_DATA_INTERLACED_SEQ_TB) + /* the crop height is halved for the case of SEQ_XX buffers */ + if (q_data->flags & Q_IS_SEQ_XX) q_data->c_rect.height /= 2; vpe_dbg(ctx->dev, "Setting format for type %d, wxh: %dx%d, fmt: %d bpl_y %d", - f->type, q_data->width, q_data->height, q_data->fmt->fourcc, - q_data->bytesperline[VPE_LUMA]); - if (q_data->nplanes == 2) + f->type, pix->width, pix->height, pix->pixelformat, + pix->plane_fmt[0].bytesperline); + if (pix->num_planes == 2) vpe_dbg(ctx->dev, " bpl_uv %d\n", - q_data->bytesperline[VPE_CHROMA]); + pix->plane_fmt[1].bytesperline); return 0; } @@ -1787,7 +1785,7 @@ static int vpe_s_fmt(struct file *file, 
void *priv, struct v4l2_format *f) { int ret; - struct vpe_ctx *ctx = file2ctx(file); + struct vpe_ctx *ctx = file->private_data; ret = vpe_try_fmt(file, priv, f); if (ret) @@ -1808,6 +1806,7 @@ static int __vpe_try_selection(struct vpe_ctx *ctx, struct v4l2_selection *s) { struct vpe_q_data *q_data; + struct v4l2_pix_format_mplane *pix; int height; if ((s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) && @@ -1817,6 +1816,8 @@ q_data = get_q_data(ctx, s->type); if (!q_data) return -EINVAL; + + pix = &q_data->format.fmt.pix_mp; switch (s->target) { case V4L2_SEL_TGT_COMPOSE: @@ -1844,27 +1845,27 @@ } /* - * For SEQ_TB buffers, crop height should be less than the height of + * For SEQ_XX buffers, crop height should be less than the height of * the field height, not the buffer height */ - if (q_data->flags & Q_DATA_INTERLACED_SEQ_TB) - height = q_data->height / 2; + if (q_data->flags & Q_IS_SEQ_XX) + height = pix->height / 2; else - height = q_data->height; + height = pix->height; if (s->r.top < 0 || s->r.left < 0) { vpe_err(ctx->dev, "negative values for top and left\n"); s->r.top = s->r.left = 0; } - v4l_bound_align_image(&s->r.width, MIN_W, q_data->width, 1, + v4l_bound_align_image(&s->r.width, MIN_W, pix->width, 1, &s->r.height, MIN_H, height, H_ALIGN, S_ALIGN); /* adjust left/top if cropping rectangle is out of bounds */ - if (s->r.left + s->r.width > q_data->width) - s->r.left = q_data->width - s->r.width; - if (s->r.top + s->r.height > q_data->height) - s->r.top = q_data->height - s->r.height; + if (s->r.left + s->r.width > pix->width) + s->r.left = pix->width - s->r.width; + if (s->r.top + s->r.height > pix->height) + s->r.top = pix->height - s->r.height; return 0; } @@ -1872,8 +1873,9 @@ static int vpe_g_selection(struct file *file, void *fh, struct v4l2_selection *s) { - struct vpe_ctx *ctx = file2ctx(file); + struct vpe_ctx *ctx = file->private_data; struct vpe_q_data *q_data; + struct v4l2_pix_format_mplane *pix; bool use_c_rect = false; if ((s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) && @@ -1883,6 +1885,8 @@ q_data = get_q_data(ctx, s->type); if (!q_data) return -EINVAL; + + pix = &q_data->format.fmt.pix_mp; switch (s->target) { case V4L2_SEL_TGT_COMPOSE_DEFAULT: @@ -1922,8 +1926,8 @@ */ s->r.left = 0; s->r.top = 0; - s->r.width = q_data->width; - s->r.height = q_data->height; + s->r.width = pix->width; + s->r.height = pix->height; } return 0; @@ -1933,7 +1937,7 @@ static int vpe_s_selection(struct file *file, void *fh, struct v4l2_selection *s) { - struct vpe_ctx *ctx = file2ctx(file); + struct vpe_ctx *ctx = file->private_data; struct vpe_q_data *q_data; struct v4l2_selection sel = *s; int ret; @@ -1991,12 +1995,12 @@ static const struct v4l2_ioctl_ops vpe_ioctl_ops = { .vidioc_querycap = vpe_querycap, - .vidioc_enum_fmt_vid_cap_mplane = vpe_enum_fmt, + .vidioc_enum_fmt_vid_cap = vpe_enum_fmt, .vidioc_g_fmt_vid_cap_mplane = vpe_g_fmt, .vidioc_try_fmt_vid_cap_mplane = vpe_try_fmt, .vidioc_s_fmt_vid_cap_mplane = vpe_s_fmt, - .vidioc_enum_fmt_vid_out_mplane = vpe_enum_fmt, + .vidioc_enum_fmt_vid_out = vpe_enum_fmt, .vidioc_g_fmt_vid_out_mplane = vpe_g_fmt, .vidioc_try_fmt_vid_out_mplane = vpe_try_fmt, .vidioc_s_fmt_vid_out_mplane = vpe_s_fmt, @@ -2026,17 +2030,21 @@ int i; struct vpe_ctx *ctx = vb2_get_drv_priv(vq); struct vpe_q_data *q_data; + struct v4l2_pix_format_mplane *pix; q_data = get_q_data(ctx, vq->type); + if (!q_data) + return -EINVAL; - *nplanes = q_data->nplanes; + pix = &q_data->format.fmt.pix_mp; + *nplanes = pix->num_planes; for (i = 0; i < *nplanes; i++) - sizes[i] = 
q_data->sizeimage[i]; + sizes[i] = pix->plane_fmt[i].sizeimage; vpe_dbg(ctx->dev, "get %d buffer(s) of size %d", *nbuffers, sizes[VPE_LUMA]); - if (q_data->nplanes == 2) + if (*nplanes == 2) vpe_dbg(ctx->dev, " and %d\n", sizes[VPE_CHROMA]); return 0; @@ -2047,12 +2055,16 @@ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); struct vpe_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); struct vpe_q_data *q_data; - int i, num_planes; + struct v4l2_pix_format_mplane *pix; + int i; vpe_dbg(ctx->dev, "type: %d\n", vb->vb2_queue->type); q_data = get_q_data(ctx, vb->vb2_queue->type); - num_planes = q_data->nplanes; + if (!q_data) + return -EINVAL; + + pix = &q_data->format.fmt.pix_mp; if (vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { if (!(q_data->flags & Q_IS_INTERLACED)) { @@ -2060,23 +2072,24 @@ } else { if (vbuf->field != V4L2_FIELD_TOP && vbuf->field != V4L2_FIELD_BOTTOM && - vbuf->field != V4L2_FIELD_SEQ_TB) + vbuf->field != V4L2_FIELD_SEQ_TB && + vbuf->field != V4L2_FIELD_SEQ_BT) return -EINVAL; } } - for (i = 0; i < num_planes; i++) { - if (vb2_plane_size(vb, i) < q_data->sizeimage[i]) { + for (i = 0; i < pix->num_planes; i++) { + if (vb2_plane_size(vb, i) < pix->plane_fmt[i].sizeimage) { vpe_err(ctx->dev, "data will not fit into plane (%lu < %lu)\n", vb2_plane_size(vb, i), - (long) q_data->sizeimage[i]); + (long)pix->plane_fmt[i].sizeimage); return -EINVAL; } } - for (i = 0; i < num_planes; i++) - vb2_set_plane_payload(vb, i, q_data->sizeimage[i]); + for (i = 0; i < pix->num_planes; i++) + vb2_set_plane_payload(vb, i, pix->plane_fmt[i].sizeimage); return 0; } @@ -2261,6 +2274,7 @@ struct vpe_q_data *s_q_data; struct v4l2_ctrl_handler *hdl; struct vpe_ctx *ctx; + struct v4l2_pix_format_mplane *pix; int ret; vpe_dbg(dev, "vpe_open\n"); @@ -2296,7 +2310,7 @@ init_adb_hdrs(ctx); v4l2_fh_init(&ctx->fh, video_devdata(file)); - file->private_data = &ctx->fh; + file->private_data = ctx; hdl = &ctx->hdl; v4l2_ctrl_handler_init(hdl, 1); @@ -2309,23 +2323,32 @@ v4l2_ctrl_handler_setup(hdl); s_q_data = &ctx->q_data[Q_DATA_SRC]; + pix = &s_q_data->format.fmt.pix_mp; s_q_data->fmt = __find_format(V4L2_PIX_FMT_YUYV); - s_q_data->width = 1920; - s_q_data->height = 1080; - s_q_data->nplanes = 1; - s_q_data->bytesperline[VPE_LUMA] = (s_q_data->width * + pix->pixelformat = s_q_data->fmt->fourcc; + s_q_data->format.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; + pix->width = 1920; + pix->height = 1080; + pix->num_planes = 1; + pix->plane_fmt[VPE_LUMA].bytesperline = (pix->width * s_q_data->fmt->vpdma_fmt[VPE_LUMA]->depth) >> 3; - s_q_data->sizeimage[VPE_LUMA] = (s_q_data->bytesperline[VPE_LUMA] * - s_q_data->height); - s_q_data->colorspace = V4L2_COLORSPACE_REC709; - s_q_data->field = V4L2_FIELD_NONE; + pix->plane_fmt[VPE_LUMA].sizeimage = + pix->plane_fmt[VPE_LUMA].bytesperline * + pix->height; + pix->colorspace = V4L2_COLORSPACE_REC709; + pix->xfer_func = V4L2_XFER_FUNC_DEFAULT; + pix->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT; + pix->quantization = V4L2_QUANTIZATION_DEFAULT; + pix->field = V4L2_FIELD_NONE; s_q_data->c_rect.left = 0; s_q_data->c_rect.top = 0; - s_q_data->c_rect.width = s_q_data->width; - s_q_data->c_rect.height = s_q_data->height; + s_q_data->c_rect.width = pix->width; + s_q_data->c_rect.height = pix->height; s_q_data->flags = 0; ctx->q_data[Q_DATA_DST] = *s_q_data; + ctx->q_data[Q_DATA_DST].format.type = + V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; set_dei_shadow_registers(ctx); set_src_registers(ctx); @@ -2381,7 +2404,7 @@ static int vpe_release(struct file *file) { struct vpe_dev *dev = 
video_drvdata(file); - struct vpe_ctx *ctx = file2ctx(file); + struct vpe_ctx *ctx = file->private_data; vpe_dbg(dev, "releasing instance %p\n", ctx); @@ -2435,6 +2458,7 @@ .minor = -1, .release = video_device_release_empty, .vfl_dir = VFL_DIR_M2M, + .device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING, }; static const struct v4l2_m2m_ops m2m_ops = { @@ -2478,7 +2502,7 @@ vfd->lock = &dev->dev_mutex; vfd->v4l2_dev = &dev->v4l2_dev; - ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0); + ret = video_register_device(vfd, VFL_TYPE_VIDEO, 0); if (ret) { vpe_err(dev, "Failed to register video device\n"); @@ -2501,6 +2525,13 @@ struct vpe_dev *dev; int ret, irq, func; + ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (ret) { + dev_err(&pdev->dev, + "32-bit consistent DMA enable failed\n"); + return ret; + } + dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL); if (!dev) return -ENOMEM; @@ -2515,7 +2546,12 @@ mutex_init(&dev->dev_mutex); dev->res = platform_get_resource_byname(pdev, IORESOURCE_MEM, - "vpe_top"); + "vpe_top"); + if (!dev->res) { + dev_err(&pdev->dev, "missing 'vpe_top' resources data\n"); + return -ENODEV; + } + /* * HACK: we get resource info from device tree in the form of a list of * VPE sub blocks, the driver currently uses only the base of vpe_top @@ -2610,7 +2646,7 @@ #if defined(CONFIG_OF) static const struct of_device_id vpe_of_match[] = { { - .compatible = "ti,vpe", + .compatible = "ti,dra7-vpe", }, {}, }; -- Gitblit v1.6.2
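Note on the SEQ_TB/SEQ_BT handling in the add_in_dtd() hunk: each sequential buffer carries both fields, so the code first picks the field for the current descriptor (starting from top for SEQ_TB, bottom for SEQ_BT, then toggling for each source buffer index and each de-interlacing operation) and, when the bottom field is selected, offsets the DMA address past the top field's lines. A minimal standalone sketch of that addressing math follows; the helper names and plain C types are illustrative assumptions, not the driver's.

/*
 * Standalone illustration only, not driver code.
 */
#include <stdbool.h>
#include <stdint.h>

/* Field for this input descriptor: 0 = top, 1 = bottom. */
static int seq_field(bool seq_bt, unsigned int vb_index, unsigned int sequence)
{
	int field = seq_bt ? 1 : 0;	/* SEQ_BT buffers lead with the bottom field */

	/* toggle for each source buffer index and each operation */
	return (field + vb_index + sequence) % 2;
}

/* Byte offset of the selected field within one plane of a SEQ_XX buffer. */
static uint32_t seq_field_offset(int field, bool chroma_plane,
				 uint32_t width, uint32_t height,
				 uint32_t bytes_per_pixel)
{
	uint32_t field_height = height / 2;	/* one field is half the frame */

	if (!field)
		return 0;			/* top field starts at offset 0 */

	if (chroma_plane)
		field_height /= 2;		/* co-planar 4:2:0 chroma has half the lines */

	return width * field_height * bytes_per_pixel;
}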
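Two other pieces of buffer geometry the reworked descriptor code relies on: when a co-planar format (NV12/NV16/NV21) is delivered in a single buffer plane, the chroma data is addressed at an offset of one full luma plane, and the de-interlacer's motion-vector buffer stride is padded up to the 16-byte boundary mentioned in the comment in the hunk above. A standalone sketch, with the alignment macro written out and the 16-byte constant assumed here for illustration:

#include <stdint.h>

/* Round x up to a multiple of a (a must be a power of two), as the kernel's ALIGN() does. */
#define ALIGN_UP(x, a)		(((x) + (a) - 1) & ~((uint32_t)(a) - 1))
#define VPDMA_STRIDE_ALIGN	16	/* assumed value; see the 16-byte comment in the hunk */

/* Chroma offset when luma and chroma of a co-planar format share one plane. */
static uint32_t single_plane_chroma_offset(uint32_t luma_bytesperline,
					   uint32_t height)
{
	return luma_bytesperline * height;
}

/* Motion-vector buffer stride; depth is the MV entry depth in bits. */
static uint32_t mv_buf_stride(uint32_t width, uint32_t depth)
{
	return ALIGN_UP((width * depth) >> 3, VPDMA_STRIDE_ALIGN);
}

For example, with an 8-bit-per-pixel entry a 1918-pixel width gives a 1918-byte raw stride, which mv_buf_stride() pads to 1920.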