.. | .. |
20 | 20 | #include <media/videobuf2-vmalloc.h>
21 | 21 |
22 | 22 | #include "uvc.h"
| 23 | +#include "u_uvc.h"
23 | 24 |
24 | 25 | /* ------------------------------------------------------------------------
25 | 26 |  * Video buffers queue management.
.. | .. |
43 | 44 | {
44 | 45 | 	struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
45 | 46 | 	struct uvc_video *video = container_of(queue, struct uvc_video, queue);
| 47 | +#if defined(CONFIG_ARCH_ROCKCHIP) && defined(CONFIG_NO_GKI)
| 48 | +	struct uvc_device *uvc = container_of(video, struct uvc_device, video);
| 49 | +	struct f_uvc_opts *opts = fi_to_f_uvc_opts(uvc->func.fi);
| 50 | +#endif
| 51 | +	unsigned int req_size;
| 52 | +	unsigned int nreq;
46 | 53 |
47 | 54 | 	if (*nbuffers > UVC_MAX_VIDEO_BUFFERS)
48 | 55 | 		*nbuffers = UVC_MAX_VIDEO_BUFFERS;
.. | .. |
51 | 58 |
52 | 59 | 	sizes[0] = video->imagesize;
53 | 60 |
| 61 | +#if defined(CONFIG_ARCH_ROCKCHIP) && defined(CONFIG_NO_GKI)
| 62 | +	if (opts && opts->uvc_num_request > 0) {
| 63 | +		video->uvc_num_requests = opts->uvc_num_request;
| 64 | +		return 0;
| 65 | +	}
| 66 | +#endif
| 67 | +
| 68 | +	req_size = video->ep->maxpacket
| 69 | +		 * max_t(unsigned int, video->ep->maxburst, 1)
| 70 | +		 * (video->ep->mult);
| 71 | +
| 72 | +	/* We divide by two to increase the chance of running
| 73 | +	 * into fewer requests for smaller frame sizes.
| 74 | +	 */
| 75 | +	nreq = DIV_ROUND_UP(DIV_ROUND_UP(sizes[0], 2), req_size);
| 76 | +	nreq = clamp(nreq, 4U, 64U);
| 77 | +	video->uvc_num_requests = nreq;
| 78 | +
54 | 79 | 	return 0;
55 | 80 | }
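A worked example of the request sizing above, using illustrative endpoint
values that are not taken from this patch: a high-speed isochronous
endpoint with maxpacket = 1024, mult = 3 and maxburst = 0 gives
req_size = 1024 * max(0, 1) * 3 = 3072 bytes per request.

	/* 1080p YUYV: sizes[0] = 1920 * 1080 * 2 = 4147200 bytes */
	nreq = DIV_ROUND_UP(DIV_ROUND_UP(4147200, 2), 3072);	/* = 675 */
	nreq = clamp(675U, 4U, 64U);				/* = 64  */

	/* QVGA YUYV: sizes[0] = 320 * 240 * 2 = 153600 bytes */
	nreq = DIV_ROUND_UP(DIV_ROUND_UP(153600, 2), 3072);	/* = 25  */
	nreq = clamp(25U, 4U, 64U);				/* = 25  */

Halving the frame size before dividing is what lets smaller frames settle
on fewer requests, while large frames simply saturate at the upper clamp
of 64.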
| 81 | +
| 82 | +#if defined(CONFIG_ARCH_ROCKCHIP) && defined(CONFIG_NO_GKI)
| 83 | +/*
| 84 | + * uvc_dma_buf_phys_to_virt - Get the physical address of the dma_buf and
| 85 | + * translate it to a virtual address.
| 86 | + *
| 87 | + * @uvc: the uvc_device whose USB controller will access the buffer
| 88 | + * @dbuf: the dma_buf of the vb2_plane
| 89 | + *
| 90 | + * This function is used for dma-bufs allocated by the Contiguous Memory
| 91 | + * Allocator.
| 92 | + *
| 93 | + * Returns:
| 94 | + * The virtual address of the dma_buf.
| 95 | + */
| 96 | +static void *uvc_dma_buf_phys_to_virt(struct uvc_device *uvc,
| 97 | +				       struct dma_buf *dbuf)
| 98 | +{
| 99 | +	struct usb_gadget *gadget = uvc->func.config->cdev->gadget;
| 100 | +	struct dma_buf_attachment *attachment;
| 101 | +	struct sg_table *table;
| 102 | +	struct scatterlist *sgl;
| 103 | +	dma_addr_t phys = 0;
| 104 | +	int i;
| 105 | +
| 106 | +	attachment = dma_buf_attach(dbuf, gadget->dev.parent);
| 107 | +	if (IS_ERR(attachment))
| 108 | +		return ERR_PTR(-ENOMEM);
| 109 | +
| 110 | +	table = dma_buf_map_attachment(attachment, DMA_BIDIRECTIONAL);
| 111 | +	if (IS_ERR(table)) {
| 112 | +		dma_buf_detach(dbuf, attachment);
| 113 | +		return ERR_PTR(-ENOMEM);
| 114 | +	}
| 115 | +
| 116 | +	for_each_sgtable_sg(table, sgl, i)
| 117 | +		phys = sg_phys(sgl);
| 118 | +
| 119 | +	dma_buf_unmap_attachment(attachment, table, DMA_BIDIRECTIONAL);
| 120 | +	dma_buf_detach(dbuf, attachment);
| 121 | +
| 122 | +	if (i > 1) {
| 123 | +		uvcg_err(&uvc->func, "Multiple sg entries not supported for uvc zero copy\n");
| 124 | +		return ERR_PTR(-ENOMEM);
| 125 | +	}
| 126 | +
| 127 | +	return phys_to_virt(phys);
| 128 | +}
| 129 | +
| 130 | +static void *uvc_buffer_mem_prepare(struct vb2_buffer *vb,
| 131 | +				    struct uvc_video_queue *queue)
| 132 | +{
| 133 | +	struct uvc_video *video = container_of(queue, struct uvc_video, queue);
| 134 | +	struct uvc_device *uvc = container_of(video, struct uvc_device, video);
| 135 | +	struct f_uvc_opts *opts = fi_to_f_uvc_opts(uvc->func.fi);
| 136 | +	void *mem;
| 137 | +
| 138 | +	if (!opts->uvc_zero_copy || video->fcc == V4L2_PIX_FMT_YUYV)
| 139 | +		return (vb2_plane_vaddr(vb, 0) + vb2_plane_data_offset(vb, 0));
| 140 | +
| 141 | +	mem = uvc_dma_buf_phys_to_virt(uvc, vb->planes[0].dbuf);
| 142 | +	if (IS_ERR(mem))
| 143 | +		return ERR_PTR(-ENOMEM);
| 144 | +
| 145 | +	return (mem + vb2_plane_data_offset(vb, 0));
| 146 | +}
| 147 | +#endif
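The zero-copy path above only applies to DMABUF-backed planes: vb2 populates
vb->planes[0].dbuf only for V4L2_MEMORY_DMABUF buffers, and the helper further
requires the dma-buf to be a single physically contiguous (CMA) segment. Below
is a minimal userspace sketch of feeding such a buffer to the gadget's video
node; the device path, buffer count, and the origin of dmabuf_fd are
illustrative assumptions, not part of this patch, and it presumes the vendor
tree accepts V4L2_MEMORY_DMABUF on this queue.

	#include <sys/ioctl.h>
	#include <linux/videodev2.h>

	/* Queue one externally allocated dma-buf on the UVC gadget video
	 * node. fd: an open /dev/videoN for the gadget; dmabuf_fd: a
	 * single-segment CMA allocation (e.g. from a DMA heap). */
	static int queue_one_dmabuf(int fd, int dmabuf_fd, unsigned int bytesused)
	{
		struct v4l2_requestbuffers req = {
			.count  = 4,
			.type   = V4L2_BUF_TYPE_VIDEO_OUTPUT,
			.memory = V4L2_MEMORY_DMABUF,
		};
		struct v4l2_buffer buf = {
			.index  = 0,
			.type   = V4L2_BUF_TYPE_VIDEO_OUTPUT,
			.memory = V4L2_MEMORY_DMABUF,
		};

		if (ioctl(fd, VIDIOC_REQBUFS, &req) < 0)
			return -1;

		buf.m.fd      = dmabuf_fd;
		buf.bytesused = bytesused;
		return ioctl(fd, VIDIOC_QBUF, &buf);
	}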
56 | 147 |
57 | 148 | static int uvc_buffer_prepare(struct vb2_buffer *vb)
58 | 149 | {
.. | .. |
70 | 161 | 		return -ENODEV;
71 | 162 |
72 | 163 | 	buf->state = UVC_BUF_STATE_QUEUED;
| 164 | +#if defined(CONFIG_ARCH_ROCKCHIP) && defined(CONFIG_NO_GKI)
| 165 | +	buf->mem = uvc_buffer_mem_prepare(vb, queue);
| 166 | +	if (IS_ERR(buf->mem))
| 167 | +		return -ENOMEM;
| 168 | +#else
73 | 169 | 	buf->mem = vb2_plane_vaddr(vb, 0);
| 170 | +#endif
74 | 171 | 	buf->length = vb2_plane_size(vb, 0);
75 | 172 | 	if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
76 | 173 | 		buf->bytesused = 0;
.. | .. |
102 | 199 | 	spin_unlock_irqrestore(&queue->irqlock, flags);
103 | 200 | }
104 | 201 |
105 | | -static struct vb2_ops uvc_queue_qops = {
| 202 | +static const struct vb2_ops uvc_queue_qops = {
106 | 203 | 	.queue_setup = uvc_queue_setup,
107 | 204 | 	.buf_prepare = uvc_buffer_prepare,
108 | 205 | 	.buf_queue = uvc_buffer_queue,
.. | .. |
171 | 268 |
172 | 269 | int uvcg_queue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf)
173 | 270 | {
174 | | -	unsigned long flags;
175 | | -	int ret;
176 | | -
177 | | -	ret = vb2_qbuf(&queue->queue, buf);
178 | | -	if (ret < 0)
179 | | -		return ret;
180 | | -
181 | | -	spin_lock_irqsave(&queue->irqlock, flags);
182 | | -	ret = (queue->flags & UVC_QUEUE_PAUSED) != 0;
183 | | -	queue->flags &= ~UVC_QUEUE_PAUSED;
184 | | -	spin_unlock_irqrestore(&queue->irqlock, flags);
185 | | -	return ret;
| 271 | +	return vb2_qbuf(&queue->queue, NULL, buf);
186 | 272 | }
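The NULL added to the vb2_qbuf() call matches the signature change that came
with the media request API in v4.20, where vb2_qbuf() gained a media_device
argument used to look up requests; passing NULL opts this queue out of
request support entirely:

	/* include/media/videobuf2-v4l2.h, v4.20 and later */
	int vb2_qbuf(struct vb2_queue *q, struct media_device *mdev,
		     struct v4l2_buffer *b);

Note also that the UVC_QUEUE_PAUSED bookkeeping that used to live here is
dropped rather than moved; its producer side in the irqqueue head lookup is
removed in the final hunk below.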
187 | 273 |
188 | 274 | /*
.. | .. |
250 | 336 | 		buf->state = UVC_BUF_STATE_ERROR;
251 | 337 | 		vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_ERROR);
252 | 338 | 	}
| 339 | +	queue->buf_used = 0;
| 340 | +
253 | 341 | 	/* This must be protected by the irqlock spinlock to avoid race
254 | 342 | 	 * conditions between uvc_queue_buffer and the disconnection event that
255 | 343 | 	 * could result in an interruptible wait in uvc_dequeue_buffer. Do not
.. | .. |
348 | 436 | 	if (!list_empty(&queue->irqqueue))
349 | 437 | 		buf = list_first_entry(&queue->irqqueue, struct uvc_buffer,
350 | 438 | 				       queue);
351 | | -	else
352 | | -		queue->flags |= UVC_QUEUE_PAUSED;
353 | 439 |
354 | 440 | 	return buf;
355 | 441 | }