FFmpeg 4.3.7
v4l2_buffers.c
1 /*
2  * V4L2 buffer helper functions.
3  *
4  * Copyright (C) 2017 Alexis Ballier <aballier@gentoo.org>
5  * Copyright (C) 2017 Jorge Ramirez <jorge.ramirez-ortiz@linaro.org>
6  *
7  * This file is part of FFmpeg.
8  *
9  * FFmpeg is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 2.1 of the License, or (at your option) any later version.
13  *
14  * FFmpeg is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with FFmpeg; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22  */
23 
24 #include <linux/videodev2.h>
25 #include <sys/ioctl.h>
26 #include <sys/mman.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29 #include <poll.h>
30 #include "libavcodec/avcodec.h"
31 #include "libavcodec/internal.h"
32 #include "libavutil/pixdesc.h"
33 #include "v4l2_context.h"
34 #include "v4l2_buffers.h"
35 #include "v4l2_m2m.h"
36 
37 #define USEC_PER_SEC 1000000
38 static AVRational v4l2_timebase = { 1, USEC_PER_SEC };
39 
40 static inline V4L2m2mContext *buf_to_m2mctx(V4L2Buffer *buf)
41 {
42  return V4L2_TYPE_IS_OUTPUT(buf->context->type) ?
43  container_of(buf->context, V4L2m2mContext, output) :
44  container_of(buf->context, V4L2m2mContext, capture);
45 }
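
buf_to_m2mctx() works because V4L2m2mContext embeds its capture and output V4L2Context members directly, so container_of() can step from the member pointer back to the owning structure. A minimal, self-contained sketch of the same pointer arithmetic, using hypothetical struct names rather than the real V4L2 types:

    #include <stddef.h>

    /* Hypothetical types, illustration only. */
    struct inner { int x; };
    struct outer { int id; struct inner member; };

    #define my_container_of(ptr, type, field) \
        ((type *)((char *)(ptr) - offsetof(type, field)))

    static struct outer *inner_to_outer(struct inner *in)
    {
        /* Same idea as buf_to_m2mctx(): subtract the member's offset. */
        return my_container_of(in, struct outer, member);
    }
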
46 
47 static inline AVCodecContext *logger(V4L2Buffer *buf)
48 {
49  return buf_to_m2mctx(buf)->avctx;
50 }
51 
52 static inline AVRational v4l2_get_timebase(V4L2Buffer *avbuf)
53 {
54  V4L2m2mContext *s = buf_to_m2mctx(avbuf);
55 
56  if (s->avctx->pkt_timebase.num)
57  return s->avctx->pkt_timebase;
58  return s->avctx->time_base;
59 }
60 
61 static inline void v4l2_set_pts(V4L2Buffer *out, int64_t pts)
62 {
63  int64_t v4l2_pts;
64 
65  if (pts == AV_NOPTS_VALUE)
66  pts = 0;
67 
68  /* convert pts to v4l2 timebase */
69  v4l2_pts = av_rescale_q(pts, v4l2_get_timebase(out), v4l2_timebase);
70  out->buf.timestamp.tv_usec = v4l2_pts % USEC_PER_SEC;
71  out->buf.timestamp.tv_sec = v4l2_pts / USEC_PER_SEC;
72 }
73 
74 static inline int64_t v4l2_get_pts(V4L2Buffer *avbuf)
75 {
76  int64_t v4l2_pts;
77 
78  /* convert pts back to encoder timebase */
79  v4l2_pts = (int64_t)avbuf->buf.timestamp.tv_sec * USEC_PER_SEC +
80  avbuf->buf.timestamp.tv_usec;
81 
82  return av_rescale_q(v4l2_pts, v4l2_timebase, v4l2_get_timebase(avbuf));
83 }
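
v4l2_set_pts() and v4l2_get_pts() rescale between the codec timebase (pkt_timebase when set, otherwise time_base) and V4L2's fixed microsecond clock, splitting the value across timestamp.tv_sec/tv_usec and recombining it on the way back. A worked round trip under an assumed 1/25 input timebase (the timebase is an example, not a value from this file):

    /* Sketch only: pts = 50 in a 1/25 s timebase. */
    AVRational tb      = { 1, 25 };
    AVRational usec_tb = { 1, 1000000 };                /* same as v4l2_timebase */
    int64_t v4l2_pts   = av_rescale_q(50, tb, usec_tb); /* 2000000 usec */
    int64_t tv_sec     = v4l2_pts / 1000000;            /* 2 */
    int64_t tv_usec    = v4l2_pts % 1000000;            /* 0 */
    int64_t pts_back   = av_rescale_q(tv_sec * 1000000 + tv_usec,
                                      usec_tb, tb);     /* 50 again */
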
84 
85 static enum AVColorPrimaries v4l2_get_color_primaries(V4L2Buffer *buf)
86 {
87  enum v4l2_ycbcr_encoding ycbcr;
88  enum v4l2_colorspace cs;
89 
90  cs = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?
91  buf->context->format.fmt.pix_mp.colorspace :
92  buf->context->format.fmt.pix.colorspace;
93 
94  ycbcr = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?
95  buf->context->format.fmt.pix_mp.ycbcr_enc:
96  buf->context->format.fmt.pix.ycbcr_enc;
97 
98  switch(ycbcr) {
99  case V4L2_YCBCR_ENC_XV709:
100  case V4L2_YCBCR_ENC_709: return AVCOL_PRI_BT709;
101  case V4L2_YCBCR_ENC_XV601:
102  case V4L2_YCBCR_ENC_601:return AVCOL_PRI_BT470M;
103  default:
104  break;
105  }
106 
107  switch(cs) {
108  case V4L2_COLORSPACE_470_SYSTEM_BG: return AVCOL_PRI_BT470BG;
109  case V4L2_COLORSPACE_SMPTE170M: return AVCOL_PRI_SMPTE170M;
110  case V4L2_COLORSPACE_SMPTE240M: return AVCOL_PRI_SMPTE240M;
111  case V4L2_COLORSPACE_BT2020: return AVCOL_PRI_BT2020;
112  default:
113  break;
114  }
115 
116  return AVCOL_PRI_UNSPECIFIED;
117 }
118 
119 static enum AVColorRange v4l2_get_color_range(V4L2Buffer *buf)
120 {
121  enum v4l2_quantization qt;
122 
123  qt = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?
124  buf->context->format.fmt.pix_mp.quantization :
125  buf->context->format.fmt.pix.quantization;
126 
127  switch (qt) {
128  case V4L2_QUANTIZATION_LIM_RANGE: return AVCOL_RANGE_MPEG;
129  case V4L2_QUANTIZATION_FULL_RANGE: return AVCOL_RANGE_JPEG;
130  default:
131  break;
132  }
133 
134  return AVCOL_RANGE_UNSPECIFIED;
135 }
136 
137 static enum AVColorSpace v4l2_get_color_space(V4L2Buffer *buf)
138 {
139  enum v4l2_ycbcr_encoding ycbcr;
140  enum v4l2_colorspace cs;
141 
142  cs = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?
143  buf->context->format.fmt.pix_mp.colorspace :
144  buf->context->format.fmt.pix.colorspace;
145 
146  ycbcr = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?
147  buf->context->format.fmt.pix_mp.ycbcr_enc:
148  buf->context->format.fmt.pix.ycbcr_enc;
149 
150  switch(cs) {
151  case V4L2_COLORSPACE_SRGB: return AVCOL_SPC_RGB;
152  case V4L2_COLORSPACE_REC709: return AVCOL_SPC_BT709;
153  case V4L2_COLORSPACE_470_SYSTEM_M: return AVCOL_SPC_FCC;
154  case V4L2_COLORSPACE_470_SYSTEM_BG: return AVCOL_SPC_BT470BG;
155  case V4L2_COLORSPACE_SMPTE170M: return AVCOL_SPC_SMPTE170M;
156  case V4L2_COLORSPACE_SMPTE240M: return AVCOL_SPC_SMPTE240M;
157  case V4L2_COLORSPACE_BT2020:
158  if (ycbcr == V4L2_YCBCR_ENC_BT2020_CONST_LUM)
159  return AVCOL_SPC_BT2020_CL;
160  else
161  return AVCOL_SPC_BT2020_NCL;
162  default:
163  break;
164  }
165 
166  return AVCOL_SPC_UNSPECIFIED;
167 }
168 
169 static enum AVColorTransferCharacteristic v4l2_get_color_trc(V4L2Buffer *buf)
170 {
171  enum v4l2_ycbcr_encoding ycbcr;
172  enum v4l2_xfer_func xfer;
173  enum v4l2_colorspace cs;
174 
175  cs = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?
176  buf->context->format.fmt.pix_mp.colorspace :
177  buf->context->format.fmt.pix.colorspace;
178 
179  ycbcr = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?
180  buf->context->format.fmt.pix_mp.ycbcr_enc:
181  buf->context->format.fmt.pix.ycbcr_enc;
182 
183  xfer = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?
184  buf->context->format.fmt.pix_mp.xfer_func:
185  buf->context->format.fmt.pix.xfer_func;
186 
187  switch (xfer) {
188  case V4L2_XFER_FUNC_709: return AVCOL_TRC_BT709;
189  case V4L2_XFER_FUNC_SRGB: return AVCOL_TRC_IEC61966_2_1;
190  default:
191  break;
192  }
193 
194  switch (cs) {
195  case V4L2_COLORSPACE_470_SYSTEM_M: return AVCOL_TRC_GAMMA22;
196  case V4L2_COLORSPACE_470_SYSTEM_BG: return AVCOL_TRC_GAMMA28;
197  case V4L2_COLORSPACE_SMPTE170M: return AVCOL_TRC_SMPTE170M;
198  case V4L2_COLORSPACE_SMPTE240M: return AVCOL_TRC_SMPTE240M;
199  default:
200  break;
201  }
202 
203  switch (ycbcr) {
204  case V4L2_YCBCR_ENC_XV709:
205  case V4L2_YCBCR_ENC_XV601: return AVCOL_TRC_BT1361_ECG;
206  default:
207  break;
208  }
209 
210  return AVCOL_TRC_UNSPECIFIED;
211 }
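
All four helpers above (v4l2_get_color_primaries, v4l2_get_color_range, v4l2_get_color_space, v4l2_get_color_trc) pick their field from either fmt.pix_mp or fmt.pix depending on V4L2_TYPE_IS_MULTIPLANAR(). A hedged sketch of how that repeated selection could be factored into one helper; the function below is not part of this file, only the struct v4l2_format field names are real:

    /* Hypothetical helper, illustration only. */
    static inline enum v4l2_colorspace get_colorspace(const struct v4l2_format *fmt)
    {
        return V4L2_TYPE_IS_MULTIPLANAR(fmt->type) ? fmt->fmt.pix_mp.colorspace
                                                   : fmt->fmt.pix.colorspace;
    }
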
212 
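In summary, the release path implemented below:

    /*
     * AVBufferRef free callback installed by v4l2_buf_to_bufref() further down.
     * When the last plane reference of a buffer is dropped: during a re-init it
     * only decrements the context refcount and wakes the waiter once it reaches
     * zero; while draining an output queue it just marks the buffer available;
     * otherwise, if streaming is on, it re-queues the buffer to the driver.
     */
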
213 static void v4l2_free_buffer(void *opaque, uint8_t *unused)
214 {
215  V4L2Buffer* avbuf = opaque;
216  V4L2m2mContext *s = buf_to_m2mctx(avbuf);
217 
218  if (atomic_fetch_sub(&avbuf->context_refcount, 1) == 1) {
219  atomic_fetch_sub_explicit(&s->refcount, 1, memory_order_acq_rel);
220 
221  if (s->reinit) {
222  if (!atomic_load(&s->refcount))
223  sem_post(&s->refsync);
224  } else {
225  if (s->draining && V4L2_TYPE_IS_OUTPUT(avbuf->context->type)) {
226  /* no need to queue more buffers to the driver */
227  avbuf->status = V4L2BUF_AVAILABLE;
228  }
229  else if (avbuf->context->streamon)
230  ff_v4l2_buffer_enqueue(avbuf);
231  }
232 
233  av_buffer_unref(&avbuf->context_ref);
234  }
235 }
236 
237 static int v4l2_buf_increase_ref(V4L2Buffer *in)
238 {
239  V4L2m2mContext *s = buf_to_m2mctx(in);
240 
241  if (in->context_ref)
242  atomic_fetch_add(&in->context_refcount, 1);
243  else {
244  in->context_ref = av_buffer_ref(s->self_ref);
245  if (!in->context_ref)
246  return AVERROR(ENOMEM);
247 
248  in->context_refcount = 1;
249  }
250 
251  in->status = V4L2BUF_RET_USER;
252  atomic_fetch_add_explicit(&s->refcount, 1, memory_order_relaxed);
253 
254  return 0;
255 }
256 
257 static int v4l2_buf_to_bufref(V4L2Buffer *in, int plane, AVBufferRef **buf)
258 {
259  int ret;
260 
261  if (plane >= in->num_planes)
262  return AVERROR(EINVAL);
263 
264  /* even though most encoders return 0 in data_offset, encoding VP8 does require this value */
265  *buf = av_buffer_create((char *)in->plane_info[plane].mm_addr + in->planes[plane].data_offset,
266  in->plane_info[plane].length, v4l2_free_buffer, in, 0);
267  if (!*buf)
268  return AVERROR(ENOMEM);
269 
270  ret = v4l2_buf_increase_ref(in);
271  if (ret)
272  av_buffer_unref(buf);
273 
274  return ret;
275 }
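
v4l2_buf_to_bufref() hands the mmap()ed plane memory to the caller without copying: av_buffer_create() wraps the existing mapping and registers v4l2_free_buffer as the callback that runs when the last reference disappears. A reduced sketch of that wrapping pattern; the names below are placeholders, not code from this file:

    /* Placeholder release callback: return the memory to its real owner here. */
    static void my_release(void *opaque, uint8_t *data)
    {
        (void)opaque;
        (void)data;
    }

    /* Wrap 'len' bytes at 'addr' in a refcounted AVBufferRef without a copy. */
    static AVBufferRef *wrap_mapping(uint8_t *addr, int len, void *owner)
    {
        return av_buffer_create(addr, len, my_release, owner, 0);
    }
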
276 
277 static int v4l2_bufref_to_buf(V4L2Buffer *out, int plane, const uint8_t* data, int size, int offset, AVBufferRef* bref)
278 {
279  unsigned int bytesused, length;
280 
281  if (plane >= out->num_planes)
282  return AVERROR(EINVAL);
283 
284  length = out->plane_info[plane].length;
285  bytesused = FFMIN(size+offset, length);
286 
287  memcpy((uint8_t*)out->plane_info[plane].mm_addr+offset, data, FFMIN(size, length-offset));
288 
289  if (V4L2_TYPE_IS_MULTIPLANAR(out->buf.type)) {
290  out->planes[plane].bytesused = bytesused;
291  out->planes[plane].length = length;
292  } else {
293  out->buf.bytesused = bytesused;
294  out->buf.length = length;
295  }
296 
297  return 0;
298 }
299 
300 static int v4l2_buffer_buf_to_swframe(AVFrame *frame, V4L2Buffer *avbuf)
301 {
302  int i, ret;
303 
304  frame->format = avbuf->context->av_pix_fmt;
305 
306  for (i = 0; i < avbuf->num_planes; i++) {
307  ret = v4l2_buf_to_bufref(avbuf, i, &frame->buf[i]);
308  if (ret)
309  return ret;
310 
311  frame->linesize[i] = avbuf->plane_info[i].bytesperline;
312  frame->data[i] = frame->buf[i]->data;
313  }
314 
315  /* fixup special cases */
316  switch (avbuf->context->av_pix_fmt) {
317  case AV_PIX_FMT_NV12:
318  case AV_PIX_FMT_NV21:
319  if (avbuf->num_planes > 1)
320  break;
321  frame->linesize[1] = avbuf->plane_info[0].bytesperline;
322  frame->data[1] = frame->buf[0]->data + avbuf->plane_info[0].bytesperline * avbuf->context->format.fmt.pix_mp.height;
323  break;
324 
325  case AV_PIX_FMT_YUV420P:
326  if (avbuf->num_planes > 1)
327  break;
328  frame->linesize[1] = avbuf->plane_info[0].bytesperline >> 1;
329  frame->linesize[2] = avbuf->plane_info[0].bytesperline >> 1;
330  frame->data[1] = frame->buf[0]->data + avbuf->plane_info[0].bytesperline * avbuf->context->format.fmt.pix_mp.height;
331  frame->data[2] = frame->data[1] + ((avbuf->plane_info[0].bytesperline * avbuf->context->format.fmt.pix_mp.height) >> 2);
332  break;
333 
334  default:
335  break;
336  }
337 
338  return 0;
339 }
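
For NV12/NV21 and YUV420P frames that the driver returns as a single plane, the fixup above derives the chroma pointers from the luma stride and the coded height. Worked numbers under a hypothetical 640x480 YUV420P buffer with bytesperline == 640 (values chosen for illustration, not taken from this file):

    /*
     *   linesize[1] = linesize[2] = 640 >> 1        = 320
     *   data[1]     = data[0] + 640 * 480           = data[0] + 307200
     *   data[2]     = data[1] + ((640 * 480) >> 2)  = data[1] +  76800
     */
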
340 
341 static int v4l2_buffer_swframe_to_buf(const AVFrame *frame, V4L2Buffer *out)
342 {
343  int i, ret;
344  struct v4l2_format fmt = out->context->format;
345  int pixel_format = V4L2_TYPE_IS_MULTIPLANAR(fmt.type) ?
346  fmt.fmt.pix_mp.pixelformat : fmt.fmt.pix.pixelformat;
347  int height = V4L2_TYPE_IS_MULTIPLANAR(fmt.type) ?
348  fmt.fmt.pix_mp.height : fmt.fmt.pix.height;
349  int is_planar_format = 0;
350 
351  switch (pixel_format) {
352  case V4L2_PIX_FMT_YUV420M:
353  case V4L2_PIX_FMT_YVU420M:
354 #ifdef V4L2_PIX_FMT_YUV422M
355  case V4L2_PIX_FMT_YUV422M:
356 #endif
357 #ifdef V4L2_PIX_FMT_YVU422M
358  case V4L2_PIX_FMT_YVU422M:
359 #endif
360 #ifdef V4L2_PIX_FMT_YUV444M
361  case V4L2_PIX_FMT_YUV444M:
362 #endif
363 #ifdef V4L2_PIX_FMT_YVU444M
364  case V4L2_PIX_FMT_YVU444M:
365 #endif
366  case V4L2_PIX_FMT_NV12M:
367  case V4L2_PIX_FMT_NV21M:
368  case V4L2_PIX_FMT_NV12MT_16X16:
369  case V4L2_PIX_FMT_NV12MT:
370  case V4L2_PIX_FMT_NV16M:
371  case V4L2_PIX_FMT_NV61M:
372  is_planar_format = 1;
373  }
374 
375  if (!is_planar_format) {
376  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(out->context->av_pix_fmt);
377  int planes_nb = 0;
378  int offset = 0;
379 
380  for (i = 0; i < desc->nb_components; i++)
381  planes_nb = FFMAX(planes_nb, desc->comp[i].plane + 1);
382 
383  for (i = 0; i < planes_nb; i++) {
384  int size, h = height;
385  if (i == 1 || i == 2) {
386  h = AV_CEIL_RSHIFT(h, desc->log2_chroma_h);
387  }
388  size = frame->linesize[i] * h;
389  ret = v4l2_bufref_to_buf(out, 0, frame->data[i], size, offset, frame->buf[i]);
390  if (ret)
391  return ret;
392  offset += size;
393  }
394  return 0;
395  }
396 
397  for (i = 0; i < out->num_planes; i++) {
398  ret = v4l2_bufref_to_buf(out, i, frame->buf[i]->data, frame->buf[i]->size, 0, frame->buf[i]);
399  if (ret)
400  return ret;
401  }
402 
403  return 0;
404 }
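
The non-planar branch above packs the AVFrame's planes back to back into the single V4L2 plane, halving the height of subsampled chroma planes with AV_CEIL_RSHIFT before computing each copy size. Continuing the same hypothetical 640x480 YUV420P example (linesizes 640/320/320):

    /*
     *   plane 0: 640 * 480 = 307200 bytes, copied at offset 0
     *   plane 1: 320 * AV_CEIL_RSHIFT(480, 1) = 320 * 240 = 76800 bytes at offset 307200
     *   plane 2: 320 * 240 = 76800 bytes at offset 384000
     */
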
405 
406 /******************************************************************************
407  *
408  * V4L2Buffer interface
409  *
410  ******************************************************************************/
411 
412 int ff_v4l2_buffer_avframe_to_buf(const AVFrame *frame, V4L2Buffer *out)
413 {
414  v4l2_set_pts(out, frame->pts);
415 
416  return v4l2_buffer_swframe_to_buf(frame, out);
417 }
418 
419 int ff_v4l2_buffer_buf_to_avframe(AVFrame *frame, V4L2Buffer *avbuf)
420 {
421  int ret;
422 
423  av_frame_unref(frame);
424 
425  /* 1. get references to the actual data */
426  ret = v4l2_buffer_buf_to_swframe(frame, avbuf);
427  if (ret)
428  return ret;
429 
430  /* 2. get frame information */
431  frame->key_frame = !!(avbuf->buf.flags & V4L2_BUF_FLAG_KEYFRAME);
432  frame->color_primaries = v4l2_get_color_primaries(avbuf);
433  frame->colorspace = v4l2_get_color_space(avbuf);
434  frame->color_range = v4l2_get_color_range(avbuf);
435  frame->color_trc = v4l2_get_color_trc(avbuf);
436  frame->pts = v4l2_get_pts(avbuf);
437  frame->pkt_dts = AV_NOPTS_VALUE;
438 
439  /* these values are updated also during re-init in v4l2_process_driver_event */
440  frame->height = avbuf->context->height;
441  frame->width = avbuf->context->width;
442  frame->sample_aspect_ratio = avbuf->context->sample_aspect_ratio;
443 
444  /* 3. report errors upstream */
445  if (avbuf->buf.flags & V4L2_BUF_FLAG_ERROR) {
446  av_log(logger(avbuf), AV_LOG_ERROR, "%s: driver decode error\n", avbuf->context->name);
447  frame->decode_error_flags |= FF_DECODE_ERROR_INVALID_BITSTREAM;
448 }
449 
450  return 0;
451 }
452 
453 int ff_v4l2_buffer_buf_to_avpkt(AVPacket *pkt, V4L2Buffer *avbuf)
454 {
455  int ret;
456 
457  av_packet_unref(pkt);
458  ret = v4l2_buf_to_bufref(avbuf, 0, &pkt->buf);
459  if (ret)
460  return ret;
461 
462  pkt->size = V4L2_TYPE_IS_MULTIPLANAR(avbuf->buf.type) ? avbuf->buf.m.planes[0].bytesused : avbuf->buf.bytesused;
463  pkt->data = pkt->buf->data;
464 
465  if (avbuf->buf.flags & V4L2_BUF_FLAG_KEYFRAME)
466  pkt->flags |= AV_PKT_FLAG_KEY;
467 
468  if (avbuf->buf.flags & V4L2_BUF_FLAG_ERROR) {
469  av_log(logger(avbuf), AV_LOG_ERROR, "%s driver encode error\n", avbuf->context->name);
470  pkt->flags |= AV_PKT_FLAG_CORRUPT;
471  }
472 
473  pkt->dts = pkt->pts = v4l2_get_pts(avbuf);
474 
475  return 0;
476 }
477 
478 int ff_v4l2_buffer_avpkt_to_buf(const AVPacket *pkt, V4L2Buffer *out)
479 {
480  int ret;
481 
482  ret = v4l2_bufref_to_buf(out, 0, pkt->data, pkt->size, 0, pkt->buf);
483  if (ret)
484  return ret;
485 
486  v4l2_set_pts(out, pkt->pts);
487 
488  if (pkt->flags & AV_PKT_FLAG_KEY)
489  out->flags = V4L2_BUF_FLAG_KEYFRAME;
490 
491  return 0;
492 }
493 
494 int ff_v4l2_buffer_initialize(V4L2Buffer *avbuf, int index)
495 {
496  V4L2Context *ctx = avbuf->context;
497  int ret, i;
498 
499  avbuf->buf.memory = V4L2_MEMORY_MMAP;
500  avbuf->buf.type = ctx->type;
501  avbuf->buf.index = index;
502 
503  if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
504  avbuf->buf.length = VIDEO_MAX_PLANES;
505  avbuf->buf.m.planes = avbuf->planes;
506  }
507 
508  ret = ioctl(buf_to_m2mctx(avbuf)->fd, VIDIOC_QUERYBUF, &avbuf->buf);
509  if (ret < 0)
510  return AVERROR(errno);
511 
512  if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
513  avbuf->num_planes = 0;
514  /* in MP, the V4L2 API states that buf.length means num_planes */
515  for (i = 0; i < avbuf->buf.length; i++) {
516  if (avbuf->buf.m.planes[i].length)
517  avbuf->num_planes++;
518  }
519  } else
520  avbuf->num_planes = 1;
521 
522  for (i = 0; i < avbuf->num_planes; i++) {
523 
524  avbuf->plane_info[i].bytesperline = V4L2_TYPE_IS_MULTIPLANAR(ctx->type) ?
525  ctx->format.fmt.pix_mp.plane_fmt[i].bytesperline :
526  ctx->format.fmt.pix.bytesperline;
527 
528  if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
529  avbuf->plane_info[i].length = avbuf->buf.m.planes[i].length;
530  avbuf->plane_info[i].mm_addr = mmap(NULL, avbuf->buf.m.planes[i].length,
531  PROT_READ | PROT_WRITE, MAP_SHARED,
532  buf_to_m2mctx(avbuf)->fd, avbuf->buf.m.planes[i].m.mem_offset);
533  } else {
534  avbuf->plane_info[i].length = avbuf->buf.length;
535  avbuf->plane_info[i].mm_addr = mmap(NULL, avbuf->buf.length,
536  PROT_READ | PROT_WRITE, MAP_SHARED,
537  buf_to_m2mctx(avbuf)->fd, avbuf->buf.m.offset);
538  }
539 
540  if (avbuf->plane_info[i].mm_addr == MAP_FAILED)
541  return AVERROR(ENOMEM);
542  }
543 
544  avbuf->status = V4L2BUF_AVAILABLE;
545 
546  if (V4L2_TYPE_IS_OUTPUT(ctx->type))
547  return 0;
548 
549  if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
550  avbuf->buf.m.planes = avbuf->planes;
551  avbuf->buf.length = avbuf->num_planes;
552 
553  } else {
554  avbuf->buf.bytesused = avbuf->planes[0].bytesused;
555  avbuf->buf.length = avbuf->planes[0].length;
556  }
557 
558  return ff_v4l2_buffer_enqueue(avbuf);
559 }
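
ff_v4l2_buffer_initialize() queries one buffer with VIDIOC_QUERYBUF, mmap()s each of its planes and, for capture-side buffers, enqueues it right away. A hedged sketch of how a caller could drive it over a whole queue; the buffers/num loop and the assumption that each entry's 'context' field already points back at 'ctx' are illustrative, not taken from this file:

    /* Sketch: initialize 'num' pre-allocated, zeroed V4L2Buffer entries. */
    static int init_all_buffers(V4L2Context *ctx, int num)
    {
        int i, ret;

        for (i = 0; i < num; i++) {
            ret = ff_v4l2_buffer_initialize(&ctx->buffers[i], i);
            if (ret < 0)
                return ret;
        }
        return 0;
    }
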
560 
561 int ff_v4l2_buffer_enqueue(V4L2Buffer *avbuf)
562 {
563  int ret;
564 
565  avbuf->buf.flags = avbuf->flags;
566 
567  ret = ioctl(buf_to_m2mctx(avbuf)->fd, VIDIOC_QBUF, &avbuf->buf);
568  if (ret < 0)
569  return AVERROR(errno);
570 
571  avbuf->status = V4L2BUF_IN_DRIVER;
572 
573  return 0;
574 }