FFmpeg  4.3.7
frame.c
/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "channel_layout.h"
#include "avassert.h"
#include "buffer.h"
#include "common.h"
#include "dict.h"
#include "frame.h"
#include "imgutils.h"
#include "mem.h"
#include "samplefmt.h"
#include "hwcontext.h"

#if FF_API_FRAME_GET_SET
MAKE_ACCESSORS(AVFrame, frame, int64_t, best_effort_timestamp)
MAKE_ACCESSORS(AVFrame, frame, int64_t, pkt_duration)
MAKE_ACCESSORS(AVFrame, frame, int64_t, pkt_pos)
MAKE_ACCESSORS(AVFrame, frame, int64_t, channel_layout)
MAKE_ACCESSORS(AVFrame, frame, int,     channels)
MAKE_ACCESSORS(AVFrame, frame, int,     sample_rate)
MAKE_ACCESSORS(AVFrame, frame, AVDictionary *, metadata)
MAKE_ACCESSORS(AVFrame, frame, int,     decode_error_flags)
MAKE_ACCESSORS(AVFrame, frame, int,     pkt_size)
MAKE_ACCESSORS(AVFrame, frame, enum AVColorSpace, colorspace)
MAKE_ACCESSORS(AVFrame, frame, enum AVColorRange, color_range)
#endif

#define CHECK_CHANNELS_CONSISTENCY(frame) \
    av_assert2(!(frame)->channel_layout || \
               (frame)->channels == \
               av_get_channel_layout_nb_channels((frame)->channel_layout))

#if FF_API_FRAME_QP
struct qp_properties {
    int stride;
    int type;
};

int av_frame_set_qp_table(AVFrame *f, AVBufferRef *buf, int stride, int qp_type)
{
    struct qp_properties *p;
    AVFrameSideData *sd;
    AVBufferRef *ref;

FF_DISABLE_DEPRECATION_WARNINGS
    av_buffer_unref(&f->qp_table_buf);

    f->qp_table_buf = buf;
    f->qscale_table = buf->data;
    f->qstride      = stride;
    f->qscale_type  = qp_type;
FF_ENABLE_DEPRECATION_WARNINGS

    av_frame_remove_side_data(f, AV_FRAME_DATA_QP_TABLE_PROPERTIES);
    av_frame_remove_side_data(f, AV_FRAME_DATA_QP_TABLE_DATA);

    ref = av_buffer_ref(buf);
    if (!av_frame_new_side_data_from_buf(f, AV_FRAME_DATA_QP_TABLE_DATA, ref)) {
        av_buffer_unref(&ref);
        return AVERROR(ENOMEM);
    }

    sd = av_frame_new_side_data(f, AV_FRAME_DATA_QP_TABLE_PROPERTIES,
                                sizeof(struct qp_properties));
    if (!sd)
        return AVERROR(ENOMEM);

    p = (struct qp_properties *)sd->data;
    p->stride = stride;
    p->type = qp_type;

    return 0;
}

int8_t *av_frame_get_qp_table(AVFrame *f, int *stride, int *type)
{
    AVBufferRef *buf = NULL;

    *stride = 0;
    *type   = 0;

FF_DISABLE_DEPRECATION_WARNINGS
    if (f->qp_table_buf) {
        *stride = f->qstride;
        *type   = f->qscale_type;
        buf     = f->qp_table_buf;
FF_ENABLE_DEPRECATION_WARNINGS
    } else {
        AVFrameSideData *sd;
        struct qp_properties *p;
        sd = av_frame_get_side_data(f, AV_FRAME_DATA_QP_TABLE_PROPERTIES);
        if (!sd)
            return NULL;
        p = (struct qp_properties *)sd->data;
        sd = av_frame_get_side_data(f, AV_FRAME_DATA_QP_TABLE_DATA);
        if (!sd)
            return NULL;
        *stride = p->stride;
        *type   = p->type;
        buf     = sd->buf;
    }

    return buf ? buf->data : NULL;
}
#endif

const char *av_get_colorspace_name(enum AVColorSpace val)
{
    static const char * const name[] = {
        [AVCOL_SPC_RGB]       = "GBR",
        [AVCOL_SPC_BT709]     = "bt709",
        [AVCOL_SPC_FCC]       = "fcc",
        [AVCOL_SPC_BT470BG]   = "bt470bg",
        [AVCOL_SPC_SMPTE170M] = "smpte170m",
        [AVCOL_SPC_SMPTE240M] = "smpte240m",
        [AVCOL_SPC_YCOCG]     = "YCgCo",
    };
    if ((unsigned)val >= FF_ARRAY_ELEMS(name))
        return NULL;
    return name[val];
}

static void get_frame_defaults(AVFrame *frame)
{
    if (frame->extended_data != frame->data)
        av_freep(&frame->extended_data);

    memset(frame, 0, sizeof(*frame));

    frame->pts                   =
    frame->pkt_dts               = AV_NOPTS_VALUE;
#if FF_API_PKT_PTS
FF_DISABLE_DEPRECATION_WARNINGS
    frame->pkt_pts               = AV_NOPTS_VALUE;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
    frame->best_effort_timestamp = AV_NOPTS_VALUE;
    frame->pkt_duration        = 0;
    frame->pkt_pos             = -1;
    frame->pkt_size            = -1;
    frame->key_frame           = 1;
    frame->sample_aspect_ratio = (AVRational){ 0, 1 };
    frame->format              = -1; /* unknown */
    frame->extended_data       = frame->data;
    frame->color_primaries     = AVCOL_PRI_UNSPECIFIED;
    frame->color_trc           = AVCOL_TRC_UNSPECIFIED;
    frame->colorspace          = AVCOL_SPC_UNSPECIFIED;
    frame->color_range         = AVCOL_RANGE_UNSPECIFIED;
    frame->chroma_location     = AVCHROMA_LOC_UNSPECIFIED;
    frame->flags               = 0;
}

static void free_side_data(AVFrameSideData **ptr_sd)
{
    AVFrameSideData *sd = *ptr_sd;

    av_buffer_unref(&sd->buf);
    av_dict_free(&sd->metadata);
    av_freep(ptr_sd);
}

static void wipe_side_data(AVFrame *frame)
{
    int i;

    for (i = 0; i < frame->nb_side_data; i++) {
        free_side_data(&frame->side_data[i]);
    }
    frame->nb_side_data = 0;

    av_freep(&frame->side_data);
}

AVFrame *av_frame_alloc(void)
{
    AVFrame *frame = av_mallocz(sizeof(*frame));

    if (!frame)
        return NULL;

    frame->extended_data = NULL;
    get_frame_defaults(frame);

    return frame;
}

void av_frame_free(AVFrame **frame)
{
    if (!frame || !*frame)
        return;

    av_frame_unref(*frame);
    av_freep(frame);
}

static int get_video_buffer(AVFrame *frame, int align)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
    int ret, i, padded_height;
    int plane_padding = FFMAX(16 + 16/*STRIDE_ALIGN*/, align);

    if (!desc)
        return AVERROR(EINVAL);

    if ((ret = av_image_check_size(frame->width, frame->height, 0, NULL)) < 0)
        return ret;

    if (!frame->linesize[0]) {
        if (align <= 0)
            align = 32; /* STRIDE_ALIGN. Should be av_cpu_max_align() */

        for(i=1; i<=align; i+=i) {
            ret = av_image_fill_linesizes(frame->linesize, frame->format,
                                          FFALIGN(frame->width, i));
            if (ret < 0)
                return ret;
            if (!(frame->linesize[0] & (align-1)))
                break;
        }

        for (i = 0; i < 4 && frame->linesize[i]; i++)
            frame->linesize[i] = FFALIGN(frame->linesize[i], align);
    }

    padded_height = FFALIGN(frame->height, 32);
    if ((ret = av_image_fill_pointers(frame->data, frame->format, padded_height,
                                      NULL, frame->linesize)) < 0)
        return ret;

    frame->buf[0] = av_buffer_alloc(ret + 4*plane_padding);
    if (!frame->buf[0]) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    if ((ret = av_image_fill_pointers(frame->data, frame->format, padded_height,
                                      frame->buf[0]->data, frame->linesize)) < 0)
        goto fail;

    for (i = 1; i < 4; i++) {
        if (frame->data[i])
            frame->data[i] += i * plane_padding;
    }

    frame->extended_data = frame->data;

    return 0;
fail:
    av_frame_unref(frame);
    return ret;
}

static int get_audio_buffer(AVFrame *frame, int align)
{
    int channels;
    int planar   = av_sample_fmt_is_planar(frame->format);
    int planes;
    int ret, i;

    if (!frame->channels)
        frame->channels = av_get_channel_layout_nb_channels(frame->channel_layout);

    channels = frame->channels;
    planes   = planar ? channels : 1;

    CHECK_CHANNELS_CONSISTENCY(frame);
    if (!frame->linesize[0]) {
        ret = av_samples_get_buffer_size(&frame->linesize[0], channels,
                                         frame->nb_samples, frame->format,
                                         align);
        if (ret < 0)
            return ret;
    }

    if (planes > AV_NUM_DATA_POINTERS) {
        frame->extended_data = av_mallocz_array(planes,
                                          sizeof(*frame->extended_data));
        frame->extended_buf  = av_mallocz_array((planes - AV_NUM_DATA_POINTERS),
                                          sizeof(*frame->extended_buf));
        if (!frame->extended_data || !frame->extended_buf) {
            av_freep(&frame->extended_data);
            av_freep(&frame->extended_buf);
            return AVERROR(ENOMEM);
        }
        frame->nb_extended_buf = planes - AV_NUM_DATA_POINTERS;
    } else
        frame->extended_data = frame->data;

    for (i = 0; i < FFMIN(planes, AV_NUM_DATA_POINTERS); i++) {
        frame->buf[i] = av_buffer_alloc(frame->linesize[0]);
        if (!frame->buf[i]) {
            av_frame_unref(frame);
            return AVERROR(ENOMEM);
        }
        frame->extended_data[i] = frame->data[i] = frame->buf[i]->data;
    }
    for (i = 0; i < planes - AV_NUM_DATA_POINTERS; i++) {
        frame->extended_buf[i] = av_buffer_alloc(frame->linesize[0]);
        if (!frame->extended_buf[i]) {
            av_frame_unref(frame);
            return AVERROR(ENOMEM);
        }
        frame->extended_data[i + AV_NUM_DATA_POINTERS] = frame->extended_buf[i]->data;
    }
    return 0;

}

int av_frame_get_buffer(AVFrame *frame, int align)
{
    if (frame->format < 0)
        return AVERROR(EINVAL);

    if (frame->width > 0 && frame->height > 0)
        return get_video_buffer(frame, align);
    else if (frame->nb_samples > 0 && (frame->channel_layout || frame->channels > 0))
        return get_audio_buffer(frame, align);

    return AVERROR(EINVAL);
}

static int frame_copy_props(AVFrame *dst, const AVFrame *src, int force_copy)
{
    int i;

    dst->key_frame              = src->key_frame;
    dst->pict_type              = src->pict_type;
    dst->sample_aspect_ratio    = src->sample_aspect_ratio;
    dst->crop_top               = src->crop_top;
    dst->crop_bottom            = src->crop_bottom;
    dst->crop_left              = src->crop_left;
    dst->crop_right             = src->crop_right;
    dst->pts                    = src->pts;
    dst->repeat_pict            = src->repeat_pict;
    dst->interlaced_frame       = src->interlaced_frame;
    dst->top_field_first        = src->top_field_first;
    dst->palette_has_changed    = src->palette_has_changed;
    dst->sample_rate            = src->sample_rate;
    dst->opaque                 = src->opaque;
#if FF_API_PKT_PTS
FF_DISABLE_DEPRECATION_WARNINGS
    dst->pkt_pts                = src->pkt_pts;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
    dst->pkt_dts                = src->pkt_dts;
    dst->pkt_pos                = src->pkt_pos;
    dst->pkt_size               = src->pkt_size;
    dst->pkt_duration           = src->pkt_duration;
    dst->reordered_opaque       = src->reordered_opaque;
    dst->quality                = src->quality;
    dst->best_effort_timestamp  = src->best_effort_timestamp;
    dst->coded_picture_number   = src->coded_picture_number;
    dst->display_picture_number = src->display_picture_number;
    dst->flags                  = src->flags;
    dst->decode_error_flags     = src->decode_error_flags;
    dst->color_primaries        = src->color_primaries;
    dst->color_trc              = src->color_trc;
    dst->colorspace             = src->colorspace;
    dst->color_range            = src->color_range;
    dst->chroma_location        = src->chroma_location;

    av_dict_copy(&dst->metadata, src->metadata, 0);

#if FF_API_ERROR_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
    memcpy(dst->error, src->error, sizeof(dst->error));
FF_ENABLE_DEPRECATION_WARNINGS
#endif

    for (i = 0; i < src->nb_side_data; i++) {
        const AVFrameSideData *sd_src = src->side_data[i];
        AVFrameSideData *sd_dst;
        if (   sd_src->type == AV_FRAME_DATA_PANSCAN
            && (src->width != dst->width || src->height != dst->height))
            continue;
        if (force_copy) {
            sd_dst = av_frame_new_side_data(dst, sd_src->type,
                                            sd_src->size);
            if (!sd_dst) {
                wipe_side_data(dst);
                return AVERROR(ENOMEM);
            }
            memcpy(sd_dst->data, sd_src->data, sd_src->size);
        } else {
            AVBufferRef *ref = av_buffer_ref(sd_src->buf);
            sd_dst = av_frame_new_side_data_from_buf(dst, sd_src->type, ref);
            if (!sd_dst) {
                av_buffer_unref(&ref);
                wipe_side_data(dst);
                return AVERROR(ENOMEM);
            }
        }
        av_dict_copy(&sd_dst->metadata, sd_src->metadata, 0);
    }

#if FF_API_FRAME_QP
FF_DISABLE_DEPRECATION_WARNINGS
    dst->qscale_table = NULL;
    dst->qstride      = 0;
    dst->qscale_type  = 0;
    av_buffer_unref(&dst->qp_table_buf);
    if (src->qp_table_buf) {
        dst->qp_table_buf = av_buffer_ref(src->qp_table_buf);
        if (dst->qp_table_buf) {
            dst->qscale_table = dst->qp_table_buf->data;
            dst->qstride      = src->qstride;
            dst->qscale_type  = src->qscale_type;
        }
    }
FF_ENABLE_DEPRECATION_WARNINGS
#endif

    av_buffer_unref(&dst->opaque_ref);
    av_buffer_unref(&dst->private_ref);
    if (src->opaque_ref) {
        dst->opaque_ref = av_buffer_ref(src->opaque_ref);
        if (!dst->opaque_ref)
            return AVERROR(ENOMEM);
    }
    if (src->private_ref) {
        dst->private_ref = av_buffer_ref(src->private_ref);
        if (!dst->private_ref)
            return AVERROR(ENOMEM);
    }
    return 0;
}

int av_frame_ref(AVFrame *dst, const AVFrame *src)
{
    int i, ret = 0;

    av_assert1(dst->width == 0 && dst->height == 0);
    av_assert1(dst->channels == 0);

    dst->format         = src->format;
    dst->width          = src->width;
    dst->height         = src->height;
    dst->channels       = src->channels;
    dst->channel_layout = src->channel_layout;
    dst->nb_samples     = src->nb_samples;

    ret = frame_copy_props(dst, src, 0);
    if (ret < 0)
        return ret;

    /* duplicate the frame data if it's not refcounted */
    if (!src->buf[0]) {
        ret = av_frame_get_buffer(dst, 0);
        if (ret < 0)
            return ret;

        ret = av_frame_copy(dst, src);
        if (ret < 0)
            av_frame_unref(dst);

        return ret;
    }

    /* ref the buffers */
    for (i = 0; i < FF_ARRAY_ELEMS(src->buf); i++) {
        if (!src->buf[i])
            continue;
        dst->buf[i] = av_buffer_ref(src->buf[i]);
        if (!dst->buf[i]) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }

    if (src->extended_buf) {
        dst->extended_buf = av_mallocz_array(sizeof(*dst->extended_buf),
                                             src->nb_extended_buf);
        if (!dst->extended_buf) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        dst->nb_extended_buf = src->nb_extended_buf;

        for (i = 0; i < src->nb_extended_buf; i++) {
            dst->extended_buf[i] = av_buffer_ref(src->extended_buf[i]);
            if (!dst->extended_buf[i]) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }
        }
    }

    if (src->hw_frames_ctx) {
        dst->hw_frames_ctx = av_buffer_ref(src->hw_frames_ctx);
        if (!dst->hw_frames_ctx) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }

    /* duplicate extended data */
    if (src->extended_data != src->data) {
        int ch = src->channels;

        if (!ch) {
            ret = AVERROR(EINVAL);
            goto fail;
        }
        CHECK_CHANNELS_CONSISTENCY(src);

        dst->extended_data = av_malloc_array(sizeof(*dst->extended_data), ch);
        if (!dst->extended_data) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        memcpy(dst->extended_data, src->extended_data, sizeof(*src->extended_data) * ch);
    } else
        dst->extended_data = dst->data;

    memcpy(dst->data,     src->data,     sizeof(src->data));
    memcpy(dst->linesize, src->linesize, sizeof(src->linesize));

    return 0;

fail:
    av_frame_unref(dst);
    return ret;
}

AVFrame *av_frame_clone(const AVFrame *src)
{
    AVFrame *ret = av_frame_alloc();

    if (!ret)
        return NULL;

    if (av_frame_ref(ret, src) < 0)
        av_frame_free(&ret);

    return ret;
}

void av_frame_unref(AVFrame *frame)
{
    int i;

    if (!frame)
        return;

    wipe_side_data(frame);

    for (i = 0; i < FF_ARRAY_ELEMS(frame->buf); i++)
        av_buffer_unref(&frame->buf[i]);
    for (i = 0; i < frame->nb_extended_buf; i++)
        av_buffer_unref(&frame->extended_buf[i]);
    av_freep(&frame->extended_buf);
    av_dict_free(&frame->metadata);
#if FF_API_FRAME_QP
FF_DISABLE_DEPRECATION_WARNINGS
    av_buffer_unref(&frame->qp_table_buf);
FF_ENABLE_DEPRECATION_WARNINGS
#endif

    av_buffer_unref(&frame->hw_frames_ctx);

    av_buffer_unref(&frame->opaque_ref);
    av_buffer_unref(&frame->private_ref);

    get_frame_defaults(frame);
}

void av_frame_move_ref(AVFrame *dst, AVFrame *src)
{
    av_assert1(dst->width == 0 && dst->height == 0);
    av_assert1(dst->channels == 0);

    *dst = *src;
    if (src->extended_data == src->data)
        dst->extended_data = dst->data;
    memset(src, 0, sizeof(*src));
    get_frame_defaults(src);
}

int av_frame_is_writable(AVFrame *frame)
{
    int i, ret = 1;

    /* assume non-refcounted frames are not writable */
    if (!frame->buf[0])
        return 0;

    for (i = 0; i < FF_ARRAY_ELEMS(frame->buf); i++)
        if (frame->buf[i])
            ret &= !!av_buffer_is_writable(frame->buf[i]);
    for (i = 0; i < frame->nb_extended_buf; i++)
        ret &= !!av_buffer_is_writable(frame->extended_buf[i]);

    return ret;
}

int av_frame_make_writable(AVFrame *frame)
{
    AVFrame tmp;
    int ret;

    if (!frame->buf[0])
        return AVERROR(EINVAL);

    if (av_frame_is_writable(frame))
        return 0;

    memset(&tmp, 0, sizeof(tmp));
    tmp.format         = frame->format;
    tmp.width          = frame->width;
    tmp.height         = frame->height;
    tmp.channels       = frame->channels;
    tmp.channel_layout = frame->channel_layout;
    tmp.nb_samples     = frame->nb_samples;

    if (frame->hw_frames_ctx)
        ret = av_hwframe_get_buffer(frame->hw_frames_ctx, &tmp, 0);
    else
        ret = av_frame_get_buffer(&tmp, 0);
    if (ret < 0)
        return ret;

    ret = av_frame_copy(&tmp, frame);
    if (ret < 0) {
        av_frame_unref(&tmp);
        return ret;
    }

    ret = av_frame_copy_props(&tmp, frame);
    if (ret < 0) {
        av_frame_unref(&tmp);
        return ret;
    }

    av_frame_unref(frame);

    *frame = tmp;
    if (tmp.data == tmp.extended_data)
        frame->extended_data = frame->data;

    return 0;
}

int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
{
    return frame_copy_props(dst, src, 1);
}

AVBufferRef *av_frame_get_plane_buffer(AVFrame *frame, int plane)
{
    uint8_t *data;
    int planes, i;

    if (frame->nb_samples) {
        int channels = frame->channels;
        if (!channels)
            return NULL;
        CHECK_CHANNELS_CONSISTENCY(frame);
        planes = av_sample_fmt_is_planar(frame->format) ? channels : 1;
    } else
        planes = 4;

    if (plane < 0 || plane >= planes || !frame->extended_data[plane])
        return NULL;
    data = frame->extended_data[plane];

    for (i = 0; i < FF_ARRAY_ELEMS(frame->buf) && frame->buf[i]; i++) {
        AVBufferRef *buf = frame->buf[i];
        if (data >= buf->data && data < buf->data + buf->size)
            return buf;
    }
    for (i = 0; i < frame->nb_extended_buf; i++) {
        AVBufferRef *buf = frame->extended_buf[i];
        if (data >= buf->data && data < buf->data + buf->size)
            return buf;
    }
    return NULL;
}

AVFrameSideData *av_frame_new_side_data_from_buf(AVFrame *frame,
                                                 enum AVFrameSideDataType type,
                                                 AVBufferRef *buf)
{
    AVFrameSideData *ret, **tmp;

    if (!buf)
        return NULL;

    if (frame->nb_side_data > INT_MAX / sizeof(*frame->side_data) - 1)
        return NULL;

    tmp = av_realloc(frame->side_data,
                     (frame->nb_side_data + 1) * sizeof(*frame->side_data));
    if (!tmp)
        return NULL;
    frame->side_data = tmp;

    ret = av_mallocz(sizeof(*ret));
    if (!ret)
        return NULL;

    ret->buf = buf;
    ret->data = ret->buf->data;
    ret->size = buf->size;
    ret->type = type;

    frame->side_data[frame->nb_side_data++] = ret;

    return ret;
}

AVFrameSideData *av_frame_new_side_data(AVFrame *frame,
                                        enum AVFrameSideDataType type,
                                        int size)
{
    AVFrameSideData *ret;
    AVBufferRef *buf = av_buffer_alloc(size);
    ret = av_frame_new_side_data_from_buf(frame, type, buf);
    if (!ret)
        av_buffer_unref(&buf);
    return ret;
}

AVFrameSideData *av_frame_get_side_data(const AVFrame *frame,
                                        enum AVFrameSideDataType type)
{
    int i;

    for (i = 0; i < frame->nb_side_data; i++) {
        if (frame->side_data[i]->type == type)
            return frame->side_data[i];
    }
    return NULL;
}

static int frame_copy_video(AVFrame *dst, const AVFrame *src)
{
    const uint8_t *src_data[4];
    int i, planes;

    if (dst->width  < src->width ||
        dst->height < src->height)
        return AVERROR(EINVAL);

    if (src->hw_frames_ctx || dst->hw_frames_ctx)
        return av_hwframe_transfer_data(dst, src, 0);

    planes = av_pix_fmt_count_planes(dst->format);
    for (i = 0; i < planes; i++)
        if (!dst->data[i] || !src->data[i])
            return AVERROR(EINVAL);

    memcpy(src_data, src->data, sizeof(src_data));
    av_image_copy(dst->data, dst->linesize,
                  src_data, src->linesize,
                  dst->format, src->width, src->height);

    return 0;
}

static int frame_copy_audio(AVFrame *dst, const AVFrame *src)
{
    int planar   = av_sample_fmt_is_planar(dst->format);
    int channels = dst->channels;
    int planes   = planar ? channels : 1;
    int i;

    if (dst->nb_samples != src->nb_samples ||
        dst->channels != src->channels ||
        dst->channel_layout != src->channel_layout)
        return AVERROR(EINVAL);

    CHECK_CHANNELS_CONSISTENCY(src);

    for (i = 0; i < planes; i++)
        if (!dst->extended_data[i] || !src->extended_data[i])
            return AVERROR(EINVAL);

    av_samples_copy(dst->extended_data, src->extended_data, 0, 0,
                    dst->nb_samples, channels, dst->format);

    return 0;
}

int av_frame_copy(AVFrame *dst, const AVFrame *src)
{
    if (dst->format != src->format || dst->format < 0)
        return AVERROR(EINVAL);

    if (dst->width > 0 && dst->height > 0)
        return frame_copy_video(dst, src);
    else if (dst->nb_samples > 0 && dst->channels > 0)
        return frame_copy_audio(dst, src);

    return AVERROR(EINVAL);
}

void av_frame_remove_side_data(AVFrame *frame, enum AVFrameSideDataType type)
{
    int i;

    for (i = frame->nb_side_data - 1; i >= 0; i--) {
        AVFrameSideData *sd = frame->side_data[i];
        if (sd->type == type) {
            free_side_data(&frame->side_data[i]);
            frame->side_data[i] = frame->side_data[frame->nb_side_data - 1];
            frame->nb_side_data--;
        }
    }
}

const char *av_frame_side_data_name(enum AVFrameSideDataType type)
{
    switch(type) {
    case AV_FRAME_DATA_PANSCAN:         return "AVPanScan";
    case AV_FRAME_DATA_A53_CC:          return "ATSC A53 Part 4 Closed Captions";
    case AV_FRAME_DATA_STEREO3D:        return "Stereo 3D";
    case AV_FRAME_DATA_MATRIXENCODING:  return "AVMatrixEncoding";
    case AV_FRAME_DATA_DOWNMIX_INFO:    return "Metadata relevant to a downmix procedure";
    case AV_FRAME_DATA_REPLAYGAIN:      return "AVReplayGain";
    case AV_FRAME_DATA_DISPLAYMATRIX:   return "3x3 displaymatrix";
    case AV_FRAME_DATA_AFD:             return "Active format description";
    case AV_FRAME_DATA_MOTION_VECTORS:  return "Motion vectors";
    case AV_FRAME_DATA_SKIP_SAMPLES:    return "Skip samples";
    case AV_FRAME_DATA_AUDIO_SERVICE_TYPE:          return "Audio service type";
    case AV_FRAME_DATA_MASTERING_DISPLAY_METADATA:  return "Mastering display metadata";
    case AV_FRAME_DATA_CONTENT_LIGHT_LEVEL:         return "Content light level metadata";
    case AV_FRAME_DATA_GOP_TIMECODE:                return "GOP timecode";
    case AV_FRAME_DATA_S12M_TIMECODE:               return "SMPTE 12-1 timecode";
    case AV_FRAME_DATA_SPHERICAL:                   return "Spherical Mapping";
    case AV_FRAME_DATA_ICC_PROFILE:                 return "ICC profile";
#if FF_API_FRAME_QP
    case AV_FRAME_DATA_QP_TABLE_PROPERTIES:         return "QP table properties";
    case AV_FRAME_DATA_QP_TABLE_DATA:               return "QP table data";
#endif
    case AV_FRAME_DATA_DYNAMIC_HDR_PLUS: return "HDR Dynamic Metadata SMPTE2094-40 (HDR10+)";
    case AV_FRAME_DATA_REGIONS_OF_INTEREST: return "Regions Of Interest";
    case AV_FRAME_DATA_VIDEO_ENC_PARAMS: return "Video encoding parameters";
    }
    return NULL;
}

static int calc_cropping_offsets(size_t offsets[4], const AVFrame *frame,
                                 const AVPixFmtDescriptor *desc)
{
    int i, j;

    for (i = 0; frame->data[i]; i++) {
        const AVComponentDescriptor *comp = NULL;
        int shift_x = (i == 1 || i == 2) ? desc->log2_chroma_w : 0;
        int shift_y = (i == 1 || i == 2) ? desc->log2_chroma_h : 0;

        if (desc->flags & (AV_PIX_FMT_FLAG_PAL | FF_PSEUDOPAL) && i == 1) {
            offsets[i] = 0;
            break;
        }

        /* find any component descriptor for this plane */
        for (j = 0; j < desc->nb_components; j++) {
            if (desc->comp[j].plane == i) {
                comp = &desc->comp[j];
                break;
            }
        }
        if (!comp)
            return AVERROR_BUG;

        offsets[i] = (frame->crop_top  >> shift_y) * frame->linesize[i] +
                     (frame->crop_left >> shift_x) * comp->step;
    }

    return 0;
}

int av_frame_apply_cropping(AVFrame *frame, int flags)
{
    const AVPixFmtDescriptor *desc;
    size_t offsets[4];
    int i;

    if (!(frame->width > 0 && frame->height > 0))
        return AVERROR(EINVAL);

    if (frame->crop_left >= INT_MAX - frame->crop_right        ||
        frame->crop_top  >= INT_MAX - frame->crop_bottom       ||
        (frame->crop_left + frame->crop_right) >= frame->width ||
        (frame->crop_top + frame->crop_bottom) >= frame->height)
        return AVERROR(ERANGE);

    desc = av_pix_fmt_desc_get(frame->format);
    if (!desc)
        return AVERROR_BUG;

    /* Apply just the right/bottom cropping for hwaccel formats. Bitstream
     * formats cannot be easily handled here either (and corresponding decoders
     * should not export any cropping anyway), so do the same for those as well.
     * */
    if (desc->flags & (AV_PIX_FMT_FLAG_BITSTREAM | AV_PIX_FMT_FLAG_HWACCEL)) {
        frame->width      -= frame->crop_right;
        frame->height     -= frame->crop_bottom;
        frame->crop_right  = 0;
        frame->crop_bottom = 0;
        return 0;
    }

    /* calculate the offsets for each plane */
    calc_cropping_offsets(offsets, frame, desc);

    /* adjust the offsets to avoid breaking alignment */
    if (!(flags & AV_FRAME_CROP_UNALIGNED)) {
        int log2_crop_align = frame->crop_left ? ff_ctz(frame->crop_left) : INT_MAX;
        int min_log2_align = INT_MAX;

        for (i = 0; frame->data[i]; i++) {
            int log2_align = offsets[i] ? ff_ctz(offsets[i]) : INT_MAX;
            min_log2_align = FFMIN(log2_align, min_log2_align);
        }

        /* we assume, and it should always be true, that the data alignment is
         * related to the cropping alignment by a constant power-of-2 factor */
        if (log2_crop_align < min_log2_align)
            return AVERROR_BUG;

        if (min_log2_align < 5) {
            frame->crop_left &= ~((1 << (5 + log2_crop_align - min_log2_align)) - 1);
            calc_cropping_offsets(offsets, frame, desc);
        }
    }

    for (i = 0; frame->data[i]; i++)
        frame->data[i] += offsets[i];

    frame->width  -= (frame->crop_left + frame->crop_right);
    frame->height -= (frame->crop_top  + frame->crop_bottom);
    frame->crop_left   = 0;
    frame->crop_right  = 0;
    frame->crop_top    = 0;
    frame->crop_bottom = 0;

    return 0;
}
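
Below is a minimal usage sketch of the public API implemented above. It is not part of frame.c; the function name example_frame_lifecycle and the 640x480 YUV420P parameters are illustrative assumptions only, and error handling is kept to the essentials.

#include <libavutil/error.h>
#include <libavutil/frame.h>
#include <libavutil/pixfmt.h>

/* Illustrative sketch: allocate a video AVFrame, request refcounted
 * buffers for it, clone a second reference to the same buffers,
 * then release both references. */
int example_frame_lifecycle(void)
{
    AVFrame *frame = av_frame_alloc();
    AVFrame *copy  = NULL;
    int ret;

    if (!frame)
        return AVERROR(ENOMEM);

    /* Arbitrary example parameters (assumed for illustration). */
    frame->format = AV_PIX_FMT_YUV420P;
    frame->width  = 640;
    frame->height = 480;

    /* Allocates frame->buf[]/data[] with default alignment (align = 0). */
    ret = av_frame_get_buffer(frame, 0);
    if (ret < 0)
        goto end;

    /* New frame referencing the same buffers; no pixel data is copied. */
    copy = av_frame_clone(frame);
    if (!copy) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

end:
    av_frame_free(&copy);
    av_frame_free(&frame);
    return ret;
}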