output.c
/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>

#include "libavutil/channel_layout.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavformat/avformat.h"
#include "libavresample/avresample.h"
#include "libswscale/swscale.h"

/* 5 seconds stream duration */
#define STREAM_DURATION   5.0
#define STREAM_FRAME_RATE 25 /* 25 images/s */
#define STREAM_NB_FRAMES  ((int)(STREAM_DURATION * STREAM_FRAME_RATE))
#define STREAM_PIX_FMT    AV_PIX_FMT_YUV420P /* default pix_fmt */

#define SCALE_FLAGS SWS_BICUBIC

// a wrapper around a single output AVStream
typedef struct OutputStream {
    AVStream *st;
    AVCodecContext *enc;

    /* pts of the next frame that will be generated */
    int64_t next_pts;

    AVFrame *frame;
    AVFrame *tmp_frame;

    float t, tincr, tincr2;

    struct SwsContext *sws_ctx;
    AVAudioResampleContext *avr;
} OutputStream;

/**************************************************************/
/* audio output */

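/* The audio path works as follows: get_audio_frame() synthesizes an
 * interleaved signed 16-bit stereo sine sweep at 44100 Hz into tmp_frame,
 * process_audio_stream() pushes it through libavresample to obtain frames in
 * whatever sample format, rate and channel layout the encoder requires, and
 * encode_audio_frame() feeds those frames to the encoder with
 * avcodec_send_frame()/avcodec_receive_packet() and hands the resulting
 * packets to the muxer. */
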
/*
 * add an audio output stream
 */
static void add_audio_stream(OutputStream *ost, AVFormatContext *oc,
                             enum AVCodecID codec_id)
{
    AVCodecContext *c;
    AVCodec *codec;
    int ret;

    /* find the audio encoder */
    codec = avcodec_find_encoder(codec_id);
    if (!codec) {
        fprintf(stderr, "codec not found\n");
        exit(1);
    }

    ost->st = avformat_new_stream(oc, NULL);
    if (!ost->st) {
        fprintf(stderr, "Could not alloc stream\n");
        exit(1);
    }

    c = avcodec_alloc_context3(codec);
    if (!c) {
        fprintf(stderr, "Could not alloc an encoding context\n");
        exit(1);
    }
    ost->enc = c;

    /* put sample parameters */
    c->sample_fmt     = codec->sample_fmts           ? codec->sample_fmts[0]           : AV_SAMPLE_FMT_S16;
    c->sample_rate    = codec->supported_samplerates ? codec->supported_samplerates[0] : 44100;
    c->channel_layout = codec->channel_layouts       ? codec->channel_layouts[0]       : AV_CH_LAYOUT_STEREO;
    c->channels       = av_get_channel_layout_nb_channels(c->channel_layout);
    c->bit_rate       = 64000;

    ost->st->time_base = (AVRational){ 1, c->sample_rate };

    // some formats want stream headers to be separate
    if (oc->oformat->flags & AVFMT_GLOBALHEADER)
        c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

    /* initialize sample format conversion;
     * to simplify the code, we always pass the data through lavr, even
     * if the encoder supports the generated format directly -- the price is
     * some extra data copying;
     */
    ost->avr = avresample_alloc_context();
    if (!ost->avr) {
        fprintf(stderr, "Error allocating the resampling context\n");
        exit(1);
    }

    av_opt_set_int(ost->avr, "in_sample_fmt",      AV_SAMPLE_FMT_S16,   0);
    av_opt_set_int(ost->avr, "in_sample_rate",     44100,               0);
    av_opt_set_int(ost->avr, "in_channel_layout",  AV_CH_LAYOUT_STEREO, 0);
    av_opt_set_int(ost->avr, "out_sample_fmt",     c->sample_fmt,       0);
    av_opt_set_int(ost->avr, "out_sample_rate",    c->sample_rate,      0);
    av_opt_set_int(ost->avr, "out_channel_layout", c->channel_layout,   0);

    ret = avresample_open(ost->avr);
    if (ret < 0) {
        fprintf(stderr, "Error opening the resampling context\n");
        exit(1);
    }
}

static AVFrame *alloc_audio_frame(enum AVSampleFormat sample_fmt,
                                  uint64_t channel_layout,
                                  int sample_rate, int nb_samples)
{
    AVFrame *frame = av_frame_alloc();
    int ret;

    if (!frame) {
        fprintf(stderr, "Error allocating an audio frame\n");
        exit(1);
    }

    frame->format         = sample_fmt;
    frame->channel_layout = channel_layout;
    frame->sample_rate    = sample_rate;
    frame->nb_samples     = nb_samples;

    if (nb_samples) {
        ret = av_frame_get_buffer(frame, 0);
        if (ret < 0) {
            fprintf(stderr, "Error allocating an audio buffer\n");
            exit(1);
        }
    }

    return frame;
}

static void open_audio(AVFormatContext *oc, OutputStream *ost)
{
    AVCodecContext *c;
    int nb_samples, ret;

    c = ost->enc;

    /* open it */
    if (avcodec_open2(c, NULL, NULL) < 0) {
        fprintf(stderr, "could not open codec\n");
        exit(1);
    }

    /* init signal generator */
    ost->t     = 0;
    ost->tincr = 2 * M_PI * 110.0 / c->sample_rate;
    /* increment frequency by 110 Hz per second */
    ost->tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;

    if (c->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE)
        nb_samples = 10000;
    else
        nb_samples = c->frame_size;

    ost->frame     = alloc_audio_frame(c->sample_fmt, c->channel_layout,
                                       c->sample_rate, nb_samples);
    ost->tmp_frame = alloc_audio_frame(AV_SAMPLE_FMT_S16, AV_CH_LAYOUT_STEREO,
                                       44100, nb_samples);

    /* copy the stream parameters to the muxer */
    ret = avcodec_parameters_from_context(ost->st->codecpar, c);
    if (ret < 0) {
        fprintf(stderr, "Could not copy the stream parameters\n");
        exit(1);
    }
}

/* Prepare a 16 bit dummy audio frame of 'frame_size' samples and
 * 'nb_channels' channels. */
static AVFrame *get_audio_frame(OutputStream *ost)
{
    AVFrame *frame = ost->tmp_frame;
    int j, i, v;
    int16_t *q = (int16_t*)frame->data[0];

    /* check if we want to generate more frames */
    if (av_compare_ts(ost->next_pts, ost->enc->time_base,
                      STREAM_DURATION, (AVRational){ 1, 1 }) >= 0)
        return NULL;

    for (j = 0; j < frame->nb_samples; j++) {
        v = (int)(sin(ost->t) * 10000);
        for (i = 0; i < ost->enc->channels; i++)
            *q++ = v;
        ost->t     += ost->tincr;
        ost->tincr += ost->tincr2;
    }

    return frame;
}

/* if a frame is provided, send it to the encoder, otherwise flush the encoder;
 * return 1 when encoding is finished, 0 otherwise
 */
static int encode_audio_frame(AVFormatContext *oc, OutputStream *ost,
                              AVFrame *frame)
{
    int ret;

    ret = avcodec_send_frame(ost->enc, frame);
    if (ret < 0) {
        fprintf(stderr, "Error submitting a frame for encoding\n");
        exit(1);
    }

    while (ret >= 0) {
        AVPacket pkt = { 0 }; // data and size must be 0

        av_init_packet(&pkt);

        ret = avcodec_receive_packet(ost->enc, &pkt);
        if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
            fprintf(stderr, "Error encoding an audio frame\n");
            exit(1);
        } else if (ret >= 0) {
            av_packet_rescale_ts(&pkt, ost->enc->time_base, ost->st->time_base);
            pkt.stream_index = ost->st->index;

            /* Write the compressed frame to the media file. */
            ret = av_interleaved_write_frame(oc, &pkt);
            if (ret < 0) {
                fprintf(stderr, "Error while writing an audio frame\n");
                exit(1);
            }
        }
    }

    return ret == AVERROR_EOF;
}

/*
 * encode one audio frame and send it to the muxer
 * return 1 when encoding is finished, 0 otherwise
 */
static int process_audio_stream(AVFormatContext *oc, OutputStream *ost)
{
    AVFrame *frame;
    int got_output = 0;
    int ret;

    frame = get_audio_frame(ost);
    got_output |= !!frame;

    /* feed the data to lavr */
    if (frame) {
        ret = avresample_convert(ost->avr, NULL, 0, 0,
                                 frame->extended_data, frame->linesize[0],
                                 frame->nb_samples);
        if (ret < 0) {
            fprintf(stderr, "Error feeding audio data to the resampler\n");
            exit(1);
        }
    }

    while ((frame && avresample_available(ost->avr) >= ost->frame->nb_samples) ||
           (!frame && avresample_get_out_samples(ost->avr, 0))) {
        /* when we pass a frame to the encoder, it may keep a reference to it
         * internally;
         * make sure we do not overwrite it here
         */
        ret = av_frame_make_writable(ost->frame);
        if (ret < 0)
            exit(1);

        /* the difference between the two avresample calls here is that the
         * first one just reads the already converted data that is buffered in
         * the lavr output buffer, while the second one also flushes the
         * resampler */
        if (frame) {
            ret = avresample_read(ost->avr, ost->frame->extended_data,
                                  ost->frame->nb_samples);
        } else {
            ret = avresample_convert(ost->avr, ost->frame->extended_data,
                                     ost->frame->linesize[0], ost->frame->nb_samples,
                                     NULL, 0, 0);
        }

        if (ret < 0) {
            fprintf(stderr, "Error while resampling\n");
            exit(1);
        } else if (frame && ret != ost->frame->nb_samples) {
            fprintf(stderr, "Too few samples returned from lavr\n");
            exit(1);
        }

        ost->frame->nb_samples = ret;

        ost->frame->pts = ost->next_pts;
        ost->next_pts  += ost->frame->nb_samples;

        got_output |= encode_audio_frame(oc, ost, ret ? ost->frame : NULL);
    }

    return !got_output;
}

/**************************************************************/
/* video output */

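/* The video path mirrors the audio one: get_video_frame() paints a synthetic
 * YUV420P test pattern (fill_yuv_image) and, if the encoder uses a different
 * pixel format, converts it with libswscale; write_video_frame() then runs
 * the frame through avcodec_send_frame()/avcodec_receive_packet() and writes
 * the resulting packets with av_interleaved_write_frame(). */
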
/* Add a video output stream. */
static void add_video_stream(OutputStream *ost, AVFormatContext *oc,
                             enum AVCodecID codec_id)
{
    AVCodecContext *c;
    AVCodec *codec;

    /* find the video encoder */
    codec = avcodec_find_encoder(codec_id);
    if (!codec) {
        fprintf(stderr, "codec not found\n");
        exit(1);
    }

    ost->st = avformat_new_stream(oc, NULL);
    if (!ost->st) {
        fprintf(stderr, "Could not alloc stream\n");
        exit(1);
    }

    c = avcodec_alloc_context3(codec);
    if (!c) {
        fprintf(stderr, "Could not alloc an encoding context\n");
        exit(1);
    }
    ost->enc = c;

    /* Put sample parameters. */
    c->bit_rate = 400000;
    /* Resolution must be a multiple of two. */
    c->width  = 352;
    c->height = 288;
    /* timebase: This is the fundamental unit of time (in seconds) in terms
     * of which frame timestamps are represented. For fixed-fps content,
     * timebase should be 1/framerate and timestamp increments should be
     * identical to 1. */
    ost->st->time_base = (AVRational){ 1, STREAM_FRAME_RATE };
    c->time_base       = ost->st->time_base;

    c->gop_size = 12; /* emit one intra frame every twelve frames at most */
    c->pix_fmt  = STREAM_PIX_FMT;
    if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
        /* just for testing, we also add B-frames */
        c->max_b_frames = 2;
    }
    if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
        /* Needed to avoid using macroblocks in which some coeffs overflow.
         * This does not happen with normal video, it just happens here as
         * the motion of the chroma plane does not match the luma plane. */
        c->mb_decision = 2;
    }
    /* Some formats want stream headers to be separate. */
    if (oc->oformat->flags & AVFMT_GLOBALHEADER)
        c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}

static AVFrame *alloc_picture(enum AVPixelFormat pix_fmt, int width, int height)
{
    AVFrame *picture;
    int ret;

    picture = av_frame_alloc();
    if (!picture)
        return NULL;

    picture->format = pix_fmt;
    picture->width  = width;
    picture->height = height;

    /* allocate the buffers for the frame data */
    ret = av_frame_get_buffer(picture, 32);
    if (ret < 0) {
        fprintf(stderr, "Could not allocate frame data.\n");
        exit(1);
    }

    return picture;
}

static void open_video(AVFormatContext *oc, OutputStream *ost)
{
    AVCodecContext *c;
    int ret;

    c = ost->enc;

    /* open the codec */
    if (avcodec_open2(c, NULL, NULL) < 0) {
        fprintf(stderr, "could not open codec\n");
        exit(1);
    }

    /* Allocate the encoded raw picture. */
    ost->frame = alloc_picture(c->pix_fmt, c->width, c->height);
    if (!ost->frame) {
        fprintf(stderr, "Could not allocate picture\n");
        exit(1);
    }

    /* If the output format is not YUV420P, then a temporary YUV420P
     * picture is needed too. It is then converted to the required
     * output format. */
    ost->tmp_frame = NULL;
    if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
        ost->tmp_frame = alloc_picture(AV_PIX_FMT_YUV420P, c->width, c->height);
        if (!ost->tmp_frame) {
            fprintf(stderr, "Could not allocate temporary picture\n");
            exit(1);
        }
    }

    /* copy the stream parameters to the muxer */
    ret = avcodec_parameters_from_context(ost->st->codecpar, c);
    if (ret < 0) {
        fprintf(stderr, "Could not copy the stream parameters\n");
        exit(1);
    }
}

/* Prepare a dummy image. */
static void fill_yuv_image(AVFrame *pict, int frame_index,
                           int width, int height)
{
    int x, y, i, ret;

    /* when we pass a frame to the encoder, it may keep a reference to it
     * internally;
     * make sure we do not overwrite it here
     */
    ret = av_frame_make_writable(pict);
    if (ret < 0)
        exit(1);

    i = frame_index;

    /* Y */
    for (y = 0; y < height; y++)
        for (x = 0; x < width; x++)
            pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;

    /* Cb and Cr */
    for (y = 0; y < height / 2; y++) {
        for (x = 0; x < width / 2; x++) {
            pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
            pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
        }
    }
}

static AVFrame *get_video_frame(OutputStream *ost)
{
    AVCodecContext *c = ost->enc;

    /* check if we want to generate more frames */
    if (av_compare_ts(ost->next_pts, c->time_base,
                      STREAM_DURATION, (AVRational){ 1, 1 }) >= 0)
        return NULL;

    if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
        /* as we only generate a YUV420P picture, we must convert it
         * to the codec pixel format if needed */
        if (!ost->sws_ctx) {
            ost->sws_ctx = sws_getContext(c->width, c->height,
                                          AV_PIX_FMT_YUV420P,
                                          c->width, c->height,
                                          c->pix_fmt,
                                          SCALE_FLAGS, NULL, NULL, NULL);
            if (!ost->sws_ctx) {
                fprintf(stderr,
                        "Cannot initialize the conversion context\n");
                exit(1);
            }
        }
        fill_yuv_image(ost->tmp_frame, ost->next_pts, c->width, c->height);
        sws_scale(ost->sws_ctx, (const uint8_t * const *) ost->tmp_frame->data,
                  ost->tmp_frame->linesize, 0, c->height, ost->frame->data,
                  ost->frame->linesize);
    } else {
        fill_yuv_image(ost->frame, ost->next_pts, c->width, c->height);
    }

    ost->frame->pts = ost->next_pts++;

    return ost->frame;
}

/*
 * encode one video frame and send it to the muxer
 * return 1 when encoding is finished, 0 otherwise
 */
static int write_video_frame(AVFormatContext *oc, OutputStream *ost)
{
    int ret;
    AVCodecContext *c;
    AVFrame *frame;

    c = ost->enc;

    frame = get_video_frame(ost);

    /* encode the image */
    ret = avcodec_send_frame(c, frame);
    if (ret < 0) {
        fprintf(stderr, "Error submitting a frame for encoding\n");
        exit(1);
    }

    while (ret >= 0) {
        AVPacket pkt = { 0 };

        av_init_packet(&pkt);

        ret = avcodec_receive_packet(c, &pkt);
        if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
            fprintf(stderr, "Error encoding a video frame\n");
            exit(1);
        } else if (ret >= 0) {
            av_packet_rescale_ts(&pkt, c->time_base, ost->st->time_base);
            pkt.stream_index = ost->st->index;

            /* Write the compressed frame to the media file. */
            ret = av_interleaved_write_frame(oc, &pkt);
            if (ret < 0) {
                fprintf(stderr, "Error while writing video frame\n");
                exit(1);
            }
        }
    }

    return ret == AVERROR_EOF;
}

static void close_stream(AVFormatContext *oc, OutputStream *ost)
{
    avcodec_free_context(&ost->enc);
    av_frame_free(&ost->frame);
    av_frame_free(&ost->tmp_frame);
    sws_freeContext(ost->sws_ctx);
    avresample_free(&ost->avr);
}

/**************************************************************/
/* media file output */

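/* main() drives both streams: it keeps encoding whichever stream is behind,
 * comparing video_st.next_pts and audio_st.next_pts with av_compare_ts() in
 * their respective encoder time bases (1/STREAM_FRAME_RATE for video, so
 * next_pts counts frames; 1/sample_rate for audio, so next_pts counts
 * samples). The muxer therefore receives the two streams roughly interleaved. */
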
int main(int argc, char **argv)
{
    OutputStream video_st = { 0 }, audio_st = { 0 };
    const char *filename;
    AVOutputFormat *fmt;
    AVFormatContext *oc;
    int have_video = 0, have_audio = 0;
    int encode_video = 0, encode_audio = 0;

    /* Initialize libavcodec, and register all codecs and formats. */
    av_register_all();

    if (argc != 2) {
        printf("usage: %s output_file\n"
               "API example program to output a media file with libavformat.\n"
               "The output format is automatically guessed according to the file extension.\n"
               "Raw images can also be output by using '%%d' in the filename.\n"
               "\n", argv[0]);
        return 1;
    }

    filename = argv[1];

    /* Autodetect the output format from the name. The default is MPEG. */
    fmt = av_guess_format(NULL, filename, NULL);
    if (!fmt) {
        printf("Could not deduce output format from file extension: using MPEG.\n");
        fmt = av_guess_format("mpeg", NULL, NULL);
    }
    if (!fmt) {
        fprintf(stderr, "Could not find suitable output format\n");
        return 1;
    }

    /* Allocate the output media context. */
    oc = avformat_alloc_context();
    if (!oc) {
        fprintf(stderr, "Memory error\n");
        return 1;
    }
    oc->oformat = fmt;
    snprintf(oc->filename, sizeof(oc->filename), "%s", filename);

    /* Add the audio and video streams using the default format codecs
     * and initialize the codecs. */
    if (fmt->video_codec != AV_CODEC_ID_NONE) {
        add_video_stream(&video_st, oc, fmt->video_codec);
        have_video   = 1;
        encode_video = 1;
    }
    if (fmt->audio_codec != AV_CODEC_ID_NONE) {
        add_audio_stream(&audio_st, oc, fmt->audio_codec);
        have_audio   = 1;
        encode_audio = 1;
    }

    /* Now that all the parameters are set, we can open the audio and
     * video codecs and allocate the necessary encode buffers. */
    if (have_video)
        open_video(oc, &video_st);
    if (have_audio)
        open_audio(oc, &audio_st);

    av_dump_format(oc, 0, filename, 1);

    /* open the output file, if needed */
    if (!(fmt->flags & AVFMT_NOFILE)) {
        if (avio_open(&oc->pb, filename, AVIO_FLAG_WRITE) < 0) {
            fprintf(stderr, "Could not open '%s'\n", filename);
            return 1;
        }
    }

    /* Write the stream header, if any. */
    avformat_write_header(oc, NULL);

    while (encode_video || encode_audio) {
        /* select the stream to encode */
        if (encode_video &&
            (!encode_audio || av_compare_ts(video_st.next_pts, video_st.enc->time_base,
                                            audio_st.next_pts, audio_st.enc->time_base) <= 0)) {
            encode_video = !write_video_frame(oc, &video_st);
        } else {
            encode_audio = !process_audio_stream(oc, &audio_st);
        }
    }

    /* Write the trailer, if any. The trailer must be written before you
     * close the CodecContexts you opened when you wrote the header; otherwise
     * av_write_trailer() may try to use memory that was freed on
     * av_codec_close(). */
    av_write_trailer(oc);

    /* Close each codec. */
    if (have_video)
        close_stream(oc, &video_st);
    if (have_audio)
        close_stream(oc, &audio_st);

    if (!(fmt->flags & AVFMT_NOFILE))
        /* Close the output file. */
        avio_close(oc->pb);

    /* free the format context and its streams */
    avformat_free_context(oc);

    return 0;
}
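
A typical way to build and run the example, assuming the matching Libav/FFmpeg development libraries of this API era are installed and visible to pkg-config (module names as shown below):

    gcc -o output output.c \
        $(pkg-config --cflags --libs libavformat libavcodec libavresample libswscale libavutil) -lm
    ./output test.mpg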