output-example.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2003 Fabrice Bellard
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a copy
5  * of this software and associated documentation files (the "Software"), to deal
6  * in the Software without restriction, including without limitation the rights
7  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8  * copies of the Software, and to permit persons to whom the Software is
9  * furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20  * THE SOFTWARE.
21  */
22 
32 #include <stdlib.h>
33 #include <stdio.h>
34 #include <string.h>
35 #include <math.h>
36 
37 #include "libavutil/mathematics.h"
38 #include "libavformat/avformat.h"
39 #include "libswscale/swscale.h"
40 
41 #undef exit
42 
43 /* 5 seconds stream duration */
44 #define STREAM_DURATION 5.0
45 #define STREAM_FRAME_RATE 25 /* 25 images/s */
46 #define STREAM_NB_FRAMES ((int)(STREAM_DURATION * STREAM_FRAME_RATE))
47 #define STREAM_PIX_FMT PIX_FMT_YUV420P /* default pix_fmt */
48 
/* Scaler flags used when converting the generated YUV420P frames to the
   encoder's pixel format (see the sws_getContext call in the video path). */
49 static int sws_flags = SWS_BICUBIC;
50 
51 /**************************************************************/
52 /* audio output */
53 
/* Sine test-tone generator state: t is the phase, tincr the per-sample
   phase increment, tincr2 the increment applied to tincr each sample
   (sweeps the tone upward).  All three are initialized in open_audio(). */
54 static float t, tincr, tincr2;
/* Buffer of generated PCM samples fed to the audio encoder. */
55 static int16_t *samples;
/* Encoded-audio output buffer and its size (sized in open_audio()). */
56 static uint8_t *audio_outbuf;
57 static int audio_outbuf_size;
59 
60 /*
61  * add an audio output stream
62  */
/* NOTE(review): this listing is missing embedded line 63 — the function
   signature (presumably `static AVStream *add_audio_stream(AVFormatContext *oc,
   enum CodecID codec_id)`, given the uses of `oc` and `codec_id` below) —
   confirm against the original source. */
64 {
65  AVCodecContext *c;
66  AVStream *st;
67 
/* Create stream #1 on the muxer context; exits the process on failure,
   as all error paths in this example do. */
68  st = av_new_stream(oc, 1);
69  if (!st) {
70  fprintf(stderr, "Could not alloc stream\n");
71  exit(1);
72  }
73 
74  c = st->codec;
75  c->codec_id = codec_id;
/* NOTE(review): embedded line 76 is missing — presumably the
   `c->codec_type = AVMEDIA_TYPE_AUDIO;` assignment — TODO confirm. */
77 
78  /* put sample parameters */
80  c->bit_rate = 64000;
81  c->sample_rate = 44100;
82  c->channels = 2;
83 
84  // some formats want stream headers to be separate
/* NOTE(review): embedded lines 85-86 are missing — presumably the
   AVFMT_GLOBALHEADER check setting CODEC_FLAG_GLOBAL_HEADER, matching the
   pattern visible in the video-stream setup below — TODO confirm. */
87 
88  return st;
89 }
90 
/* Find and open the audio encoder for `st`, initialize the sine-tone
   generator state (t/tincr/tincr2), and size the encode/input buffers.
   Exits the process on any failure. */
91 static void open_audio(AVFormatContext *oc, AVStream *st)
92 {
93  AVCodecContext *c;
94  AVCodec *codec;
95 
96  c = st->codec;
97 
98  /* find the audio encoder */
99  codec = avcodec_find_encoder(c->codec_id);
100  if (!codec) {
101  fprintf(stderr, "codec not found\n");
102  exit(1);
103  }
104 
105  /* open it */
106  if (avcodec_open(c, codec) < 0) {
107  fprintf(stderr, "could not open codec\n");
108  exit(1);
109  }
110 
111  /* init signal generator */
112  t = 0;
/* 110 Hz base tone expressed as radians per sample */
113  tincr = 2 * M_PI * 110.0 / c->sample_rate;
114  /* increment frequency by 110 Hz per second */
115  tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;
116 
117  audio_outbuf_size = 10000;
/* NOTE(review): embedded line 118 is missing — presumably
   `audio_outbuf = av_malloc(audio_outbuf_size);` since audio_outbuf is
   used uninitialized otherwise — TODO confirm. */
119 
120  /* ugly hack for PCM codecs (will be removed ASAP with new PCM
121  support to compute the input frame size in samples */
122  if (c->frame_size <= 1) {
/* NOTE(review): embedded lines 123, 129 and 135 are missing — presumably
   the `audio_input_frame_size` computations (outbuf_size/2, halved again
   for 16-bit PCM, or c->frame_size for real codecs) — TODO confirm. */
124  switch(st->codec->codec_id) {
125  case CODEC_ID_PCM_S16LE:
126  case CODEC_ID_PCM_S16BE:
127  case CODEC_ID_PCM_U16LE:
128  case CODEC_ID_PCM_U16BE:
130  break;
131  default:
132  break;
133  }
134  } else {
136  }
/* NOTE(review): embedded line 137 is missing — presumably the
   `samples = av_malloc(...)` allocation for the PCM input buffer. */
138 }
139 
140 /* prepare a 16 bit dummy audio frame of 'frame_size' samples and
141  'nb_channels' channels */
142 static void get_audio_frame(int16_t *samples, int frame_size, int nb_channels)
143 {
144  int j, i, v;
145  int16_t *q;
146 
147  q = samples;
148  for(j=0;j<frame_size;j++) {
149  v = (int)(sin(t) * 10000);
150  for(i = 0; i < nb_channels; i++)
151  *q++ = v;
152  t += tincr;
153  tincr += tincr2;
154  }
155 }
156 
/* NOTE(review): embedded line 157 is missing — the function signature
   (presumably `static void write_audio_frame(AVFormatContext *oc,
   AVStream *st)`, matching the call in main()) — TODO confirm. */
158 {
159  AVCodecContext *c;
160  AVPacket pkt;
161  av_init_packet(&pkt);
162 
163  c = st->codec;
164 
/* NOTE(review): embedded line 165 is missing — presumably the
   `get_audio_frame(samples, ..., c->channels);` call that fills the PCM
   buffer before encoding — TODO confirm. */
166 
167  pkt.size= avcodec_encode_audio(c, audio_outbuf, audio_outbuf_size, samples);
168 
/* rescale the encoder pts (codec time base) to the stream time base */
169  if (c->coded_frame && c->coded_frame->pts != AV_NOPTS_VALUE)
170  pkt.pts= av_rescale_q(c->coded_frame->pts, c->time_base, st->time_base);
171  pkt.flags |= AV_PKT_FLAG_KEY;
172  pkt.stream_index= st->index;
173  pkt.data= audio_outbuf;
174 
175  /* write the compressed frame in the media file */
176  if (av_interleaved_write_frame(oc, &pkt) != 0) {
177  fprintf(stderr, "Error while writing audio frame\n");
178  exit(1);
179  }
180 }
181 
/* Close the audio codec and release the PCM sample buffer. */
182 static void close_audio(AVFormatContext *oc, AVStream *st)
183 {
184  avcodec_close(st->codec);
185 
186  av_free(samples);
/* NOTE(review): embedded line 187 is missing — presumably
   `av_free(audio_outbuf);`, mirroring the allocation in open_audio() —
   TODO confirm. */
188 }
189 
190 /**************************************************************/
191 /* video output */
192 
/* NOTE(review): embedded lines 193 and 195 are missing — presumably
   `static AVFrame *picture, *tmp_picture;` and
   `static int frame_count, video_outbuf_size;`, since those names are
   used throughout the video path below — TODO confirm. */
/* Encoded-video output buffer (allocated in open_video()). */
194 static uint8_t *video_outbuf;
196 
197 /* add a video output stream */
/* NOTE(review): embedded line 198 is missing — the function signature
   (presumably `static AVStream *add_video_stream(AVFormatContext *oc,
   enum CodecID codec_id)`, matching the call in main()) — TODO confirm. */
199 {
200  AVCodecContext *c;
201  AVStream *st;
202 
203  st = avformat_new_stream(oc, NULL);
204  if (!st) {
205  fprintf(stderr, "Could not alloc stream\n");
206  exit(1);
207  }
208 
209  c = st->codec;
210  c->codec_id = codec_id;
/* NOTE(review): embedded line 211 is missing — presumably the
   `c->codec_type = AVMEDIA_TYPE_VIDEO;` assignment — TODO confirm. */
212 
213  /* put sample parameters */
214  c->bit_rate = 400000;
215  /* resolution must be a multiple of two */
216  c->width = 352;
217  c->height = 288;
218  /* time base: this is the fundamental unit of time (in seconds) in terms
219  of which frame timestamps are represented. for fixed-fps content,
220  timebase should be 1/framerate and timestamp increments should be
221  identically 1. */
/* NOTE(review): embedded line 222 is missing — presumably
   `c->time_base.den = STREAM_FRAME_RATE;`, which the comment above
   requires for a 1/framerate time base — TODO confirm. */
223  c->time_base.num = 1;
224  c->gop_size = 12; /* emit one intra frame every twelve frames at most */
225  c->pix_fmt = STREAM_PIX_FMT;
226  if (c->codec_id == CODEC_ID_MPEG2VIDEO) {
227  /* just for testing, we also add B frames */
228  c->max_b_frames = 2;
229  }
230  if (c->codec_id == CODEC_ID_MPEG1VIDEO){
231  /* Needed to avoid using macroblocks in which some coeffs overflow.
232  This does not happen with normal video, it just happens here as
233  the motion of the chroma plane does not match the luma plane. */
234  c->mb_decision=2;
235  }
236  // some formats want stream headers to be separate
237  if(oc->oformat->flags & AVFMT_GLOBALHEADER)
/* NOTE(review): embedded line 238 — the body of the `if` above — is
   missing; presumably `c->flags |= CODEC_FLAG_GLOBAL_HEADER;`.  As
   listed, the `if` would swallow the following blank statement — the
   dump is incomplete here, confirm against the original source. */
239 
240  return st;
241 }
242 
/* NOTE(review): embedded line 243 is missing — the function signature
   (presumably `static AVFrame *alloc_picture(enum PixelFormat pix_fmt,
   int width, int height)`, matching the calls in open_video()) — TODO
   confirm.  Allocates an AVFrame plus a data buffer for the given pixel
   format/size; returns NULL on allocation failure. */
244 {
245  AVFrame *picture;
246  uint8_t *picture_buf;
247  int size;
248 
249  picture = avcodec_alloc_frame();
250  if (!picture)
251  return NULL;
252  size = avpicture_get_size(pix_fmt, width, height);
253  picture_buf = av_malloc(size);
254  if (!picture_buf) {
/* avoid leaking the frame when the data buffer cannot be allocated */
255  av_free(picture);
256  return NULL;
257  }
/* attach the raw buffer to the frame's data/linesize pointers */
258  avpicture_fill((AVPicture *)picture, picture_buf,
259  pix_fmt, width, height);
260  return picture;
261 }
262 
/* Find and open the video encoder for `st`, allocate the encode output
   buffer (when the muxer is not a raw-picture format), the main frame,
   and — when the encoder is not YUV420P — a temporary YUV420P frame used
   as the conversion source.  Exits the process on any failure. */
263 static void open_video(AVFormatContext *oc, AVStream *st)
264 {
265  AVCodec *codec;
266  AVCodecContext *c;
267 
268  c = st->codec;
269 
270  /* find the video encoder */
271  codec = avcodec_find_encoder(c->codec_id);
272  if (!codec) {
273  fprintf(stderr, "codec not found\n");
274  exit(1);
275  }
276 
277  /* open the codec */
278  if (avcodec_open(c, codec) < 0) {
279  fprintf(stderr, "could not open codec\n");
280  exit(1);
281  }
282 
283  video_outbuf = NULL;
284  if (!(oc->oformat->flags & AVFMT_RAWPICTURE)) {
285  /* allocate output buffer */
286  /* XXX: API change will be done */
287  /* buffers passed into lav* can be allocated any way you prefer,
288  as long as they're aligned enough for the architecture, and
289  they're freed appropriately (such as using av_free for buffers
290  allocated with av_malloc) */
291  video_outbuf_size = 200000;
/* NOTE(review): embedded line 292 is missing — presumably
   `video_outbuf = av_malloc(video_outbuf_size);`; without it
   video_outbuf stays NULL when encoding — TODO confirm. */
293  }
294 
295  /* allocate the encoded raw picture */
296  picture = alloc_picture(c->pix_fmt, c->width, c->height);
297  if (!picture) {
298  fprintf(stderr, "Could not allocate picture\n");
299  exit(1);
300  }
301 
302  /* if the output format is not YUV420P, then a temporary YUV420P
303  picture is needed too. It is then converted to the required
304  output format */
305  tmp_picture = NULL;
306  if (c->pix_fmt != PIX_FMT_YUV420P) {
307  tmp_picture = alloc_picture(PIX_FMT_YUV420P, c->width, c->height);
308  if (!tmp_picture) {
309  fprintf(stderr, "Could not allocate temporary picture\n");
310  exit(1);
311  }
312  }
313 }
314 
315 /* prepare a dummy image */
316 static void fill_yuv_image(AVFrame *pict, int frame_index, int width, int height)
317 {
318  int x, y, i;
319 
320  i = frame_index;
321 
322  /* Y */
323  for(y=0;y<height;y++) {
324  for(x=0;x<width;x++) {
325  pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;
326  }
327  }
328 
329  /* Cb and Cr */
330  for(y=0;y<height/2;y++) {
331  for(x=0;x<width/2;x++) {
332  pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
333  pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
334  }
335  }
336 }
337 
/* NOTE(review): embedded line 338 is missing — the function signature
   (presumably `static void write_video_frame(AVFormatContext *oc,
   AVStream *st)`, matching the call in main()) — TODO confirm. */
339 {
340  int out_size, ret;
341  AVCodecContext *c;
/* lazily-created scaler context, reused across calls */
342  static struct SwsContext *img_convert_ctx;
343 
344  c = st->codec;
345 
346  if (frame_count >= STREAM_NB_FRAMES) {
347  /* no more frame to compress. The codec has a latency of a few
348  frames if using B frames, so we get the last frames by
349  passing the same picture again */
350  } else {
351  if (c->pix_fmt != PIX_FMT_YUV420P) {
352  /* as we only generate a YUV420P picture, we must convert it
353  to the codec pixel format if needed */
354  if (img_convert_ctx == NULL) {
355  img_convert_ctx = sws_getContext(c->width, c->height,
/* NOTE(review): embedded line 356 is missing — presumably the source
   pixel format argument `PIX_FMT_YUV420P,`, since tmp_picture is
   allocated as YUV420P — TODO confirm. */
357  c->width, c->height,
358  c->pix_fmt,
359  sws_flags, NULL, NULL, NULL);
360  if (img_convert_ctx == NULL) {
361  fprintf(stderr, "Cannot initialize the conversion context\n");
362  exit(1);
363  }
364  }
365  fill_yuv_image(tmp_picture, frame_count, c->width, c->height);
366  sws_scale(img_convert_ctx, tmp_picture->data, tmp_picture->linesize,
367  0, c->height, picture->data, picture->linesize);
368  } else {
369  fill_yuv_image(picture, frame_count, c->width, c->height);
370  }
371  }
372 
373 
374  if (oc->oformat->flags & AVFMT_RAWPICTURE) {
375  /* raw video case. The API will change slightly in the near
376  futur for that */
377  AVPacket pkt;
378  av_init_packet(&pkt);
379 
380  pkt.flags |= AV_PKT_FLAG_KEY;
/* for raw-picture muxers the packet carries the AVPicture struct itself */
381  pkt.stream_index= st->index;
382  pkt.data= (uint8_t *)picture;
383  pkt.size= sizeof(AVPicture);
384 
385  ret = av_interleaved_write_frame(oc, &pkt);
386  } else {
387  /* encode the image */
388  out_size = avcodec_encode_video(c, video_outbuf, video_outbuf_size, picture);
389  /* if zero size, it means the image was buffered */
390  if (out_size > 0) {
391  AVPacket pkt;
392  av_init_packet(&pkt);
393 
/* rescale the encoder pts (codec time base) to the stream time base */
394  if (c->coded_frame->pts != AV_NOPTS_VALUE)
395  pkt.pts= av_rescale_q(c->coded_frame->pts, c->time_base, st->time_base);
396  if(c->coded_frame->key_frame)
397  pkt.flags |= AV_PKT_FLAG_KEY;
398  pkt.stream_index= st->index;
399  pkt.data= video_outbuf;
400  pkt.size= out_size;
401 
402  /* write the compressed frame in the media file */
403  ret = av_interleaved_write_frame(oc, &pkt);
404  } else {
405  ret = 0;
406  }
407  }
408  if (ret != 0) {
409  fprintf(stderr, "Error while writing video frame\n");
410  exit(1);
411  }
412  frame_count++;
413 }
414 
/* Close the video codec and free the frame buffers allocated in
   open_video() (frame data buffer first, then the frame itself). */
415 static void close_video(AVFormatContext *oc, AVStream *st)
416 {
417  avcodec_close(st->codec);
418  av_free(picture->data[0]);
419  av_free(picture);
420  if (tmp_picture) {
421  av_free(tmp_picture->data[0]);
422  av_free(tmp_picture);
423  }
/* NOTE(review): embedded line 424 is missing — presumably
   `av_free(video_outbuf);`, mirroring the allocation in open_video() —
   TODO confirm. */
425 }
426 
427 /**************************************************************/
428 /* media file output */
429 
/* Entry point: guess an output container from the file name (MPEG as a
   fallback), create one audio and one video stream when the container
   supports them, generate ~STREAM_DURATION seconds of synthetic content,
   interleave the frames by comparing stream timestamps, then write the
   trailer and tear everything down.
   NOTE(review): this uses the pre-0.11 FFmpeg API (av_set_parameters,
   av_write_header, avcodec_open, ...) which has since been removed —
   confirm the target FFmpeg version before reusing. */
430 int main(int argc, char **argv)
431 {
432  const char *filename;
433  AVOutputFormat *fmt;
434  AVFormatContext *oc;
435  AVStream *audio_st, *video_st;
436  double audio_pts, video_pts;
437  int i;
438 
439  /* initialize libavcodec, and register all codecs and formats */
440  av_register_all();
441 
442  if (argc != 2) {
443  printf("usage: %s output_file\n"
444  "API example program to output a media file with libavformat.\n"
445  "The output format is automatically guessed according to the file extension.\n"
446  "Raw images can also be output by using '%%d' in the filename\n"
447  "\n", argv[0]);
448  return 1;
449  }
450 
451  filename = argv[1];
452 
453  /* auto detect the output format from the name. default is
454  mpeg. */
455  fmt = av_guess_format(NULL, filename, NULL);
456  if (!fmt) {
457  printf("Could not deduce output format from file extension: using MPEG.\n");
458  fmt = av_guess_format("mpeg", NULL, NULL);
459  }
460  if (!fmt) {
461  fprintf(stderr, "Could not find suitable output format\n");
462  return 1;
463  }
464 
465  /* allocate the output media context */
466  oc = avformat_alloc_context();
467  if (!oc) {
468  fprintf(stderr, "Memory error\n");
469  return 1;
470  }
471  oc->oformat = fmt;
472  snprintf(oc->filename, sizeof(oc->filename), "%s", filename);
473 
474  /* add the audio and video streams using the default format codecs
475  and initialize the codecs */
476  video_st = NULL;
477  audio_st = NULL;
478  if (fmt->video_codec != CODEC_ID_NONE) {
479  video_st = add_video_stream(oc, fmt->video_codec);
480  }
481  if (fmt->audio_codec != CODEC_ID_NONE) {
482  audio_st = add_audio_stream(oc, fmt->audio_codec);
483  }
484 
485  /* set the output parameters (must be done even if no
486  parameters). */
487  if (av_set_parameters(oc, NULL) < 0) {
488  fprintf(stderr, "Invalid output format parameters\n");
489  return 1;
490  }
491 
492  av_dump_format(oc, 0, filename, 1);
493 
494  /* now that all the parameters are set, we can open the audio and
495  video codecs and allocate the necessary encode buffers */
496  if (video_st)
497  open_video(oc, video_st);
498  if (audio_st)
499  open_audio(oc, audio_st);
500 
501  /* open the output file, if needed */
502  if (!(fmt->flags & AVFMT_NOFILE)) {
503  if (avio_open(&oc->pb, filename, AVIO_FLAG_WRITE) < 0) {
504  fprintf(stderr, "Could not open '%s'\n", filename);
505  return 1;
506  }
507  }
508 
509  /* write the stream header, if any */
510  av_write_header(oc);
511 
512  for(;;) {
513  /* compute current audio and video time */
514  if (audio_st)
515  audio_pts = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den;
516  else
517  audio_pts = 0.0;
518 
519  if (video_st)
520  video_pts = (double)video_st->pts.val * video_st->time_base.num / video_st->time_base.den;
521  else
522  video_pts = 0.0;
523 
/* stop once every existing stream has reached the target duration */
524  if ((!audio_st || audio_pts >= STREAM_DURATION) &&
525  (!video_st || video_pts >= STREAM_DURATION))
526  break;
527 
528  /* write interleaved audio and video frames */
/* always feed the stream that is currently behind in presentation time */
529  if (!video_st || (video_st && audio_st && audio_pts < video_pts)) {
530  write_audio_frame(oc, audio_st);
531  } else {
532  write_video_frame(oc, video_st);
533  }
534  }
535 
536  /* write the trailer, if any. the trailer must be written
537  * before you close the CodecContexts open when you wrote the
538  * header; otherwise write_trailer may try to use memory that
539  * was freed on av_codec_close() */
540  av_write_trailer(oc);
541 
542  /* close each codec */
543  if (video_st)
544  close_video(oc, video_st);
545  if (audio_st)
546  close_audio(oc, audio_st);
547 
548  /* free the streams */
549  for(i = 0; i < oc->nb_streams; i++) {
550  av_freep(&oc->streams[i]->codec);
551  av_freep(&oc->streams[i]);
552  }
553 
554  if (!(fmt->flags & AVFMT_NOFILE)) {
555  /* close the output file */
556  avio_close(oc->pb);
557  }
558 
559  /* free the stream */
560  av_free(oc);
561 
562  return 0;
563 }