Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
Download
52867 views
1
/*
2
* Copyright (c) 2000-2003 Fabrice Bellard
3
*
4
* This file is part of FFmpeg.
5
*
6
* FFmpeg is free software; you can redistribute it and/or
7
* modify it under the terms of the GNU Lesser General Public
8
* License as published by the Free Software Foundation; either
9
* version 2.1 of the License, or (at your option) any later version.
10
*
11
* FFmpeg is distributed in the hope that it will be useful,
12
* but WITHOUT ANY WARRANTY; without even the implied warranty of
13
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14
* Lesser General Public License for more details.
15
*
16
* You should have received a copy of the GNU Lesser General Public
17
* License along with FFmpeg; if not, write to the Free Software
18
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19
*/
20
21
/**
22
* @file
23
* multimedia converter based on the FFmpeg libraries
24
*/
25
26
#include "config.h"
27
#include <ctype.h>
28
#include <string.h>
29
#include <math.h>
30
#include <stdlib.h>
31
#include <errno.h>
32
#include <limits.h>
33
#include <stdint.h>
34
35
#if HAVE_IO_H
36
#include <io.h>
37
#endif
38
#if HAVE_UNISTD_H
39
#include <unistd.h>
40
#endif
41
42
#include "libavformat/avformat.h"
43
#include "libavdevice/avdevice.h"
44
#include "libswresample/swresample.h"
45
#include "libavutil/opt.h"
46
#include "libavutil/channel_layout.h"
47
#include "libavutil/parseutils.h"
48
#include "libavutil/samplefmt.h"
49
#include "libavutil/fifo.h"
50
#include "libavutil/internal.h"
51
#include "libavutil/intreadwrite.h"
52
#include "libavutil/dict.h"
53
#include "libavutil/mathematics.h"
54
#include "libavutil/pixdesc.h"
55
#include "libavutil/avstring.h"
56
#include "libavutil/libm.h"
57
#include "libavutil/imgutils.h"
58
#include "libavutil/timestamp.h"
59
#include "libavutil/bprint.h"
60
#include "libavutil/time.h"
61
#include "libavutil/threadmessage.h"
62
#include "libavcodec/mathops.h"
63
#include "libavformat/os_support.h"
64
65
# include "libavfilter/avfilter.h"
66
# include "libavfilter/buffersrc.h"
67
# include "libavfilter/buffersink.h"
68
69
#if HAVE_SYS_RESOURCE_H
70
#include <sys/time.h>
71
#include <sys/types.h>
72
#include <sys/resource.h>
73
#elif HAVE_GETPROCESSTIMES
74
#include <windows.h>
75
#endif
76
#if HAVE_GETPROCESSMEMORYINFO
77
#include <windows.h>
78
#include <psapi.h>
79
#endif
80
#if HAVE_SETCONSOLECTRLHANDLER
81
#include <windows.h>
82
#endif
83
84
85
#if HAVE_SYS_SELECT_H
86
#include <sys/select.h>
87
#endif
88
89
#if HAVE_TERMIOS_H
90
#include <fcntl.h>
91
#include <sys/ioctl.h>
92
#include <sys/time.h>
93
#include <termios.h>
94
#elif HAVE_KBHIT
95
#include <conio.h>
96
#endif
97
98
#if HAVE_PTHREADS
99
#include <pthread.h>
100
#endif
101
102
#include <time.h>
103
104
#include "ffmpeg.h"
105
#include "cmdutils.h"
106
107
#include "libavutil/avassert.h"
108
109
const char program_name[] = "ffmpeg";
110
const int program_birth_year = 2000;
111
112
static FILE *vstats_file;
113
114
const char *const forced_keyframes_const_names[] = {
115
"n",
116
"n_forced",
117
"prev_forced_n",
118
"prev_forced_t",
119
"t",
120
NULL
121
};
122
123
static void do_video_stats(OutputStream *ost, int frame_size);
124
static int64_t getutime(void);
125
static int64_t getmaxrss(void);
126
127
static int run_as_daemon = 0;
128
static int nb_frames_dup = 0;
129
static int nb_frames_drop = 0;
130
static int64_t decode_error_stat[2];
131
132
static int current_time;
133
AVIOContext *progress_avio = NULL;
134
135
static uint8_t *subtitle_out;
136
137
InputStream **input_streams = NULL;
138
int nb_input_streams = 0;
139
InputFile **input_files = NULL;
140
int nb_input_files = 0;
141
142
OutputStream **output_streams = NULL;
143
int nb_output_streams = 0;
144
OutputFile **output_files = NULL;
145
int nb_output_files = 0;
146
147
FilterGraph **filtergraphs;
148
int nb_filtergraphs;
149
150
#if HAVE_TERMIOS_H
151
152
/* init terminal so that we can grab keys */
153
static struct termios oldtty;
154
static int restore_tty;
155
#endif
156
157
#if HAVE_PTHREADS
158
static void free_input_threads(void);
159
#endif
160
161
/* sub2video hack:
162
Convert subtitles to video with alpha to insert them in filter graphs.
163
This is a temporary solution until libavfilter gets real subtitles support.
164
*/
165
166
/* Reset the stream's sub2video canvas to a blank RGB32 frame.
 * Dimensions come from the decoder context when available, otherwise from
 * the pre-configured sub2video size.
 * Returns 0 on success, or a negative AVERROR code if the frame buffer
 * cannot be allocated. */
static int sub2video_get_blank_frame(InputStream *ist)
{
    AVFrame *frame = ist->sub2video.frame;
    int err;

    av_frame_unref(frame);

    frame->format = AV_PIX_FMT_RGB32;
    frame->width  = ist->dec_ctx->width  ? ist->dec_ctx->width  : ist->sub2video.w;
    frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;

    err = av_frame_get_buffer(frame, 32);
    if (err < 0)
        return err;

    /* Zero the pixel plane so the canvas starts out blank. */
    memset(frame->data[0], 0, frame->height * frame->linesize[0]);
    return 0;
}
180
181
/* Blit one bitmap subtitle rectangle onto the RGB32 sub2video canvas.
 * dst/dst_linesize describe the destination canvas of size w x h;
 * r is the subtitle rectangle (paletted 8-bit bitmap with a 32-bit palette).
 * Non-bitmap rectangles and rectangles that do not fit inside the canvas
 * are logged and skipped. */
static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
                                AVSubtitleRect *r)
{
    uint32_t *pal, *dst2;
    uint8_t *src, *src2;
    int x, y;

    if (r->type != SUBTITLE_BITMAP) {
        av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
        return;
    }
    /* Reject rectangles that would write outside the canvas. */
    if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
        av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
            r->x, r->y, r->w, r->h, w, h
        );
        return;
    }

    /* Advance to the rectangle's top-left corner; 4 bytes per RGB32 pixel. */
    dst += r->y * dst_linesize + r->x * 4;
    src = r->pict.data[0];               /* 8-bit palette indices */
    pal = (uint32_t *)r->pict.data[1];   /* 32-bit palette entries */
    for (y = 0; y < r->h; y++) {
        dst2 = (uint32_t *)dst;
        src2 = src;
        /* Expand each palette index to its 32-bit color. */
        for (x = 0; x < r->w; x++)
            *(dst2++) = pal[*(src2++)];
        dst += dst_linesize;
        src += r->pict.linesize[0];
    }
}
211
212
/* Send the current sub2video canvas, stamped with the given pts, to every
 * filter graph input attached to this stream. The frame is kept (not
 * consumed) so it can be re-sent by the heartbeat mechanism. */
static void sub2video_push_ref(InputStream *ist, int64_t pts)
{
    AVFrame *canvas = ist->sub2video.frame;
    int idx;

    av_assert1(canvas->data[0]);
    canvas->pts = pts;
    ist->sub2video.last_pts = pts;
    for (idx = 0; idx < ist->nb_filters; idx++) {
        av_buffersrc_add_frame_flags(ist->filters[idx]->filter, canvas,
                                     AV_BUFFERSRC_FLAG_KEEP_REF |
                                     AV_BUFFERSRC_FLAG_PUSH);
    }
}
224
225
/* Render a subtitle onto the stream's sub2video canvas and push the result
 * into the attached filter graphs.
 * When sub is NULL, an empty (blank) canvas is pushed instead, starting at
 * the previous subtitle's end time and valid indefinitely — this clears the
 * previous subtitle from the video. */
static void sub2video_update(InputStream *ist, AVSubtitle *sub)
{
    AVFrame *frame = ist->sub2video.frame;
    /* Fix: was "int8_t *dst". frame->data[] is uint8_t *[] and
     * sub2video_copy_rect() takes uint8_t *, so int8_t * caused a
     * sign-mismatched pointer conversion. */
    uint8_t *dst;
    int dst_linesize;
    int num_rects, i;
    int64_t pts, end_pts;

    if (!frame)
        return; /* stream is not set up for sub2video */
    if (sub) {
        /* display times are in ms; convert to the stream time base */
        pts       = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
                                 AV_TIME_BASE_Q, ist->st->time_base);
        end_pts   = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
                                 AV_TIME_BASE_Q, ist->st->time_base);
        num_rects = sub->num_rects;
    } else {
        /* clear frame: starts where the last subtitle ended, never expires */
        pts       = ist->sub2video.end_pts;
        end_pts   = INT64_MAX;
        num_rects = 0;
    }
    if (sub2video_get_blank_frame(ist) < 0) {
        av_log(ist->dec_ctx, AV_LOG_ERROR,
               "Impossible to get a blank canvas.\n");
        return;
    }
    dst          = frame->data[0];
    dst_linesize = frame->linesize[0];
    for (i = 0; i < num_rects; i++)
        sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
    sub2video_push_ref(ist, pts);
    ist->sub2video.end_pts = end_pts;
}
258
259
/* Re-send the current sub2video frame of every sub2video stream in the same
 * input file as ist, so that filters waiting on a subtitle input do not
 * starve while video frames accumulate. Called whenever a packet is read
 * from the file; pts is in ist's stream time base. */
static void sub2video_heartbeat(InputStream *ist, int64_t pts)
{
    InputFile *infile = input_files[ist->file_index];
    int i, j, nb_reqs;
    int64_t pts2;

    /* When a frame is read from a file, examine all sub2video streams in
       the same file and send the sub2video frame again. Otherwise, decoded
       video frames could be accumulating in the filter graph while a filter
       (possibly overlay) is desperately waiting for a subtitle frame. */
    for (i = 0; i < infile->nb_streams; i++) {
        InputStream *ist2 = input_streams[infile->ist_index + i];
        if (!ist2->sub2video.frame)
            continue; /* not a sub2video stream */
        /* subtitles seem to be usually muxed ahead of other streams;
           if not, subtracting a larger time here is necessary */
        pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
        /* do not send the heartbeat frame if the subtitle is already ahead */
        if (pts2 <= ist2->sub2video.last_pts)
            continue;
        /* current subtitle expired, or no canvas yet: push a blank frame */
        if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
            sub2video_update(ist2, NULL);
        /* only re-push if some buffersrc actually failed a frame request */
        for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
            nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
        if (nb_reqs)
            sub2video_push_ref(ist2, pts2);
    }
}
287
288
/* End-of-stream handling for a sub2video stream: clear any still-displayed
 * subtitle, then send EOF (a NULL frame) to every attached filter input. */
static void sub2video_flush(InputStream *ist)
{
    int idx;

    if (ist->sub2video.end_pts < INT64_MAX) {
        /* a subtitle is still on screen — push a blank canvas to clear it */
        sub2video_update(ist, NULL);
    }
    for (idx = 0; idx < ist->nb_filters; idx++)
        av_buffersrc_add_frame(ist->filters[idx]->filter, NULL);
}
297
298
/* end of sub2video hack */
299
300
/* Restore the saved terminal attributes. Uses only tcsetattr(), which is
 * async-signal-safe, so this may be called from a signal handler. */
static void term_exit_sigsafe(void)
{
#if HAVE_TERMIOS_H
    if(restore_tty)
        tcsetattr (0, TCSANOW, &oldtty);
#endif
}
307
308
/* Terminal teardown for the normal (non-signal) shutdown path. */
void term_exit(void)
{
    /* NOTE(review): the empty AV_LOG_QUIET message appears intended to
       terminate any partially-written status line — confirm against the
       log callback behavior. */
    av_log(NULL, AV_LOG_QUIET, "%s", "");
    term_exit_sigsafe();
}
313
314
static volatile int received_sigterm = 0;
315
static volatile int received_nb_signals = 0;
316
static volatile int transcode_init_done = 0;
317
static volatile int ffmpeg_exited = 0;
318
static int main_return_code = 0;
319
320
/* Signal handler for SIGINT/SIGTERM/SIGQUIT/SIGXCPU: record the signal,
 * restore the terminal, and hard-exit if the user insists (>3 signals).
 * Everything called here must be async-signal-safe; write() is,
 * term_exit_sigsafe() only calls tcsetattr(). */
static void
sigterm_handler(int sig)
{
    received_sigterm = sig;
    received_nb_signals++;
    term_exit_sigsafe();
    if(received_nb_signals > 3) {
        /* repeated signals: give up on graceful shutdown */
        write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
              strlen("Received > 3 system signals, hard exiting\n"));

        exit(123);
    }
}
333
334
#if HAVE_SETCONSOLECTRLHANDLER
335
/* Windows console control handler: translate console events into the same
 * sigterm_handler() path used on POSIX. For close/logoff/shutdown events
 * the process is killed as soon as this handler returns, so it spins until
 * the main thread has finished cleanup (ffmpeg_exited). */
static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
{
    av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);

    switch (fdwCtrlType)
    {
    case CTRL_C_EVENT:
    case CTRL_BREAK_EVENT:
        sigterm_handler(SIGINT);
        return TRUE;

    case CTRL_CLOSE_EVENT:
    case CTRL_LOGOFF_EVENT:
    case CTRL_SHUTDOWN_EVENT:
        sigterm_handler(SIGTERM);
        /* Basically, with these 3 events, when we return from this method the
           process is hard terminated, so stall as long as we need to
           to try and let the main thread(s) clean up and gracefully terminate
           (we have at most 5 seconds, but should be done far before that). */
        while (!ffmpeg_exited) {
            Sleep(0); /* yield the rest of this time slice */
        }
        return TRUE;

    default:
        av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
        return FALSE;
    }
}
364
#endif
365
366
/* Install signal handlers and, when running interactively on a termios
 * system, switch the terminal to raw-ish mode so single keypresses can be
 * read by read_key(). The original settings are saved in oldtty and
 * restored by term_exit_sigsafe(). */
void term_init(void)
{
#if HAVE_TERMIOS_H
    if(!run_as_daemon){
        struct termios tty;
        if (tcgetattr (0, &tty) == 0) {
            oldtty = tty;      /* remember original settings for restore */
            restore_tty = 1;

            /* disable input translation/flow control */
            tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
                             |INLCR|IGNCR|ICRNL|IXON);
            tty.c_oflag |= OPOST;
            /* no echo, no canonical (line-buffered) mode */
            tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
            tty.c_cflag &= ~(CSIZE|PARENB);
            tty.c_cflag |= CS8;
            /* read returns after 1 byte, no timeout */
            tty.c_cc[VMIN] = 1;
            tty.c_cc[VTIME] = 0;

            tcsetattr (0, TCSANOW, &tty);
        }
        signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
    }
#endif

    signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
    signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
#ifdef SIGXCPU
    signal(SIGXCPU, sigterm_handler);
#endif
#if HAVE_SETCONSOLECTRLHANDLER
    SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
#endif
}
399
400
/* read a key without blocking */
401
/* Read one key from stdin without blocking.
 * Returns the character read, or -1 if no input is available (or the input
 * pipe was closed). Three platform paths: select()+read() on termios
 * systems, PeekNamedPipe() for redirected stdin on Windows, and
 * kbhit()/getch() for a Windows console. */
static int read_key(void)
{
    unsigned char ch;
#if HAVE_TERMIOS_H
    int n = 1;
    struct timeval tv;
    fd_set rfds;

    FD_ZERO(&rfds);
    FD_SET(0, &rfds);
    /* zero timeout: poll, do not block */
    tv.tv_sec = 0;
    tv.tv_usec = 0;
    n = select(1, &rfds, NULL, NULL, &tv);
    if (n > 0) {
        n = read(0, &ch, 1);
        if (n == 1)
            return ch;

        return n; /* 0 on EOF, -1 on error */
    }
#elif HAVE_KBHIT
#    if HAVE_PEEKNAMEDPIPE
    static int is_pipe;
    static HANDLE input_handle;
    DWORD dw, nchars;
    if(!input_handle){
        input_handle = GetStdHandle(STD_INPUT_HANDLE);
        /* GetConsoleMode fails when stdin is not a console => it is a pipe */
        is_pipe = !GetConsoleMode(input_handle, &dw);
    }

    if (is_pipe) {
        /* When running under a GUI, you will end here. */
        if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
            // input pipe may have been closed by the program that ran ffmpeg
            return -1;
        }
        //Read it
        if(nchars != 0) {
            read(0, &ch, 1);
            return ch;
        }else{
            return -1;
        }
    }
#    endif
    if(kbhit())
        return(getch());
#endif
    return -1;
}
451
452
static int decode_interrupt_cb(void *ctx)
453
{
454
return received_nb_signals > transcode_init_done;
455
}
456
457
const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
458
459
/* Global teardown, registered via the exit machinery: free filter graphs,
 * output files/streams, input files/streams, close the vstats file, and
 * log the final status. ret is the pending program exit code (used only
 * for the "Conversion failed!" message). Order matters: graphs before
 * streams, files closed before their contexts are freed. */
static void ffmpeg_cleanup(int ret)
{
    int i, j;

    if (do_benchmark) {
        int maxrss = getmaxrss() / 1024;
        av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
    }

    /* free filter graphs and their per-edge input/output descriptors */
    for (i = 0; i < nb_filtergraphs; i++) {
        FilterGraph *fg = filtergraphs[i];
        avfilter_graph_free(&fg->graph);
        for (j = 0; j < fg->nb_inputs; j++) {
            av_freep(&fg->inputs[j]->name);
            av_freep(&fg->inputs[j]);
        }
        av_freep(&fg->inputs);
        for (j = 0; j < fg->nb_outputs; j++) {
            av_freep(&fg->outputs[j]->name);
            av_freep(&fg->outputs[j]);
        }
        av_freep(&fg->outputs);
        av_freep(&fg->graph_desc);

        av_freep(&filtergraphs[i]);
    }
    av_freep(&filtergraphs);

    av_freep(&subtitle_out);

    /* close files */
    for (i = 0; i < nb_output_files; i++) {
        OutputFile *of = output_files[i];
        AVFormatContext *s;
        if (!of)
            continue;
        s = of->ctx;
        /* only close the AVIO handle for formats that actually own a file */
        if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
            avio_closep(&s->pb);
        avformat_free_context(s);
        av_dict_free(&of->opts);

        av_freep(&output_files[i]);
    }
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        AVBitStreamFilterContext *bsfc;

        if (!ost)
            continue;

        /* walk and close the bitstream-filter chain */
        bsfc = ost->bitstream_filters;
        while (bsfc) {
            AVBitStreamFilterContext *next = bsfc->next;
            av_bitstream_filter_close(bsfc);
            bsfc = next;
        }
        ost->bitstream_filters = NULL;
        av_frame_free(&ost->filtered_frame);
        av_frame_free(&ost->last_frame);

        av_parser_close(ost->parser);

        av_freep(&ost->forced_keyframes);
        av_expr_free(ost->forced_keyframes_pexpr);
        av_freep(&ost->avfilter);
        av_freep(&ost->logfile_prefix);

        av_freep(&ost->audio_channels_map);
        ost->audio_channels_mapped = 0;

        av_dict_free(&ost->sws_dict);

        avcodec_free_context(&ost->enc_ctx);

        av_freep(&output_streams[i]);
    }
#if HAVE_PTHREADS
    /* stop input reader threads before tearing down the input files */
    free_input_threads();
#endif
    for (i = 0; i < nb_input_files; i++) {
        avformat_close_input(&input_files[i]->ctx);
        av_freep(&input_files[i]);
    }
    for (i = 0; i < nb_input_streams; i++) {
        InputStream *ist = input_streams[i];

        av_frame_free(&ist->decoded_frame);
        av_frame_free(&ist->filter_frame);
        av_dict_free(&ist->decoder_opts);
        avsubtitle_free(&ist->prev_sub.subtitle);
        av_frame_free(&ist->sub2video.frame);
        av_freep(&ist->filters);
        av_freep(&ist->hwaccel_device);

        avcodec_free_context(&ist->dec_ctx);

        av_freep(&input_streams[i]);
    }

    if (vstats_file) {
        /* fclose flushes; a failure here can mean lost stats */
        if (fclose(vstats_file))
            av_log(NULL, AV_LOG_ERROR,
                   "Error closing vstats file, loss of information possible: %s\n",
                   av_err2str(AVERROR(errno)));
    }
    av_freep(&vstats_filename);

    av_freep(&input_streams);
    av_freep(&input_files);
    av_freep(&output_streams);
    av_freep(&output_files);

    uninit_opts();

    avformat_network_deinit();

    if (received_sigterm) {
        av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
               (int) received_sigterm);
    } else if (ret && transcode_init_done) {
        av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
    }
    term_exit();
    ffmpeg_exited = 1; /* lets the Windows CtrlHandler stop stalling */
}
585
586
/* Remove from *a every key that also appears in b.
 * Used to strip options that were consumed elsewhere before asserting that
 * no unknown options remain. */
void remove_avoptions(AVDictionary **a, AVDictionary *b)
{
    AVDictionaryEntry *entry = NULL;

    for (;;) {
        entry = av_dict_get(b, "", entry, AV_DICT_IGNORE_SUFFIX);
        if (!entry)
            break;
        av_dict_set(a, entry->key, NULL, AV_DICT_MATCH_CASE);
    }
}
594
595
/* Abort the program if any option in m was not consumed.
 * An entry remaining in the dictionary means the user supplied an option
 * that no component recognized. */
void assert_avoptions(AVDictionary *m)
{
    AVDictionaryEntry *leftover;

    leftover = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX);
    if (leftover) {
        av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", leftover->key);
        exit_program(1);
    }
}
603
604
/* Terminate after an experimental codec was requested without
 * -strict experimental. The codec and encoder/decoder flag are currently
 * unused here; the diagnostic is printed by the caller. */
static void abort_codec_experimental(AVCodec *c, int encoder)
{
    exit_program(1);
}
608
609
/* Benchmark helper (-benchmark_all): log the user CPU time elapsed since
 * the previous call, labeled with the printf-style message, then reset the
 * reference time. Call with fmt == NULL to only reset the timer (e.g.
 * before the operation being measured). */
static void update_benchmark(const char *fmt, ...)
{
    if (do_benchmark_all) {
        int64_t t = getutime();
        va_list va;
        char buf[1024];

        if (fmt) {
            va_start(va, fmt);
            vsnprintf(buf, sizeof(buf), fmt, va);
            va_end(va);
            av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
        }
        current_time = t;
    }
}
625
626
/* Mark every output stream finished: the stream that triggered the close
 * gets this_stream's finished flags, all other streams get others'. */
static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
{
    int idx;

    for (idx = 0; idx < nb_output_streams; idx++) {
        OutputStream *cur = output_streams[idx];
        if (cur == ost)
            cur->finished |= this_stream;
        else
            cur->finished |= others;
    }
}
634
635
/* Hand one encoded packet to the muxer for output stream ost.
 * Applies bitstream filters, enforces -frames limits for non-reordering
 * codecs, extracts video quality side data, repairs invalid or
 * non-monotonous timestamps, and finally interleave-writes the packet.
 * The packet is always unreferenced before returning; a write failure
 * marks all output streams finished instead of aborting. */
static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
{
    AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
    /* when stream-copying, timestamps/fixups use the muxer codec context */
    AVCodecContext *avctx = ost->encoding_needed ? ost->enc_ctx : ost->st->codec;
    int ret;

    /* lazily propagate encoder extradata to the muxer codec context */
    if (!ost->st->codec->extradata_size && ost->enc_ctx->extradata_size) {
        ost->st->codec->extradata = av_mallocz(ost->enc_ctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
        if (ost->st->codec->extradata) {
            memcpy(ost->st->codec->extradata, ost->enc_ctx->extradata, ost->enc_ctx->extradata_size);
            ost->st->codec->extradata_size = ost->enc_ctx->extradata_size;
        }
    }

    /* in drop/async modes the muxer must regenerate the timestamps */
    if ((avctx->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
        (avctx->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
        pkt->pts = pkt->dts = AV_NOPTS_VALUE;

    /*
     * Audio encoders may split the packets -- #frames in != #packets out.
     * But there is no reordering, so we can limit the number of output packets
     * by simply dropping them here.
     * Counting encoded video frames needs to be done separately because of
     * reordering, see do_video_out()
     */
    if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) {
        if (ost->frame_number >= ost->max_frames) {
            av_packet_unref(pkt);
            return;
        }
        ost->frame_number++;
    }
    if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
        int i;
        /* quality/error stats the encoder attached as packet side data */
        uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
                                              NULL);
        ost->quality = sd ? AV_RL32(sd) : -1;
        ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;

        for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
            if (sd && i < sd[5])
                ost->error[i] = AV_RL64(sd + 8 + 8*i);
            else
                ost->error[i] = -1;
        }

        /* for CFR output, duration follows directly from the frame rate */
        if (ost->frame_rate.num && ost->is_cfr) {
            if (pkt->duration > 0)
                av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
            pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
                                         ost->st->time_base);
        }
    }

    if (bsfc)
        av_packet_split_side_data(pkt);

    if ((ret = av_apply_bitstream_filters(avctx, pkt, bsfc)) < 0) {
        print_error("", ret);
        if (exit_on_error)
            exit_program(1);
    }

    if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
        /* dts must never exceed pts; replace both by a symmetric guess */
        if (pkt->dts != AV_NOPTS_VALUE &&
            pkt->pts != AV_NOPTS_VALUE &&
            pkt->dts > pkt->pts) {
            av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
                   pkt->dts, pkt->pts,
                   ost->file_index, ost->st->index);
            /* picks the middle of the three candidate values */
            pkt->pts =
            pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
                     - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
                     - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
        }
        /* enforce (strictly or loosely) increasing dts per stream */
        if(
            (avctx->codec_type == AVMEDIA_TYPE_AUDIO || avctx->codec_type == AVMEDIA_TYPE_VIDEO) &&
            pkt->dts != AV_NOPTS_VALUE &&
            ost->last_mux_dts != AV_NOPTS_VALUE) {
            int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
            if (pkt->dts < max) {
                int loglevel = max - pkt->dts > 2 || avctx->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
                av_log(s, loglevel, "Non-monotonous DTS in output stream "
                       "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
                       ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
                if (exit_on_error) {
                    av_log(NULL, AV_LOG_FATAL, "aborting.\n");
                    exit_program(1);
                }
                av_log(s, loglevel, "changing to %"PRId64". This may result "
                       "in incorrect timestamps in the output file.\n",
                       max);
                if(pkt->pts >= pkt->dts)
                    pkt->pts = FFMAX(pkt->pts, max);
                pkt->dts = max;
            }
        }
    }
    ost->last_mux_dts = pkt->dts;

    ost->data_size += pkt->size;
    ost->packets_written++;

    pkt->stream_index = ost->index;

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
               "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
               av_get_media_type_string(ost->enc_ctx->codec_type),
               av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
               av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
               pkt->size
              );
    }

    ret = av_interleaved_write_frame(s, pkt);
    if (ret < 0) {
        print_error("av_interleaved_write_frame()", ret);
        main_return_code = 1;
        close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
    }
    av_packet_unref(pkt);
}
758
759
/* Mark ost's encoder finished. If the owning output file was opened with
 * -shortest, also clamp its recording time to this stream's current end so
 * the remaining streams stop at the same point. */
static void close_output_stream(OutputStream *ost)
{
    OutputFile *file = output_files[ost->file_index];
    int64_t stream_end;

    ost->finished |= ENCODER_FINISHED;

    if (!file->shortest)
        return;

    stream_end = av_rescale_q(ost->sync_opts - ost->first_pts,
                              ost->enc_ctx->time_base, AV_TIME_BASE_Q);
    file->recording_time = FFMIN(file->recording_time, stream_end);
}
769
770
/* Return 1 while ost is still within its file's recording time (-t),
 * or 0 after closing the stream once the limit has been reached. */
static int check_recording_time(OutputStream *ost)
{
    OutputFile *of = output_files[ost->file_index];

    /* no -t limit set for this file */
    if (of->recording_time == INT64_MAX)
        return 1;

    /* still before the limit? */
    if (av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base,
                      of->recording_time, AV_TIME_BASE_Q) < 0)
        return 1;

    close_output_stream(ost);
    return 0;
}
782
783
/* Encode one audio frame for ost and hand any produced packet to
 * write_frame(). Maintains ost->sync_opts as the pts of the next expected
 * sample and honors the file's recording-time limit. frame->pts is in the
 * encoder time base. */
static void do_audio_out(AVFormatContext *s, OutputStream *ost,
                         AVFrame *frame)
{
    AVCodecContext *enc = ost->enc_ctx;
    AVPacket pkt;
    int got_packet = 0;

    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;

    if (!check_recording_time(ost))
        return;

    /* fall back to the running sample counter when no pts is available
       or audio sync is disabled */
    if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
        frame->pts = ost->sync_opts;
    ost->sync_opts = frame->pts + frame->nb_samples;
    ost->samples_encoded += frame->nb_samples;
    ost->frames_encoded++;

    av_assert0(pkt.size || !pkt.data);
    update_benchmark(NULL); /* reset the benchmark timer */
    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
               "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
               av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
               enc->time_base.num, enc->time_base.den);
    }

    if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
        av_log(NULL, AV_LOG_FATAL, "Audio encoding failed (avcodec_encode_audio2)\n");
        exit_program(1);
    }
    update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);

    if (got_packet) {
        /* encoder produced timestamps in enc->time_base; muxer wants
           stream time base */
        av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);

        if (debug_ts) {
            av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
                   "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
                   av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
                   av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
        }

        write_frame(s, &pkt, ost);
    }
}
831
832
/* Encode one subtitle event for ost and mux the result.
 * DVB subtitles are encoded twice: a first packet that draws the subtitle
 * and a second (with num_rects forced to 0) that clears it. Timestamps are
 * shifted by the output file's start time so -ss/-t behave as expected.
 * Uses the file-scope subtitle_out buffer, allocated on first use. */
static void do_subtitle_out(AVFormatContext *s,
                            OutputStream *ost,
                            InputStream *ist,
                            AVSubtitle *sub)
{
    int subtitle_out_max_size = 1024 * 1024;
    int subtitle_out_size, nb, i;
    AVCodecContext *enc;
    AVPacket pkt;
    int64_t pts;

    if (sub->pts == AV_NOPTS_VALUE) {
        av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
        if (exit_on_error)
            exit_program(1);
        return;
    }

    enc = ost->enc_ctx;

    if (!subtitle_out) {
        subtitle_out = av_malloc(subtitle_out_max_size);
        if (!subtitle_out) {
            av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
            exit_program(1);
        }
    }

    /* Note: DVB subtitle need one packet to draw them and one other
       packet to clear them */
    /* XXX: signal it in the codec context ? */
    if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
        nb = 2;
    else
        nb = 1;

    /* shift timestamp to honor -ss and make check_recording_time() work with -t */
    pts = sub->pts;
    if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
        pts -= output_files[ost->file_index]->start_time;
    for (i = 0; i < nb; i++) {
        unsigned save_num_rects = sub->num_rects;

        ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
        if (!check_recording_time(ost))
            return;

        sub->pts = pts;
        // start_display_time is required to be 0
        sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
        sub->end_display_time -= sub->start_display_time;
        sub->start_display_time = 0;
        /* second DVB pass encodes an empty subtitle to clear the screen */
        if (i == 1)
            sub->num_rects = 0;

        ost->frames_encoded++;

        subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
                                                    subtitle_out_max_size, sub);
        if (i == 1)
            sub->num_rects = save_num_rects; /* restore caller's data */
        if (subtitle_out_size < 0) {
            av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
            exit_program(1);
        }

        av_init_packet(&pkt);
        pkt.data = subtitle_out;
        pkt.size = subtitle_out_size;
        pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
        pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->st->time_base);
        if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
            /* XXX: the pts correction is handled here. Maybe handling
               it in the codec would be better */
            /* 90 = 90kHz MPEG clock ticks per millisecond */
            if (i == 0)
                pkt.pts += 90 * sub->start_display_time;
            else
                pkt.pts += 90 * sub->end_display_time;
        }
        pkt.dts = pkt.pts;
        write_frame(s, &pkt, ost);
    }
}
915
916
static void do_video_out(AVFormatContext *s,
917
OutputStream *ost,
918
AVFrame *next_picture,
919
double sync_ipts)
920
{
921
int ret, format_video_sync;
922
AVPacket pkt;
923
AVCodecContext *enc = ost->enc_ctx;
924
AVCodecContext *mux_enc = ost->st->codec;
925
int nb_frames, nb0_frames, i;
926
double delta, delta0;
927
double duration = 0;
928
int frame_size = 0;
929
InputStream *ist = NULL;
930
AVFilterContext *filter = ost->filter->filter;
931
932
if (ost->source_index >= 0)
933
ist = input_streams[ost->source_index];
934
935
if (filter->inputs[0]->frame_rate.num > 0 &&
936
filter->inputs[0]->frame_rate.den > 0)
937
duration = 1/(av_q2d(filter->inputs[0]->frame_rate) * av_q2d(enc->time_base));
938
939
if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
940
duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
941
942
if (!ost->filters_script &&
943
!ost->filters &&
944
next_picture &&
945
ist &&
946
lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
947
duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
948
}
949
950
if (!next_picture) {
951
//end, flushing
952
nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
953
ost->last_nb0_frames[1],
954
ost->last_nb0_frames[2]);
955
} else {
956
delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
957
delta = delta0 + duration;
958
959
/* by default, we output a single frame */
960
nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
961
nb_frames = 1;
962
963
format_video_sync = video_sync_method;
964
if (format_video_sync == VSYNC_AUTO) {
965
if(!strcmp(s->oformat->name, "avi")) {
966
format_video_sync = VSYNC_VFR;
967
} else
968
format_video_sync = (s->oformat->flags & AVFMT_VARIABLE_FPS) ? ((s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
969
if ( ist
970
&& format_video_sync == VSYNC_CFR
971
&& input_files[ist->file_index]->ctx->nb_streams == 1
972
&& input_files[ist->file_index]->input_ts_offset == 0) {
973
format_video_sync = VSYNC_VSCFR;
974
}
975
if (format_video_sync == VSYNC_CFR && copy_ts) {
976
format_video_sync = VSYNC_VSCFR;
977
}
978
}
979
ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
980
981
if (delta0 < 0 &&
982
delta > 0 &&
983
format_video_sync != VSYNC_PASSTHROUGH &&
984
format_video_sync != VSYNC_DROP) {
985
if (delta0 < -0.6) {
986
av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
987
} else
988
av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
989
sync_ipts = ost->sync_opts;
990
duration += delta0;
991
delta0 = 0;
992
}
993
994
switch (format_video_sync) {
995
case VSYNC_VSCFR:
996
if (ost->frame_number == 0 && delta0 >= 0.5) {
997
av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
998
delta = duration;
999
delta0 = 0;
1000
ost->sync_opts = lrint(sync_ipts);
1001
}
1002
case VSYNC_CFR:
1003
// FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1004
if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1005
nb_frames = 0;
1006
} else if (delta < -1.1)
1007
nb_frames = 0;
1008
else if (delta > 1.1) {
1009
nb_frames = lrintf(delta);
1010
if (delta0 > 1.1)
1011
nb0_frames = lrintf(delta0 - 0.6);
1012
}
1013
break;
1014
case VSYNC_VFR:
1015
if (delta <= -0.6)
1016
nb_frames = 0;
1017
else if (delta > 0.6)
1018
ost->sync_opts = lrint(sync_ipts);
1019
break;
1020
case VSYNC_DROP:
1021
case VSYNC_PASSTHROUGH:
1022
ost->sync_opts = lrint(sync_ipts);
1023
break;
1024
default:
1025
av_assert0(0);
1026
}
1027
}
1028
1029
nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1030
nb0_frames = FFMIN(nb0_frames, nb_frames);
1031
1032
memmove(ost->last_nb0_frames + 1,
1033
ost->last_nb0_frames,
1034
sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1035
ost->last_nb0_frames[0] = nb0_frames;
1036
1037
if (nb0_frames == 0 && ost->last_dropped) {
1038
nb_frames_drop++;
1039
av_log(NULL, AV_LOG_VERBOSE,
1040
"*** dropping frame %d from stream %d at ts %"PRId64"\n",
1041
ost->frame_number, ost->st->index, ost->last_frame->pts);
1042
}
1043
if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1044
if (nb_frames > dts_error_threshold * 30) {
1045
av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1046
nb_frames_drop++;
1047
return;
1048
}
1049
nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1050
av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1051
}
1052
ost->last_dropped = nb_frames == nb0_frames && next_picture;
1053
1054
/* duplicates frame if needed */
1055
for (i = 0; i < nb_frames; i++) {
1056
AVFrame *in_picture;
1057
av_init_packet(&pkt);
1058
pkt.data = NULL;
1059
pkt.size = 0;
1060
1061
if (i < nb0_frames && ost->last_frame) {
1062
in_picture = ost->last_frame;
1063
} else
1064
in_picture = next_picture;
1065
1066
if (!in_picture)
1067
return;
1068
1069
in_picture->pts = ost->sync_opts;
1070
1071
#if 1
1072
if (!check_recording_time(ost))
1073
#else
1074
if (ost->frame_number >= ost->max_frames)
1075
#endif
1076
return;
1077
1078
#if FF_API_LAVF_FMT_RAWPICTURE
1079
if (s->oformat->flags & AVFMT_RAWPICTURE &&
1080
enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
1081
/* raw pictures are written as AVPicture structure to
1082
avoid any copies. We support temporarily the older
1083
method. */
1084
if (in_picture->interlaced_frame)
1085
mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1086
else
1087
mux_enc->field_order = AV_FIELD_PROGRESSIVE;
1088
pkt.data = (uint8_t *)in_picture;
1089
pkt.size = sizeof(AVPicture);
1090
pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
1091
pkt.flags |= AV_PKT_FLAG_KEY;
1092
1093
write_frame(s, &pkt, ost);
1094
} else
1095
#endif
1096
{
1097
int got_packet, forced_keyframe = 0;
1098
double pts_time;
1099
1100
if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
1101
ost->top_field_first >= 0)
1102
in_picture->top_field_first = !!ost->top_field_first;
1103
1104
if (in_picture->interlaced_frame) {
1105
if (enc->codec->id == AV_CODEC_ID_MJPEG)
1106
mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1107
else
1108
mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1109
} else
1110
mux_enc->field_order = AV_FIELD_PROGRESSIVE;
1111
1112
in_picture->quality = enc->global_quality;
1113
in_picture->pict_type = 0;
1114
1115
pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1116
in_picture->pts * av_q2d(enc->time_base) : NAN;
1117
if (ost->forced_kf_index < ost->forced_kf_count &&
1118
in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1119
ost->forced_kf_index++;
1120
forced_keyframe = 1;
1121
} else if (ost->forced_keyframes_pexpr) {
1122
double res;
1123
ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1124
res = av_expr_eval(ost->forced_keyframes_pexpr,
1125
ost->forced_keyframes_expr_const_values, NULL);
1126
ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1127
ost->forced_keyframes_expr_const_values[FKF_N],
1128
ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1129
ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1130
ost->forced_keyframes_expr_const_values[FKF_T],
1131
ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1132
res);
1133
if (res) {
1134
forced_keyframe = 1;
1135
ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1136
ost->forced_keyframes_expr_const_values[FKF_N];
1137
ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1138
ost->forced_keyframes_expr_const_values[FKF_T];
1139
ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1140
}
1141
1142
ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1143
} else if ( ost->forced_keyframes
1144
&& !strncmp(ost->forced_keyframes, "source", 6)
1145
&& in_picture->key_frame==1) {
1146
forced_keyframe = 1;
1147
}
1148
1149
if (forced_keyframe) {
1150
in_picture->pict_type = AV_PICTURE_TYPE_I;
1151
av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1152
}
1153
1154
update_benchmark(NULL);
1155
if (debug_ts) {
1156
av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1157
"frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1158
av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1159
enc->time_base.num, enc->time_base.den);
1160
}
1161
1162
ost->frames_encoded++;
1163
1164
ret = avcodec_encode_video2(enc, &pkt, in_picture, &got_packet);
1165
update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1166
if (ret < 0) {
1167
av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1168
exit_program(1);
1169
}
1170
1171
if (got_packet) {
1172
if (debug_ts) {
1173
av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1174
"pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1175
av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1176
av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1177
}
1178
1179
if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1180
pkt.pts = ost->sync_opts;
1181
1182
av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1183
1184
if (debug_ts) {
1185
av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1186
"pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1187
av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
1188
av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
1189
}
1190
1191
frame_size = pkt.size;
1192
write_frame(s, &pkt, ost);
1193
1194
/* if two pass, output log */
1195
if (ost->logfile && enc->stats_out) {
1196
fprintf(ost->logfile, "%s", enc->stats_out);
1197
}
1198
}
1199
}
1200
ost->sync_opts++;
1201
/*
1202
* For video, number of frames in == number of packets out.
1203
* But there may be reordering, so we can't throw away frames on encoder
1204
* flush, we need to limit them here, before they go into encoder.
1205
*/
1206
ost->frame_number++;
1207
1208
if (vstats_filename && frame_size)
1209
do_video_stats(ost, frame_size);
1210
}
1211
1212
if (!ost->last_frame)
1213
ost->last_frame = av_frame_alloc();
1214
av_frame_unref(ost->last_frame);
1215
if (next_picture && ost->last_frame)
1216
av_frame_ref(ost->last_frame, next_picture);
1217
else
1218
av_frame_free(&ost->last_frame);
1219
}
1220
1221
static double psnr(double d)
1222
{
1223
return -10.0 * log10(d);
1224
}
1225
1226
/*
 * Append one line of per-frame statistics (frame number, quantizer, optional
 * PSNR, frame size, elapsed time, instantaneous and average bitrate, picture
 * type) for a video stream to the file named by vstats_filename.
 * Non-video streams are ignored.
 */
static void do_video_stats(OutputStream *ost, int frame_size)
{
    AVCodecContext *enc;
    int frame_number;
    double ti1, bitrate, avg_bitrate;

    /* this is executed just the first time do_video_stats is called */
    if (!vstats_file) {
        vstats_file = fopen(vstats_filename, "w");
        if (!vstats_file) {
            perror("fopen");
            exit_program(1);
        }
    }

    enc = ost->enc_ctx;
    if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
        frame_number = ost->st->nb_frames;
        /* quality is stored in lambda units; convert back to a QP-like scale */
        fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
                ost->quality / (float)FF_QP2LAMBDA);

        /* ost->error[0] is the accumulated luma error; normalize by the
         * per-frame peak energy (width * height * 255^2) before psnr() */
        if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
            fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));

        fprintf(vstats_file,"f_size= %6d ", frame_size);
        /* compute pts value */
        ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
        if (ti1 < 0.01)
            ti1 = 0.01; /* clamp to avoid division by (near-)zero below */

        /* instantaneous bitrate: this frame's bits over one frame duration */
        bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
        /* average bitrate: all bytes written so far over elapsed stream time */
        avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
        fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
                (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
        fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
    }
}
1263
1264
/*
 * Mark a stream as fully finished (encoder and muxer).  When the owning
 * output file was created with -shortest, finishing any one stream
 * finishes every stream of that file.
 */
static void finish_output_stream(OutputStream *ost)
{
    OutputFile *of = output_files[ost->file_index];

    ost->finished = ENCODER_FINISHED | MUXER_FINISHED;

    if (of->shortest) {
        int idx;
        for (idx = 0; idx < of->ctx->nb_streams; idx++) {
            OutputStream *sibling = output_streams[of->ost_index + idx];
            sibling->finished = ENCODER_FINISHED | MUXER_FINISHED;
        }
    }
}
1276
1277
/**
1278
* Get and encode new output from any of the filtergraphs, without causing
1279
* activity.
1280
*
1281
* @return 0 for success, <0 for severe errors
1282
*/
1283
/*
 * Drain every output stream's buffersink without requesting new input
 * (AV_BUFFERSINK_FLAG_NO_REQUEST), rescale frame timestamps into the
 * encoder time base, and hand the frames to do_video_out()/do_audio_out().
 * When 'flush' is set, EOF on a video sink triggers a final
 * do_video_out(NULL) call so the fps/duplication logic can flush.
 *
 * @return 0 on success, AVERROR(ENOMEM) if a frame cannot be allocated
 */
static int reap_filters(int flush)
{
    AVFrame *filtered_frame = NULL;
    int i;

    /* Reap all buffers present in the buffer sinks */
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        OutputFile *of = output_files[ost->file_index];
        AVFilterContext *filter;
        AVCodecContext *enc = ost->enc_ctx;
        int ret = 0;

        if (!ost->filter)
            continue;
        filter = ost->filter->filter;

        /* lazily allocate the reusable per-stream output frame */
        if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
            return AVERROR(ENOMEM);
        }
        filtered_frame = ost->filtered_frame;

        while (1) {
            double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
            ret = av_buffersink_get_frame_flags(filter, filtered_frame,
                                                AV_BUFFERSINK_FLAG_NO_REQUEST);
            if (ret < 0) {
                /* EAGAIN (no frame ready) and EOF are expected; warn otherwise */
                if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
                    av_log(NULL, AV_LOG_WARNING,
                           "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
                } else if (flush && ret == AVERROR_EOF) {
                    if (filter->inputs[0]->type == AVMEDIA_TYPE_VIDEO)
                        do_video_out(of->ctx, ost, NULL, AV_NOPTS_VALUE);
                }
                break;
            }
            if (ost->finished) {
                /* stream already done: discard the frame but keep draining */
                av_frame_unref(filtered_frame);
                continue;
            }
            if (filtered_frame->pts != AV_NOPTS_VALUE) {
                int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
                AVRational tb = enc->time_base;
                /* widen the denominator (up to 16 extra bits) so float_pts
                 * keeps sub-tick precision through the integer rescale */
                int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);

                tb.den <<= extra_bits;
                float_pts =
                    av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, tb) -
                    av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
                float_pts /= 1 << extra_bits;
                // avoid exact midoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
                float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);

                filtered_frame->pts =
                    av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, enc->time_base) -
                    av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
            }
            //if (ost->source_index >= 0)
            //    *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold

            switch (filter->inputs[0]->type) {
            case AVMEDIA_TYPE_VIDEO:
                /* without an explicit -aspect, propagate the filter's SAR */
                if (!ost->frame_aspect_ratio.num)
                    enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;

                if (debug_ts) {
                    av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
                           av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
                           float_pts,
                           enc->time_base.num, enc->time_base.den);
                }

                do_video_out(of->ctx, ost, filtered_frame, float_pts);
                break;
            case AVMEDIA_TYPE_AUDIO:
                /* encoders that cannot adapt mid-stream must see a fixed
                 * channel count from the filtergraph */
                if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
                    enc->channels != av_frame_get_channels(filtered_frame)) {
                    av_log(NULL, AV_LOG_ERROR,
                           "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
                    break;
                }
                do_audio_out(of->ctx, ost, filtered_frame);
                break;
            default:
                // TODO support subtitle filters
                av_assert0(0);
            }

            av_frame_unref(filtered_frame);
        }
    }

    return 0;
}
1377
1378
static void print_final_stats(int64_t total_size)
1379
{
1380
uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1381
uint64_t subtitle_size = 0;
1382
uint64_t data_size = 0;
1383
float percent = -1.0;
1384
int i, j;
1385
int pass1_used = 1;
1386
1387
for (i = 0; i < nb_output_streams; i++) {
1388
OutputStream *ost = output_streams[i];
1389
switch (ost->enc_ctx->codec_type) {
1390
case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1391
case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1392
case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1393
default: other_size += ost->data_size; break;
1394
}
1395
extra_size += ost->enc_ctx->extradata_size;
1396
data_size += ost->data_size;
1397
if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))
1398
!= AV_CODEC_FLAG_PASS1)
1399
pass1_used = 0;
1400
}
1401
1402
if (data_size && total_size>0 && total_size >= data_size)
1403
percent = 100.0 * (total_size - data_size) / data_size;
1404
1405
av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1406
video_size / 1024.0,
1407
audio_size / 1024.0,
1408
subtitle_size / 1024.0,
1409
other_size / 1024.0,
1410
extra_size / 1024.0);
1411
if (percent >= 0.0)
1412
av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1413
else
1414
av_log(NULL, AV_LOG_INFO, "unknown");
1415
av_log(NULL, AV_LOG_INFO, "\n");
1416
1417
/* print verbose per-stream stats */
1418
for (i = 0; i < nb_input_files; i++) {
1419
InputFile *f = input_files[i];
1420
uint64_t total_packets = 0, total_size = 0;
1421
1422
av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1423
i, f->ctx->filename);
1424
1425
for (j = 0; j < f->nb_streams; j++) {
1426
InputStream *ist = input_streams[f->ist_index + j];
1427
enum AVMediaType type = ist->dec_ctx->codec_type;
1428
1429
total_size += ist->data_size;
1430
total_packets += ist->nb_packets;
1431
1432
av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1433
i, j, media_type_string(type));
1434
av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1435
ist->nb_packets, ist->data_size);
1436
1437
if (ist->decoding_needed) {
1438
av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1439
ist->frames_decoded);
1440
if (type == AVMEDIA_TYPE_AUDIO)
1441
av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1442
av_log(NULL, AV_LOG_VERBOSE, "; ");
1443
}
1444
1445
av_log(NULL, AV_LOG_VERBOSE, "\n");
1446
}
1447
1448
av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1449
total_packets, total_size);
1450
}
1451
1452
for (i = 0; i < nb_output_files; i++) {
1453
OutputFile *of = output_files[i];
1454
uint64_t total_packets = 0, total_size = 0;
1455
1456
av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1457
i, of->ctx->filename);
1458
1459
for (j = 0; j < of->ctx->nb_streams; j++) {
1460
OutputStream *ost = output_streams[of->ost_index + j];
1461
enum AVMediaType type = ost->enc_ctx->codec_type;
1462
1463
total_size += ost->data_size;
1464
total_packets += ost->packets_written;
1465
1466
av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1467
i, j, media_type_string(type));
1468
if (ost->encoding_needed) {
1469
av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1470
ost->frames_encoded);
1471
if (type == AVMEDIA_TYPE_AUDIO)
1472
av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1473
av_log(NULL, AV_LOG_VERBOSE, "; ");
1474
}
1475
1476
av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1477
ost->packets_written, ost->data_size);
1478
1479
av_log(NULL, AV_LOG_VERBOSE, "\n");
1480
}
1481
1482
av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1483
total_packets, total_size);
1484
}
1485
if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1486
av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1487
if (pass1_used) {
1488
av_log(NULL, AV_LOG_WARNING, "\n");
1489
} else {
1490
av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
1491
}
1492
}
1493
}
1494
1495
/*
 * Build and emit the one-line periodic status report (frame, fps, q, size,
 * time, bitrate, dup/drop, speed) to stderr/log, and a key=value version of
 * the same data to the -progress output (progress_avio) if enabled.  On the
 * last report the line ends with '\n' instead of '\r' and the final summary
 * is printed via print_final_stats().
 *
 * timer_start/cur_time appear to be in microseconds: they are divided by
 * 1e6 to get seconds below (NOTE(review): confirm against callers).
 */
static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
{
    char buf[1024];
    AVBPrint buf_script;
    OutputStream *ost;
    AVFormatContext *oc;
    int64_t total_size;
    AVCodecContext *enc;
    int frame_number, vid, i;
    double bitrate;
    double speed;
    int64_t pts = INT64_MIN + 1;
    static int64_t last_time = -1;       /* persists across calls: rate limiter */
    static int qp_histogram[52];         /* persists across calls: -qphist data */
    int hours, mins, secs, us;
    int ret;
    float t;

    if (!print_stats && !is_last_report && !progress_avio)
        return;

    /* rate-limit intermediate reports to one per 500000 time units (0.5s
     * if cur_time is in microseconds) */
    if (!is_last_report) {
        if (last_time == -1) {
            last_time = cur_time;
            return;
        }
        if ((cur_time - last_time) < 500000)
            return;
        last_time = cur_time;
    }

    t = (cur_time-timer_start) / 1000000.0;


    oc = output_files[0]->ctx;

    total_size = avio_size(oc->pb);
    if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
        total_size = avio_tell(oc->pb);

    buf[0] = '\0';
    vid = 0;
    av_bprint_init(&buf_script, 0, 1);
    for (i = 0; i < nb_output_streams; i++) {
        float q = -1;
        ost = output_streams[i];
        enc = ost->enc_ctx;
        if (!ost->stream_copy)
            q = ost->quality / (float) FF_QP2LAMBDA;

        /* secondary video streams: only report their quantizer */
        if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
            snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
            av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
                       ost->file_index, ost->index, q);
        }
        /* first video stream: full frame/fps/q (and PSNR, qp histogram) */
        if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
            float fps;

            frame_number = ost->frame_number;
            fps = t > 1 ? frame_number / t : 0;
            /* "fps < 9.95" yields precision 1 for low rates, 0 otherwise */
            snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
                     frame_number, fps < 9.95, fps, q);
            av_bprintf(&buf_script, "frame=%d\n", frame_number);
            av_bprintf(&buf_script, "fps=%.1f\n", fps);
            av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
                       ost->file_index, ost->index, q);
            if (is_last_report)
                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
            if (qp_hist) {
                int j;
                int qp = lrintf(q);
                if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
                    qp_histogram[qp]++;
                /* print log2 of each bucket as one hex digit */
                for (j = 0; j < 32; j++)
                    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", av_log2(qp_histogram[j] + 1));
            }

            if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
                int j;
                double error, error_sum = 0;
                double scale, scale_sum = 0;
                double p;
                char type[3] = { 'Y','U','V' };
                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
                for (j = 0; j < 3; j++) {
                    if (is_last_report) {
                        /* enc->error accumulates over all frames */
                        error = enc->error[j];
                        scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
                    } else {
                        error = ost->error[j];
                        scale = enc->width * enc->height * 255.0 * 255.0;
                    }
                    /* chroma planes are quarter size (assumes 4:2:0) */
                    if (j)
                        scale /= 4;
                    error_sum += error;
                    scale_sum += scale;
                    p = psnr(error / scale);
                    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
                    /* type[j] | 32 lowercases the plane letter */
                    av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
                               ost->file_index, ost->index, type[j] | 32, p);
                }
                p = psnr(error_sum / scale_sum);
                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
                av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
                           ost->file_index, ost->index, p);
            }
            vid = 1;
        }
        /* compute min output value */
        if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
            pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
                                          ost->st->time_base, AV_TIME_BASE_Q));
        if (is_last_report)
            nb_frames_drop += ost->last_dropped;
    }

    /* split |pts| (in AV_TIME_BASE units) into h:m:s + remainder */
    secs = FFABS(pts) / AV_TIME_BASE;
    us = FFABS(pts) % AV_TIME_BASE;
    mins = secs / 60;
    secs %= 60;
    hours = mins / 60;
    mins %= 60;

    bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
    speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;

    if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
                                 "size=N/A time=");
    else                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
                                 "size=%8.0fkB time=", total_size / 1024.0);
    if (pts < 0)
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
             "%02d:%02d:%02d.%02d ", hours, mins, secs,
             (100 * us) / AV_TIME_BASE);

    if (bitrate < 0) {
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
        av_bprintf(&buf_script, "bitrate=N/A\n");
    }else{
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
        av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
    }

    if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
    else                av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
    av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
    av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
               hours, mins, secs, us);

    if (nb_frames_dup || nb_frames_drop)
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
                nb_frames_dup, nb_frames_drop);
    av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
    av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);

    if (speed < 0) {
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=N/A");
        av_bprintf(&buf_script, "speed=N/A\n");
    } else {
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=%4.3gx", speed);
        av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
    }

    if (print_stats || is_last_report) {
        /* '\r' keeps intermediate reports on one terminal line */
        const char end = is_last_report ? '\n' : '\r';
        if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
            fprintf(stderr, "%s    %c", buf, end);
        } else
            av_log(NULL, AV_LOG_INFO, "%s    %c", buf, end);

        fflush(stderr);
    }

    if (progress_avio) {
        av_bprintf(&buf_script, "progress=%s\n",
                   is_last_report ? "end" : "continue");
        avio_write(progress_avio, buf_script.str,
                   FFMIN(buf_script.len, buf_script.size - 1));
        avio_flush(progress_avio);
        av_bprint_finalize(&buf_script, NULL);
        if (is_last_report) {
            if ((ret = avio_closep(&progress_avio)) < 0)
                av_log(NULL, AV_LOG_ERROR,
                       "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
        }
    }

    if (is_last_report)
        print_final_stats(total_size);
}
1686
1687
/*
 * Flush all encoders that were actually used: feed each one NULL frames
 * until it reports no more delayed packets, muxing (or, once the muxer is
 * finished, discarding) every packet produced.  Streams that cannot buffer
 * (raw/PCM-style audio with frame_size <= 1, raw video passthrough) are
 * skipped.  Exits the program on encode failure.
 */
static void flush_encoders(void)
{
    int i, ret;

    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        AVCodecContext *enc = ost->enc_ctx;
        AVFormatContext *os = output_files[ost->file_index]->ctx;
        int stop_encoding = 0;

        if (!ost->encoding_needed)
            continue;

        /* frame_size <= 1 audio encoders have no internal delay */
        if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
            continue;
#if FF_API_LAVF_FMT_RAWPICTURE
        if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
            continue;
#endif

        for (;;) {
            int (*encode)(AVCodecContext*, AVPacket*, const AVFrame*, int*) = NULL;
            const char *desc;

            switch (enc->codec_type) {
            case AVMEDIA_TYPE_AUDIO:
                encode = avcodec_encode_audio2;
                desc = "audio";
                break;
            case AVMEDIA_TYPE_VIDEO:
                encode = avcodec_encode_video2;
                desc = "video";
                break;
            default:
                stop_encoding = 1;
            }

            if (encode) {
                AVPacket pkt;
                int pkt_size;
                int got_packet;
                av_init_packet(&pkt);
                pkt.data = NULL;
                pkt.size = 0;

                update_benchmark(NULL);
                /* NULL frame == flush request */
                ret = encode(enc, &pkt, NULL, &got_packet);
                update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
                if (ret < 0) {
                    av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
                           desc,
                           av_err2str(ret));
                    exit_program(1);
                }
                /* two-pass stats log, if enabled */
                if (ost->logfile && enc->stats_out) {
                    fprintf(ost->logfile, "%s", enc->stats_out);
                }
                if (!got_packet) {
                    /* encoder fully drained */
                    stop_encoding = 1;
                    break;
                }
                if (ost->finished & MUXER_FINISHED) {
                    /* muxer closed: discard packet but keep draining encoder */
                    av_packet_unref(&pkt);
                    continue;
                }
                av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
                pkt_size = pkt.size;
                write_frame(os, &pkt, ost);
                if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
                    do_video_stats(ost, pkt_size);
                }
            }

            if (stop_encoding)
                break;
        }
    }
}
1765
1766
/*
1767
* Check whether a packet from ist should be written into ost at this time
1768
*/
1769
/*
 * Check whether a packet from ist should be written into ost at this time.
 * Returns 1 when ost is fed by ist, not yet finished, and the input has
 * reached the output file's start time; 0 otherwise.
 */
static int check_output_constraints(InputStream *ist, OutputStream *ost)
{
    OutputFile *of = output_files[ost->file_index];
    int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;

    if (ost->source_index != ist_index ||
        ost->finished ||
        (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time))
        return 0;

    return 1;
}
1785
1786
/*
 * Copy one packet from ist to ost without re-encoding: rescale timestamps
 * into the output stream time base, honour start time / -t recording limits
 * on both the input and output file, optionally run the parser to adjust
 * bitstream framing, and mux the result.  Returns early (dropping the
 * packet) when it falls outside the copy window.
 */
static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
{
    OutputFile *of = output_files[ost->file_index];
    InputFile *f = input_files [ist->file_index];
    int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
    int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
    AVPicture pict;
    AVPacket opkt;

    av_init_packet(&opkt);

    /* drop leading non-key frames unless -copyinkf was given */
    if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
        !ost->copy_initial_nonkeyframes)
        return;

    /* drop packets dated before the output start point (unless
     * -copypriorss is enabled) */
    if (!ost->frame_number && !ost->copy_prior_start) {
        int64_t comp_start = start_time;
        if (copy_ts && f->start_time != AV_NOPTS_VALUE)
            comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
        if (pkt->pts == AV_NOPTS_VALUE ?
            ist->pts < comp_start :
            pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
            return;
    }

    /* output file's recording time (-t) reached: close the stream */
    if (of->recording_time != INT64_MAX &&
        ist->pts >= of->recording_time + start_time) {
        close_output_stream(ost);
        return;
    }

    /* input file's recording time reached: close the stream */
    if (f->recording_time != INT64_MAX) {
        start_time = f->ctx->start_time;
        if (f->start_time != AV_NOPTS_VALUE && copy_ts)
            start_time += f->start_time;
        if (ist->pts >= f->recording_time + start_time) {
            close_output_stream(ost);
            return;
        }
    }

    /* force the input stream PTS */
    if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
        ost->sync_opts++;

    if (pkt->pts != AV_NOPTS_VALUE)
        opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
    else
        opkt.pts = AV_NOPTS_VALUE;

    /* fall back to the demuxer-tracked DTS when the packet has none */
    if (pkt->dts == AV_NOPTS_VALUE)
        opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->st->time_base);
    else
        opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
    opkt.dts -= ost_tb_start_time;

    /* audio: rescale through the sample rate so rounding errors do not
     * accumulate across packets */
    if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
        int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
        if(!duration)
            duration = ist->dec_ctx->frame_size;
        opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
                                               (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
                                               ost->st->time_base) - ost_tb_start_time;
    }

    opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
    opkt.flags = pkt->flags;
    // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
    if (  ost->st->codec->codec_id != AV_CODEC_ID_H264
       && ost->st->codec->codec_id != AV_CODEC_ID_MPEG1VIDEO
       && ost->st->codec->codec_id != AV_CODEC_ID_MPEG2VIDEO
       && ost->st->codec->codec_id != AV_CODEC_ID_VC1
       ) {
        int ret = av_parser_change(ost->parser, ost->st->codec,
                                   &opkt.data, &opkt.size,
                                   pkt->data, pkt->size,
                                   pkt->flags & AV_PKT_FLAG_KEY);
        if (ret < 0) {
            av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
                   av_err2str(ret));
            exit_program(1);
        }
        if (ret) {
            /* parser allocated fresh data: wrap it in a refcounted buffer
             * so the muxer can own it */
            opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
            if (!opkt.buf)
                exit_program(1);
        }
    } else {
        opkt.data = pkt->data;
        opkt.size = pkt->size;
    }
    av_copy_packet_side_data(&opkt, pkt);

#if FF_API_LAVF_FMT_RAWPICTURE
    if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
        ost->st->codec->codec_id == AV_CODEC_ID_RAWVIDEO &&
        (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
        /* store AVPicture in AVPacket, as expected by the output format */
        int ret = avpicture_fill(&pict, opkt.data, ost->st->codec->pix_fmt, ost->st->codec->width, ost->st->codec->height);
        if (ret < 0) {
            av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
                   av_err2str(ret));
            exit_program(1);
        }
        opkt.data = (uint8_t *)&pict;
        opkt.size = sizeof(AVPicture);
        opkt.flags |= AV_PKT_FLAG_KEY;
    }
#endif

    write_frame(of->ctx, &opkt, ost);
}
1898
1899
/*
 * If the decoder reported no channel layout, fall back to the default
 * layout for its channel count (bounded by -guess_layout_max) and log the
 * guess as a warning.
 *
 * @return 1 when a layout is known or was guessed, 0 when it stays unknown
 */
int guess_input_channel_layout(InputStream *ist)
{
    AVCodecContext *dec = ist->dec_ctx;
    char layout_name[256];

    if (dec->channel_layout)
        return 1;

    if (dec->channels > ist->guess_layout_max)
        return 0;

    dec->channel_layout = av_get_default_channel_layout(dec->channels);
    if (!dec->channel_layout)
        return 0;

    av_get_channel_layout_string(layout_name, sizeof(layout_name),
                                 dec->channels, dec->channel_layout);
    av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
           "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
    return 1;
}
1918
1919
/*
 * Account one decode attempt in decode_error_stat (index 0 = success,
 * index 1 = error) and, when -xerror is set, abort on decode errors or on
 * frames flagged corrupt by the decoder.
 */
static void check_decode_result(InputStream *ist, int *got_output, int ret)
{
    int failed = ret < 0;

    if (*got_output || failed)
        decode_error_stat[failed]++;

    if (exit_on_error && failed)
        exit_program(1);

    if (exit_on_error && *got_output && ist &&
        (av_frame_get_decode_error_flags(ist->decoded_frame) ||
         (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT))) {
        av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n",
               input_files[ist->file_index]->ctx->filename, ist->st->index);
        exit_program(1);
    }
}
1934
1935
/**
 * Decode one audio packet for an input stream and feed the resulting frame
 * into every filtergraph input attached to the stream.
 *
 * @param ist        input stream the packet belongs to
 * @param pkt        packet to decode; pkt->pts is consumed (reset to
 *                   AV_NOPTS_VALUE) so it is not reused for a later frame
 *                   from the same packet
 * @param got_output set by the decoder to whether a frame was produced
 * @return number of bytes consumed by the decoder (>= 0) on success,
 *         a negative AVERROR on failure
 */
static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
{
    AVFrame *decoded_frame, *f;
    AVCodecContext *avctx = ist->dec_ctx;
    int i, ret, err = 0, resample_changed;
    AVRational decoded_frame_tb;

    /* lazily allocate the frames reused for every decode call on this stream */
    if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    decoded_frame = ist->decoded_frame;

    update_benchmark(NULL);
    ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
    update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);

    /* a non-positive sample rate would divide by zero in the ts math below */
    if (ret >= 0 && avctx->sample_rate <= 0) {
        av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
        ret = AVERROR_INVALIDDATA;
    }

    check_decode_result(ist, got_output, ret);

    if (!*got_output || ret < 0)
        return ret;

    ist->samples_decoded += decoded_frame->nb_samples;
    ist->frames_decoded++;

#if 1
    /* increment next_dts to use for the case where the input stream does not
       have timestamps or there are multiple frames in the packet */
    ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
                     avctx->sample_rate;
    ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
                     avctx->sample_rate;
#endif

    /* detect mid-stream changes of the audio parameters; the attached
       filtergraphs must be reconfigured when any of them changes */
    resample_changed = ist->resample_sample_fmt     != decoded_frame->format         ||
                       ist->resample_channels       != avctx->channels               ||
                       ist->resample_channel_layout != decoded_frame->channel_layout ||
                       ist->resample_sample_rate    != decoded_frame->sample_rate;
    if (resample_changed) {
        char layout1[64], layout2[64];

        if (!guess_input_channel_layout(ist)) {
            av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
                   "layout for Input Stream #%d.%d\n", ist->file_index,
                   ist->st->index);
            exit_program(1);
        }
        decoded_frame->channel_layout = avctx->channel_layout;

        av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
                                     ist->resample_channel_layout);
        av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
                                     decoded_frame->channel_layout);

        av_log(NULL, AV_LOG_INFO,
               "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
               ist->file_index, ist->st->index,
               ist->resample_sample_rate, av_get_sample_fmt_name(ist->resample_sample_fmt),
               ist->resample_channels, layout1,
               decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
               avctx->channels, layout2);

        /* remember the new parameters so further changes are detected */
        ist->resample_sample_fmt     = decoded_frame->format;
        ist->resample_sample_rate    = decoded_frame->sample_rate;
        ist->resample_channel_layout = decoded_frame->channel_layout;
        ist->resample_channels       = avctx->channels;

        for (i = 0; i < nb_filtergraphs; i++)
            if (ist_in_filtergraph(filtergraphs[i], ist)) {
                FilterGraph *fg = filtergraphs[i];
                if (configure_filtergraph(fg) < 0) {
                    av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
                    exit_program(1);
                }
            }
    }

    /* if the decoder provides a pts, use it instead of the last packet pts.
       the decoder could be delaying output by a packet or more. */
    if (decoded_frame->pts != AV_NOPTS_VALUE) {
        ist->dts = ist->next_dts = ist->pts = ist->next_pts = av_rescale_q(decoded_frame->pts, avctx->time_base, AV_TIME_BASE_Q);
        decoded_frame_tb = avctx->time_base;
    } else if (decoded_frame->pkt_pts != AV_NOPTS_VALUE) {
        decoded_frame->pts = decoded_frame->pkt_pts;
        decoded_frame_tb = ist->st->time_base;
    } else if (pkt->pts != AV_NOPTS_VALUE) {
        decoded_frame->pts = pkt->pts;
        decoded_frame_tb = ist->st->time_base;
    } else {
        decoded_frame->pts = ist->dts;
        decoded_frame_tb = AV_TIME_BASE_Q;
    }
    /* invalidate the packet pts so a second frame decoded from the same
       packet does not reuse it */
    pkt->pts = AV_NOPTS_VALUE;
    if (decoded_frame->pts != AV_NOPTS_VALUE)
        decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
                                              (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
                                              (AVRational){1, avctx->sample_rate});
    ist->nb_samples = decoded_frame->nb_samples;
    /* push the frame into every filter input; all but the last receive a new
       reference (av_frame_ref) so the data is shared rather than copied */
    for (i = 0; i < ist->nb_filters; i++) {
        if (i < ist->nb_filters - 1) {
            f = ist->filter_frame;
            err = av_frame_ref(f, decoded_frame);
            if (err < 0)
                break;
        } else
            f = decoded_frame;
        err = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f,
                                           AV_BUFFERSRC_FLAG_PUSH);
        if (err == AVERROR_EOF)
            err = 0; /* ignore */
        if (err < 0)
            break;
    }
    decoded_frame->pts = AV_NOPTS_VALUE;

    av_frame_unref(ist->filter_frame);
    av_frame_unref(decoded_frame);
    return err < 0 ? err : ret;
}
2059
2060
/**
 * Decode one video packet for an input stream and feed the resulting frame
 * into every filtergraph input attached to the stream.
 *
 * @param ist        input stream the packet belongs to
 * @param pkt        packet to decode; pkt->dts is overwritten with the
 *                   stream's current dts and pkt->size is zeroed once a
 *                   frame has been produced
 * @param got_output set by the decoder to whether a frame was produced
 * @return number of bytes consumed / av_buffersrc status (>= 0) on success,
 *         a negative AVERROR on failure
 */
static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
{
    AVFrame *decoded_frame, *f;
    int i, ret = 0, err = 0, resample_changed;
    int64_t best_effort_timestamp;
    AVRational *frame_sample_aspect;

    /* lazily allocate the frames reused for every decode call on this stream */
    if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    decoded_frame = ist->decoded_frame;
    pkt->dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);

    update_benchmark(NULL);
    ret = avcodec_decode_video2(ist->dec_ctx,
                                decoded_frame, got_output, pkt);
    update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);

    // The following line may be required in some cases where there is no parser
    // or the parser does not has_b_frames correctly
    if (ist->st->codec->has_b_frames < ist->dec_ctx->has_b_frames) {
        if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
            ist->st->codec->has_b_frames = ist->dec_ctx->has_b_frames;
        } else
            av_log(ist->dec_ctx, AV_LOG_WARNING,
                   "has_b_frames is larger in decoder than demuxer %d > %d.\n"
                   "If you want to help, upload a sample "
                   "of this file to ftp://upload.ffmpeg.org/incoming/ "
                   "and contact the ffmpeg-devel mailing list. ([email protected])",
                   ist->dec_ctx->has_b_frames,
                   ist->st->codec->has_b_frames);
    }

    check_decode_result(ist, got_output, ret);

    /* debug aid: report when the decoded frame geometry/format disagrees
       with the codec context */
    if (*got_output && ret >= 0) {
        if (ist->dec_ctx->width  != decoded_frame->width ||
            ist->dec_ctx->height != decoded_frame->height ||
            ist->dec_ctx->pix_fmt != decoded_frame->format) {
            av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
                   decoded_frame->width,
                   decoded_frame->height,
                   decoded_frame->format,
                   ist->dec_ctx->width,
                   ist->dec_ctx->height,
                   ist->dec_ctx->pix_fmt);
        }
    }

    if (!*got_output || ret < 0)
        return ret;

    if (ist->top_field_first >= 0)
        decoded_frame->top_field_first = ist->top_field_first;

    ist->frames_decoded++;

    /* download the frame data from the hardware surface if a hwaccel
       produced it */
    if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
        err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
        if (err < 0)
            goto fail;
    }
    ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;

    best_effort_timestamp = av_frame_get_best_effort_timestamp(decoded_frame);
    if (best_effort_timestamp != AV_NOPTS_VALUE)
        ist->next_pts = ist->pts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
               "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
               ist->st->index, av_ts2str(decoded_frame->pts),
               av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
               best_effort_timestamp,
               av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
               decoded_frame->key_frame, decoded_frame->pict_type,
               ist->st->time_base.num, ist->st->time_base.den);
    }

    pkt->size = 0;

    if (ist->st->sample_aspect_ratio.num)
        decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;

    /* detect mid-stream changes of the picture parameters; the attached
       filtergraphs must be reconfigured when any of them changes */
    resample_changed = ist->resample_width   != decoded_frame->width  ||
                       ist->resample_height  != decoded_frame->height ||
                       ist->resample_pix_fmt != decoded_frame->format;
    if (resample_changed) {
        av_log(NULL, AV_LOG_INFO,
               "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
               ist->file_index, ist->st->index,
               ist->resample_width, ist->resample_height, av_get_pix_fmt_name(ist->resample_pix_fmt),
               decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));

        ist->resample_width   = decoded_frame->width;
        ist->resample_height  = decoded_frame->height;
        ist->resample_pix_fmt = decoded_frame->format;

        for (i = 0; i < nb_filtergraphs; i++) {
            if (ist_in_filtergraph(filtergraphs[i], ist) && ist->reinit_filters &&
                configure_filtergraph(filtergraphs[i]) < 0) {
                av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
                exit_program(1);
            }
        }
    }

    frame_sample_aspect = av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "sample_aspect_ratio");
    /* push the frame into every filter input; all but the last receive a new
       reference (av_frame_ref) so the data is shared rather than copied */
    for (i = 0; i < ist->nb_filters; i++) {
        if (!frame_sample_aspect->num)
            *frame_sample_aspect = ist->st->sample_aspect_ratio;

        if (i < ist->nb_filters - 1) {
            f = ist->filter_frame;
            err = av_frame_ref(f, decoded_frame);
            if (err < 0)
                break;
        } else
            f = decoded_frame;
        ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f, AV_BUFFERSRC_FLAG_PUSH);
        if (ret == AVERROR_EOF) {
            ret = 0; /* ignore */
        } else if (ret < 0) {
            av_log(NULL, AV_LOG_FATAL,
                   "Failed to inject frame into filter network: %s\n", av_err2str(ret));
            exit_program(1);
        }
    }

fail:
    av_frame_unref(ist->filter_frame);
    av_frame_unref(decoded_frame);
    return err < 0 ? err : ret;
}
2195
2196
/**
 * Decode one subtitle packet, optionally fix up the previous subtitle's
 * duration, render the subtitle for sub2video consumers and hand it to
 * every subtitle encoder that takes this stream as input.
 *
 * When fix_sub_duration is enabled the decoded subtitle is held back
 * (swapped into ist->prev_sub) and the *previous* subtitle, whose end time
 * can now be clamped against the new pts, is emitted instead.
 *
 * @return decoder return code (or the previous one when fix_sub_duration
 *         swaps results), negative AVERROR on failure
 */
static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
{
    AVSubtitle subtitle;
    int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
                                          &subtitle, got_output, pkt);

    check_decode_result(NULL, got_output, ret);

    if (ret < 0 || !*got_output) {
        /* an empty packet means EOF: flush any pending sub2video state */
        if (!pkt->size)
            sub2video_flush(ist);
        return ret;
    }

    if (ist->fix_sub_duration) {
        int end = 1;
        if (ist->prev_sub.got_output) {
            /* elapsed time between the previous and the current subtitle,
               in milliseconds (display times are in ms) */
            end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
                             1000, AV_TIME_BASE);
            if (end < ist->prev_sub.subtitle.end_display_time) {
                av_log(ist->dec_ctx, AV_LOG_DEBUG,
                       "Subtitle duration reduced from %d to %d%s\n",
                       ist->prev_sub.subtitle.end_display_time, end,
                       end <= 0 ? ", dropping it" : "");
                ist->prev_sub.subtitle.end_display_time = end;
            }
        }
        /* emit the previous subtitle and buffer the current one */
        FFSWAP(int,        *got_output, ist->prev_sub.got_output);
        FFSWAP(int,        ret,         ist->prev_sub.ret);
        FFSWAP(AVSubtitle, subtitle,    ist->prev_sub.subtitle);
        if (end <= 0)
            goto out;
    }

    if (!*got_output)
        return ret;

    sub2video_update(ist, &subtitle);

    if (!subtitle.num_rects)
        goto out;

    ist->frames_decoded++;

    /* forward the subtitle to every subtitle encoder fed by this stream */
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];

        if (!check_output_constraints(ist, ost) || !ost->encoding_needed
            || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
            continue;

        do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle);
    }

out:
    avsubtitle_free(&subtitle);
    return ret;
}
2254
2255
/* Mark every filtergraph input attached to the stream as finished by
 * pushing a NULL frame into it. Returns 0 on success, otherwise the first
 * negative error code reported by av_buffersrc_add_frame(). */
static int send_filter_eof(InputStream *ist)
{
    int idx;

    for (idx = 0; idx < ist->nb_filters; idx++) {
        int err = av_buffersrc_add_frame(ist->filters[idx]->filter, NULL);
        if (err < 0)
            return err;
    }

    return 0;
}
2265
2266
/* pkt = NULL means EOF (needed to flush decoder buffers) */
/**
 * Main per-packet dispatch for an input stream: maintain the stream's
 * dts/pts bookkeeping, run the appropriate decoder (audio/video/subtitle)
 * as long as the packet has data left, send filter EOF on flush, and
 * perform the timestamp accounting plus packet forwarding for streamcopy
 * outputs.
 *
 * @param ist    input stream the packet belongs to
 * @param pkt    packet to process, or NULL to flush the decoder at EOF
 * @param no_eof when non-zero, flush decoders without sending EOF to the
 *               filters (used when looping the input)
 * @return whether the decoder produced any output
 */
static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
{
    int ret = 0, i;
    int got_output = 0;

    AVPacket avpkt;
    if (!ist->saw_first_ts) {
        /* start dts slightly in the past to account for decoder delay */
        ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
        ist->pts = 0;
        if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
            ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
            ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
        }
        ist->saw_first_ts = 1;
    }

    if (ist->next_dts == AV_NOPTS_VALUE)
        ist->next_dts = ist->dts;
    if (ist->next_pts == AV_NOPTS_VALUE)
        ist->next_pts = ist->pts;

    if (!pkt) {
        /* EOF handling: use an empty packet to drain the decoder */
        av_init_packet(&avpkt);
        avpkt.data = NULL;
        avpkt.size = 0;
        goto handle_eof;
    } else {
        avpkt = *pkt;
    }

    if (pkt->dts != AV_NOPTS_VALUE) {
        ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
        if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
            ist->next_pts = ist->pts = ist->dts;
    }

    // while we have more to decode or while the decoder did output something on EOF
    while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
        int duration;
    handle_eof:

        ist->pts = ist->next_pts;
        ist->dts = ist->next_dts;

        /* a partially-consumed packet means the codec returned less than
           the full packet size without declaring subframe support */
        if (avpkt.size && avpkt.size != pkt->size &&
            !(ist->dec->capabilities & AV_CODEC_CAP_SUBFRAMES)) {
            av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
                   "Multiple frames in a packet from stream %d\n", pkt->stream_index);
            ist->showed_multi_packet_warning = 1;
        }

        switch (ist->dec_ctx->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            ret = decode_audio    (ist, &avpkt, &got_output);
            break;
        case AVMEDIA_TYPE_VIDEO:
            ret = decode_video    (ist, &avpkt, &got_output);
            /* derive the frame duration: packet duration first, otherwise
               the codec framerate scaled by the parser's repeat_pict */
            if (avpkt.duration) {
                duration = av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
            } else if (ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
                int ticks = av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
                duration = ((int64_t)AV_TIME_BASE *
                            ist->dec_ctx->framerate.den * ticks) /
                            ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
            } else
                duration = 0;

            if (ist->dts != AV_NOPTS_VALUE && duration) {
                ist->next_dts += duration;
            } else
                ist->next_dts = AV_NOPTS_VALUE;

            if (got_output)
                ist->next_pts += duration; //FIXME the duration is not correct in some cases
            break;
        case AVMEDIA_TYPE_SUBTITLE:
            ret = transcode_subtitles(ist, &avpkt, &got_output);
            break;
        default:
            return -1;
        }

        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
                   ist->file_index, ist->st->index, av_err2str(ret));
            if (exit_on_error)
                exit_program(1);
            break;
        }

        /* timestamps belong to the first frame decoded from the packet only */
        avpkt.dts =
        avpkt.pts = AV_NOPTS_VALUE;

        // touch data and size only if not EOF
        if (pkt) {
            /* non-audio decoders are assumed to consume the whole packet */
            if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_AUDIO)
                ret = avpkt.size;
            avpkt.data += ret;
            avpkt.size -= ret;
        }
        if (!got_output) {
            continue;
        }
        if (got_output && !pkt)
            break;
    }

    /* after flushing, send an EOF on all the filter inputs attached to the stream */
    /* except when looping we need to flush but not to send an EOF */
    if (!pkt && ist->decoding_needed && !got_output && !no_eof) {
        int ret = send_filter_eof(ist);
        if (ret < 0) {
            av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
            exit_program(1);
        }
    }

    /* handle stream copy: no decoder ran, so advance the timestamps from
       the packet/codec parameters instead */
    if (!ist->decoding_needed) {
        ist->dts = ist->next_dts;
        switch (ist->dec_ctx->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
                             ist->dec_ctx->sample_rate;
            break;
        case AVMEDIA_TYPE_VIDEO:
            if (ist->framerate.num) {
                // TODO: Remove work-around for c99-to-c89 issue 7
                AVRational time_base_q = AV_TIME_BASE_Q;
                int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
                ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
            } else if (pkt->duration) {
                ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
            } else if (ist->dec_ctx->framerate.num != 0) {
                int ticks = av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
                ist->next_dts += ((int64_t)AV_TIME_BASE *
                                  ist->dec_ctx->framerate.den * ticks) /
                                  ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
            }
            break;
        }
        ist->pts = ist->dts;
        ist->next_pts = ist->next_dts;
    }
    /* forward the packet to all matching streamcopy outputs */
    for (i = 0; pkt && i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];

        if (!check_output_constraints(ist, ost) || ost->encoding_needed)
            continue;

        do_streamcopy(ist, ost, pkt);
    }

    return got_output;
}
2423
2424
static void print_sdp(void)
2425
{
2426
char sdp[16384];
2427
int i;
2428
int j;
2429
AVIOContext *sdp_pb;
2430
AVFormatContext **avc = av_malloc_array(nb_output_files, sizeof(*avc));
2431
2432
if (!avc)
2433
exit_program(1);
2434
for (i = 0, j = 0; i < nb_output_files; i++) {
2435
if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2436
avc[j] = output_files[i]->ctx;
2437
j++;
2438
}
2439
}
2440
2441
if (!j)
2442
goto fail;
2443
2444
av_sdp_create(avc, j, sdp, sizeof(sdp));
2445
2446
if (!sdp_filename) {
2447
printf("SDP:\n%s\n", sdp);
2448
fflush(stdout);
2449
} else {
2450
if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2451
av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2452
} else {
2453
avio_printf(sdp_pb, "SDP:\n%s", sdp);
2454
avio_closep(&sdp_pb);
2455
av_freep(&sdp_filename);
2456
}
2457
}
2458
2459
fail:
2460
av_freep(&avc);
2461
}
2462
2463
static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
2464
{
2465
int i;
2466
for (i = 0; hwaccels[i].name; i++)
2467
if (hwaccels[i].pix_fmt == pix_fmt)
2468
return &hwaccels[i];
2469
return NULL;
2470
}
2471
2472
/**
 * AVCodecContext.get_format callback: walk the decoder's candidate pixel
 * format list and pick the first one we can use.
 *
 * Hardware formats are considered first; a hwaccel is accepted only when it
 * matches the user's -hwaccel selection (or HWACCEL_AUTO) and its init()
 * succeeds. The first non-hwaccel format terminates the search, so software
 * decoding is the fallback.
 *
 * @return the chosen pixel format, or AV_PIX_FMT_NONE when an explicitly
 *         requested hwaccel fails to initialize
 */
static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
{
    InputStream *ist = s->opaque;
    const enum AVPixelFormat *p;
    int ret;

    for (p = pix_fmts; *p != -1; p++) {
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
        const HWAccel *hwaccel;

        /* the list is ordered hw formats first; a software format ends
           the hwaccel search */
        if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
            break;

        hwaccel = get_hwaccel(*p);
        if (!hwaccel ||
            (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
            (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
            continue;

        ret = hwaccel->init(s);
        if (ret < 0) {
            /* fatal only when this exact hwaccel was explicitly requested */
            if (ist->hwaccel_id == hwaccel->id) {
                av_log(NULL, AV_LOG_FATAL,
                       "%s hwaccel requested for input stream #%d:%d, "
                       "but cannot be initialized.\n", hwaccel->name,
                       ist->file_index, ist->st->index);
                return AV_PIX_FMT_NONE;
            }
            continue;
        }
        ist->active_hwaccel_id = hwaccel->id;
        ist->hwaccel_pix_fmt   = *p;
        break;
    }

    return *p;
}
2509
2510
/* AVCodecContext.get_buffer2 callback: delegate to the active hwaccel's
 * buffer allocator for hardware frames, otherwise fall back to the
 * default libavcodec allocator. */
static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
{
    InputStream *ist = s->opaque;
    int is_hw_frame = ist->hwaccel_get_buffer &&
                      frame->format == ist->hwaccel_pix_fmt;

    return is_hw_frame ? ist->hwaccel_get_buffer(s, frame, flags)
                       : avcodec_default_get_buffer2(s, frame, flags);
}
2519
2520
/**
 * Open the decoder for an input stream, if decoding is needed, and reset
 * its timestamp state.
 *
 * @param ist_index index into input_streams
 * @param error     buffer receiving a human-readable message on failure
 * @param error_len size of the error buffer
 * @return 0 on success, a negative AVERROR on failure
 */
static int init_input_stream(int ist_index, char *error, int error_len)
{
    int ret;
    InputStream *ist = input_streams[ist_index];

    if (ist->decoding_needed) {
        AVCodec *codec = ist->dec;
        if (!codec) {
            snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
                    avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
            return AVERROR(EINVAL);
        }

        /* install our format negotiation and buffer-allocation callbacks
           (hwaccel support) */
        ist->dec_ctx->opaque                = ist;
        ist->dec_ctx->get_format            = get_format;
        ist->dec_ctx->get_buffer2           = get_buffer;
        ist->dec_ctx->thread_safe_callbacks = 1;

        av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
        if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
           (ist->decoding_needed & DECODING_FOR_OST)) {
            av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
            if (ist->decoding_needed & DECODING_FOR_FILTER)
                av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
        }

        /* default to automatic thread count unless the user set one */
        if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
            av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
        if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
            if (ret == AVERROR_EXPERIMENTAL)
                abort_codec_experimental(codec, 0);

            snprintf(error, error_len,
                     "Error while opening decoder for input stream "
                     "#%d:%d : %s",
                     ist->file_index, ist->st->index, av_err2str(ret));
            return ret;
        }
        /* any option left in the dict was not consumed by the decoder */
        assert_avoptions(ist->decoder_opts);
    }

    ist->next_pts = AV_NOPTS_VALUE;
    ist->next_dts = AV_NOPTS_VALUE;

    return 0;
}
2566
2567
/* Return the input stream feeding this output stream, or NULL when the
 * output has no direct source (e.g. it is fed by a filter graph). */
static InputStream *get_input_stream(OutputStream *ost)
{
    return ost->source_index >= 0 ? input_streams[ost->source_index] : NULL;
}
2573
2574
/* qsort comparator for int64_t values: -1, 0 or 1 depending on ordering. */
static int compare_int64(const void *a, const void *b)
{
    int64_t lhs = *(const int64_t *)a;
    int64_t rhs = *(const int64_t *)b;

    if (lhs < rhs)
        return -1;
    return lhs > rhs ? 1 : 0;
}
2578
2579
/**
 * Open the encoder for an output stream (or apply the user options for a
 * streamcopy output) and propagate codec parameters, side data and the
 * time base to the muxer-level AVStream.
 *
 * @param ost       output stream to initialize
 * @param error     buffer receiving a human-readable message on failure
 * @param error_len size of the error buffer
 * @return 0 on success, a negative AVERROR on failure
 */
static int init_output_stream(OutputStream *ost, char *error, int error_len)
{
    int ret = 0;

    if (ost->encoding_needed) {
        AVCodec      *codec = ost->enc;
        AVCodecContext *dec = NULL;
        InputStream *ist;

        if ((ist = get_input_stream(ost)))
            dec = ist->dec_ctx;
        if (dec && dec->subtitle_header) {
            /* ASS code assumes this buffer is null terminated so add extra byte. */
            ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
            if (!ost->enc_ctx->subtitle_header)
                return AVERROR(ENOMEM);
            memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
            ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
        }
        /* default to automatic thread count unless the user set one */
        if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
            av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
        /* default audio bitrate of 128k when the codec provides no default
           and the user did not set one */
        if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
            !codec->defaults &&
            !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
            !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
            av_dict_set(&ost->encoder_opts, "b", "128000", 0);

        if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
            if (ret == AVERROR_EXPERIMENTAL)
                abort_codec_experimental(codec, 1);
            snprintf(error, error_len,
                     "Error while opening encoder for output stream #%d:%d - "
                     "maybe incorrect parameters such as bit_rate, rate, width or height",
                    ost->file_index, ost->index);
            return ret;
        }
        /* fixed-frame-size audio encoders need the buffersink to emit
           frames of exactly that size */
        if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
            !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
            av_buffersink_set_frame_size(ost->filter->filter,
                                         ost->enc_ctx->frame_size);
        /* any option left in the dict was not consumed by the encoder */
        assert_avoptions(ost->encoder_opts);
        if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
            av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
                                         " It takes bits/s as argument, not kbits/s\n");

        ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
        if (ret < 0) {
            av_log(NULL, AV_LOG_FATAL,
                   "Error initializing the output stream codec context.\n");
            exit_program(1);
        }

        /* deep-copy the encoder's coded side data to the muxer stream */
        if (ost->enc_ctx->nb_coded_side_data) {
            int i;

            ost->st->side_data = av_realloc_array(NULL, ost->enc_ctx->nb_coded_side_data,
                                                  sizeof(*ost->st->side_data));
            if (!ost->st->side_data)
                return AVERROR(ENOMEM);

            for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
                const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
                AVPacketSideData *sd_dst = &ost->st->side_data[i];

                sd_dst->data = av_malloc(sd_src->size);
                if (!sd_dst->data)
                    return AVERROR(ENOMEM);
                memcpy(sd_dst->data, sd_src->data, sd_src->size);
                sd_dst->size = sd_src->size;
                sd_dst->type = sd_src->type;
                ost->st->nb_side_data++;
            }
        }

        // copy timebase while removing common factors
        ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
        ost->st->codec->codec= ost->enc_ctx->codec;
    } else {
        /* streamcopy: only apply the user's codec options */
        ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
        if (ret < 0) {
            av_log(NULL, AV_LOG_FATAL,
                   "Error setting up codec context options.\n");
            return ret;
        }
        // copy timebase while removing common factors
        ost->st->time_base = av_add_q(ost->st->codec->time_base, (AVRational){0, 1});
    }

    return ret;
}
2669
2670
static void parse_forced_key_frames(char *kf, OutputStream *ost,
2671
AVCodecContext *avctx)
2672
{
2673
char *p;
2674
int n = 1, i, size, index = 0;
2675
int64_t t, *pts;
2676
2677
for (p = kf; *p; p++)
2678
if (*p == ',')
2679
n++;
2680
size = n;
2681
pts = av_malloc_array(size, sizeof(*pts));
2682
if (!pts) {
2683
av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
2684
exit_program(1);
2685
}
2686
2687
p = kf;
2688
for (i = 0; i < n; i++) {
2689
char *next = strchr(p, ',');
2690
2691
if (next)
2692
*next++ = 0;
2693
2694
if (!memcmp(p, "chapters", 8)) {
2695
2696
AVFormatContext *avf = output_files[ost->file_index]->ctx;
2697
int j;
2698
2699
if (avf->nb_chapters > INT_MAX - size ||
2700
!(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
2701
sizeof(*pts)))) {
2702
av_log(NULL, AV_LOG_FATAL,
2703
"Could not allocate forced key frames array.\n");
2704
exit_program(1);
2705
}
2706
t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
2707
t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
2708
2709
for (j = 0; j < avf->nb_chapters; j++) {
2710
AVChapter *c = avf->chapters[j];
2711
av_assert1(index < size);
2712
pts[index++] = av_rescale_q(c->start, c->time_base,
2713
avctx->time_base) + t;
2714
}
2715
2716
} else {
2717
2718
t = parse_time_or_die("force_key_frames", p, 1);
2719
av_assert1(index < size);
2720
pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
2721
2722
}
2723
2724
p = next;
2725
}
2726
2727
av_assert0(index == size);
2728
qsort(pts, size, sizeof(*pts), compare_int64);
2729
ost->forced_kf_count = size;
2730
ost->forced_kf_pts = pts;
2731
}
2732
2733
/* Warn (once per stream index) when a packet arrives for a stream that
 * appeared after the initial probe of the input file. */
static void report_new_stream(int input_index, AVPacket *pkt)
{
    InputFile *file = input_files[input_index];
    AVStream *st = file->ctx->streams[pkt->stream_index];

    /* already warned about this stream (and all lower indices)? */
    if (pkt->stream_index < file->nb_streams_warn)
        return;

    av_log(file->ctx, AV_LOG_WARNING,
           "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
           av_get_media_type_string(st->codec->codec_type),
           input_index, pkt->stream_index,
           pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));

    file->nb_streams_warn = pkt->stream_index + 1;
}
2747
2748
/**
 * Set the "encoder" metadata tag on an output stream to identify libavcodec
 * and the encoder in use (e.g. "Lavc... libx264"), unless the user already
 * provided one. In bitexact mode the library version is omitted so output
 * files remain reproducible.
 */
static void set_encoder_id(OutputFile *of, OutputStream *ost)
{
    AVDictionaryEntry *e;

    uint8_t *encoder_string;
    int encoder_string_len;
    int format_flags = 0;
    int codec_flags = 0;

    /* respect a user-supplied encoder tag */
    if (av_dict_get(ost->st->metadata, "encoder",  NULL, 0))
        return;

    /* evaluate -fflags to learn whether bitexact muxing was requested */
    e = av_dict_get(of->opts, "fflags", NULL, 0);
    if (e) {
        const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
        if (!o)
            return;
        av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
    }
    /* evaluate the per-stream codec -flags for the bitexact bit as well */
    e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
    if (e) {
        const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
        if (!o)
            return;
        av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
    }

    /* "<ident> <enc name>" plus separating space and NUL */
    encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
    encoder_string     = av_mallocz(encoder_string_len);
    if (!encoder_string)
        exit_program(1);

    if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
        av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
    else
        av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
    av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
    av_dict_set(&ost->st->metadata, "encoder",  encoder_string,
                AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
}
2788
2789
static int transcode_init(void)
2790
{
2791
int ret = 0, i, j, k;
2792
AVFormatContext *oc;
2793
OutputStream *ost;
2794
InputStream *ist;
2795
char error[1024] = {0};
2796
int want_sdp = 1;
2797
2798
for (i = 0; i < nb_filtergraphs; i++) {
2799
FilterGraph *fg = filtergraphs[i];
2800
for (j = 0; j < fg->nb_outputs; j++) {
2801
OutputFilter *ofilter = fg->outputs[j];
2802
if (!ofilter->ost || ofilter->ost->source_index >= 0)
2803
continue;
2804
if (fg->nb_inputs != 1)
2805
continue;
2806
for (k = nb_input_streams-1; k >= 0 ; k--)
2807
if (fg->inputs[0]->ist == input_streams[k])
2808
break;
2809
ofilter->ost->source_index = k;
2810
}
2811
}
2812
2813
/* init framerate emulation */
2814
for (i = 0; i < nb_input_files; i++) {
2815
InputFile *ifile = input_files[i];
2816
if (ifile->rate_emu)
2817
for (j = 0; j < ifile->nb_streams; j++)
2818
input_streams[j + ifile->ist_index]->start = av_gettime_relative();
2819
}
2820
2821
/* for each output stream, we compute the right encoding parameters */
2822
for (i = 0; i < nb_output_streams; i++) {
2823
AVCodecContext *enc_ctx;
2824
AVCodecContext *dec_ctx = NULL;
2825
ost = output_streams[i];
2826
oc = output_files[ost->file_index]->ctx;
2827
ist = get_input_stream(ost);
2828
2829
if (ost->attachment_filename)
2830
continue;
2831
2832
enc_ctx = ost->stream_copy ? ost->st->codec : ost->enc_ctx;
2833
2834
if (ist) {
2835
dec_ctx = ist->dec_ctx;
2836
2837
ost->st->disposition = ist->st->disposition;
2838
enc_ctx->bits_per_raw_sample = dec_ctx->bits_per_raw_sample;
2839
enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
2840
} else {
2841
for (j=0; j<oc->nb_streams; j++) {
2842
AVStream *st = oc->streams[j];
2843
if (st != ost->st && st->codec->codec_type == enc_ctx->codec_type)
2844
break;
2845
}
2846
if (j == oc->nb_streams)
2847
if (enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO || enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2848
ost->st->disposition = AV_DISPOSITION_DEFAULT;
2849
}
2850
2851
if (ost->stream_copy) {
2852
AVRational sar;
2853
uint64_t extra_size;
2854
2855
av_assert0(ist && !ost->filter);
2856
2857
extra_size = (uint64_t)dec_ctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE;
2858
2859
if (extra_size > INT_MAX) {
2860
return AVERROR(EINVAL);
2861
}
2862
2863
/* if stream_copy is selected, no need to decode or encode */
2864
enc_ctx->codec_id = dec_ctx->codec_id;
2865
enc_ctx->codec_type = dec_ctx->codec_type;
2866
2867
if (!enc_ctx->codec_tag) {
2868
unsigned int codec_tag;
2869
if (!oc->oformat->codec_tag ||
2870
av_codec_get_id (oc->oformat->codec_tag, dec_ctx->codec_tag) == enc_ctx->codec_id ||
2871
!av_codec_get_tag2(oc->oformat->codec_tag, dec_ctx->codec_id, &codec_tag))
2872
enc_ctx->codec_tag = dec_ctx->codec_tag;
2873
}
2874
2875
enc_ctx->bit_rate = dec_ctx->bit_rate;
2876
enc_ctx->rc_max_rate = dec_ctx->rc_max_rate;
2877
enc_ctx->rc_buffer_size = dec_ctx->rc_buffer_size;
2878
enc_ctx->field_order = dec_ctx->field_order;
2879
if (dec_ctx->extradata_size) {
2880
enc_ctx->extradata = av_mallocz(extra_size);
2881
if (!enc_ctx->extradata) {
2882
return AVERROR(ENOMEM);
2883
}
2884
memcpy(enc_ctx->extradata, dec_ctx->extradata, dec_ctx->extradata_size);
2885
}
2886
enc_ctx->extradata_size= dec_ctx->extradata_size;
2887
enc_ctx->bits_per_coded_sample = dec_ctx->bits_per_coded_sample;
2888
2889
enc_ctx->time_base = ist->st->time_base;
2890
/*
2891
* Avi is a special case here because it supports variable fps but
2892
* having the fps and timebase differe significantly adds quite some
2893
* overhead
2894
*/
2895
if(!strcmp(oc->oformat->name, "avi")) {
2896
if ( copy_tb<0 && av_q2d(ist->st->r_frame_rate) >= av_q2d(ist->st->avg_frame_rate)
2897
&& 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(ist->st->time_base)
2898
&& 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(dec_ctx->time_base)
2899
&& av_q2d(ist->st->time_base) < 1.0/500 && av_q2d(dec_ctx->time_base) < 1.0/500
2900
|| copy_tb==2){
2901
enc_ctx->time_base.num = ist->st->r_frame_rate.den;
2902
enc_ctx->time_base.den = 2*ist->st->r_frame_rate.num;
2903
enc_ctx->ticks_per_frame = 2;
2904
} else if ( copy_tb<0 && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > 2*av_q2d(ist->st->time_base)
2905
&& av_q2d(ist->st->time_base) < 1.0/500
2906
|| copy_tb==0){
2907
enc_ctx->time_base = dec_ctx->time_base;
2908
enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
2909
enc_ctx->time_base.den *= 2;
2910
enc_ctx->ticks_per_frame = 2;
2911
}
2912
} else if(!(oc->oformat->flags & AVFMT_VARIABLE_FPS)
2913
&& strcmp(oc->oformat->name, "mov") && strcmp(oc->oformat->name, "mp4") && strcmp(oc->oformat->name, "3gp")
2914
&& strcmp(oc->oformat->name, "3g2") && strcmp(oc->oformat->name, "psp") && strcmp(oc->oformat->name, "ipod")
2915
&& strcmp(oc->oformat->name, "f4v")
2916
) {
2917
if( copy_tb<0 && dec_ctx->time_base.den
2918
&& av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > av_q2d(ist->st->time_base)
2919
&& av_q2d(ist->st->time_base) < 1.0/500
2920
|| copy_tb==0){
2921
enc_ctx->time_base = dec_ctx->time_base;
2922
enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
2923
}
2924
}
2925
if ( enc_ctx->codec_tag == AV_RL32("tmcd")
2926
&& dec_ctx->time_base.num < dec_ctx->time_base.den
2927
&& dec_ctx->time_base.num > 0
2928
&& 121LL*dec_ctx->time_base.num > dec_ctx->time_base.den) {
2929
enc_ctx->time_base = dec_ctx->time_base;
2930
}
2931
2932
if (!ost->frame_rate.num)
2933
ost->frame_rate = ist->framerate;
2934
if(ost->frame_rate.num)
2935
enc_ctx->time_base = av_inv_q(ost->frame_rate);
2936
2937
av_reduce(&enc_ctx->time_base.num, &enc_ctx->time_base.den,
2938
enc_ctx->time_base.num, enc_ctx->time_base.den, INT_MAX);
2939
2940
if (ist->st->nb_side_data) {
2941
ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
2942
sizeof(*ist->st->side_data));
2943
if (!ost->st->side_data)
2944
return AVERROR(ENOMEM);
2945
2946
ost->st->nb_side_data = 0;
2947
for (j = 0; j < ist->st->nb_side_data; j++) {
2948
const AVPacketSideData *sd_src = &ist->st->side_data[j];
2949
AVPacketSideData *sd_dst = &ost->st->side_data[ost->st->nb_side_data];
2950
2951
if (ost->rotate_overridden && sd_src->type == AV_PKT_DATA_DISPLAYMATRIX)
2952
continue;
2953
2954
sd_dst->data = av_malloc(sd_src->size);
2955
if (!sd_dst->data)
2956
return AVERROR(ENOMEM);
2957
memcpy(sd_dst->data, sd_src->data, sd_src->size);
2958
sd_dst->size = sd_src->size;
2959
sd_dst->type = sd_src->type;
2960
ost->st->nb_side_data++;
2961
}
2962
}
2963
2964
ost->parser = av_parser_init(enc_ctx->codec_id);
2965
2966
switch (enc_ctx->codec_type) {
2967
case AVMEDIA_TYPE_AUDIO:
2968
if (audio_volume != 256) {
2969
av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
2970
exit_program(1);
2971
}
2972
enc_ctx->channel_layout = dec_ctx->channel_layout;
2973
enc_ctx->sample_rate = dec_ctx->sample_rate;
2974
enc_ctx->channels = dec_ctx->channels;
2975
enc_ctx->frame_size = dec_ctx->frame_size;
2976
enc_ctx->audio_service_type = dec_ctx->audio_service_type;
2977
enc_ctx->block_align = dec_ctx->block_align;
2978
enc_ctx->initial_padding = dec_ctx->delay;
2979
enc_ctx->profile = dec_ctx->profile;
2980
#if FF_API_AUDIOENC_DELAY
2981
enc_ctx->delay = dec_ctx->delay;
2982
#endif
2983
if((enc_ctx->block_align == 1 || enc_ctx->block_align == 1152 || enc_ctx->block_align == 576) && enc_ctx->codec_id == AV_CODEC_ID_MP3)
2984
enc_ctx->block_align= 0;
2985
if(enc_ctx->codec_id == AV_CODEC_ID_AC3)
2986
enc_ctx->block_align= 0;
2987
break;
2988
case AVMEDIA_TYPE_VIDEO:
2989
enc_ctx->pix_fmt = dec_ctx->pix_fmt;
2990
enc_ctx->width = dec_ctx->width;
2991
enc_ctx->height = dec_ctx->height;
2992
enc_ctx->has_b_frames = dec_ctx->has_b_frames;
2993
if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
2994
sar =
2995
av_mul_q(ost->frame_aspect_ratio,
2996
(AVRational){ enc_ctx->height, enc_ctx->width });
2997
av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
2998
"with stream copy may produce invalid files\n");
2999
}
3000
else if (ist->st->sample_aspect_ratio.num)
3001
sar = ist->st->sample_aspect_ratio;
3002
else
3003
sar = dec_ctx->sample_aspect_ratio;
3004
ost->st->sample_aspect_ratio = enc_ctx->sample_aspect_ratio = sar;
3005
ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3006
ost->st->r_frame_rate = ist->st->r_frame_rate;
3007
break;
3008
case AVMEDIA_TYPE_SUBTITLE:
3009
enc_ctx->width = dec_ctx->width;
3010
enc_ctx->height = dec_ctx->height;
3011
break;
3012
case AVMEDIA_TYPE_UNKNOWN:
3013
case AVMEDIA_TYPE_DATA:
3014
case AVMEDIA_TYPE_ATTACHMENT:
3015
break;
3016
default:
3017
abort();
3018
}
3019
} else {
3020
if (!ost->enc)
3021
ost->enc = avcodec_find_encoder(enc_ctx->codec_id);
3022
if (!ost->enc) {
3023
/* should only happen when a default codec is not present. */
3024
snprintf(error, sizeof(error), "Encoder (codec %s) not found for output stream #%d:%d",
3025
avcodec_get_name(ost->st->codec->codec_id), ost->file_index, ost->index);
3026
ret = AVERROR(EINVAL);
3027
goto dump_format;
3028
}
3029
3030
set_encoder_id(output_files[ost->file_index], ost);
3031
3032
#if CONFIG_LIBMFX
3033
if (qsv_transcode_init(ost))
3034
exit_program(1);
3035
#endif
3036
3037
if (!ost->filter &&
3038
(enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3039
enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO)) {
3040
FilterGraph *fg;
3041
fg = init_simple_filtergraph(ist, ost);
3042
if (configure_filtergraph(fg)) {
3043
av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
3044
exit_program(1);
3045
}
3046
}
3047
3048
if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3049
if (!ost->frame_rate.num)
3050
ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3051
if (ist && !ost->frame_rate.num)
3052
ost->frame_rate = ist->framerate;
3053
if (ist && !ost->frame_rate.num)
3054
ost->frame_rate = ist->st->r_frame_rate;
3055
if (ist && !ost->frame_rate.num) {
3056
ost->frame_rate = (AVRational){25, 1};
3057
av_log(NULL, AV_LOG_WARNING,
3058
"No information "
3059
"about the input framerate is available. Falling "
3060
"back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3061
"if you want a different framerate.\n",
3062
ost->file_index, ost->index);
3063
}
3064
// ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
3065
if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
3066
int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3067
ost->frame_rate = ost->enc->supported_framerates[idx];
3068
}
3069
// reduce frame rate for mpeg4 to be within the spec limits
3070
if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3071
av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3072
ost->frame_rate.num, ost->frame_rate.den, 65535);
3073
}
3074
}
3075
3076
switch (enc_ctx->codec_type) {
3077
case AVMEDIA_TYPE_AUDIO:
3078
enc_ctx->sample_fmt = ost->filter->filter->inputs[0]->format;
3079
enc_ctx->sample_rate = ost->filter->filter->inputs[0]->sample_rate;
3080
enc_ctx->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
3081
enc_ctx->channels = avfilter_link_get_channels(ost->filter->filter->inputs[0]);
3082
enc_ctx->time_base = (AVRational){ 1, enc_ctx->sample_rate };
3083
break;
3084
case AVMEDIA_TYPE_VIDEO:
3085
enc_ctx->time_base = av_inv_q(ost->frame_rate);
3086
if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3087
enc_ctx->time_base = ost->filter->filter->inputs[0]->time_base;
3088
if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3089
&& (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3090
av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3091
"Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
3092
}
3093
for (j = 0; j < ost->forced_kf_count; j++)
3094
ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3095
AV_TIME_BASE_Q,
3096
enc_ctx->time_base);
3097
3098
enc_ctx->width = ost->filter->filter->inputs[0]->w;
3099
enc_ctx->height = ost->filter->filter->inputs[0]->h;
3100
enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3101
ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3102
av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3103
ost->filter->filter->inputs[0]->sample_aspect_ratio;
3104
if (!strncmp(ost->enc->name, "libx264", 7) &&
3105
enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3106
ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3107
av_log(NULL, AV_LOG_WARNING,
3108
"No pixel format specified, %s for H.264 encoding chosen.\n"
3109
"Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3110
av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3111
if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
3112
enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3113
ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3114
av_log(NULL, AV_LOG_WARNING,
3115
"No pixel format specified, %s for MPEG-2 encoding chosen.\n"
3116
"Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3117
av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3118
enc_ctx->pix_fmt = ost->filter->filter->inputs[0]->format;
3119
3120
ost->st->avg_frame_rate = ost->frame_rate;
3121
3122
if (!dec_ctx ||
3123
enc_ctx->width != dec_ctx->width ||
3124
enc_ctx->height != dec_ctx->height ||
3125
enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3126
enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
3127
}
3128
3129
if (ost->forced_keyframes) {
3130
if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3131
ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3132
forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3133
if (ret < 0) {
3134
av_log(NULL, AV_LOG_ERROR,
3135
"Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3136
return ret;
3137
}
3138
ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3139
ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3140
ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3141
ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3142
3143
// Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3144
// parse it only for static kf timings
3145
} else if(strncmp(ost->forced_keyframes, "source", 6)) {
3146
parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3147
}
3148
}
3149
break;
3150
case AVMEDIA_TYPE_SUBTITLE:
3151
enc_ctx->time_base = (AVRational){1, 1000};
3152
if (!enc_ctx->width) {
3153
enc_ctx->width = input_streams[ost->source_index]->st->codec->width;
3154
enc_ctx->height = input_streams[ost->source_index]->st->codec->height;
3155
}
3156
break;
3157
case AVMEDIA_TYPE_DATA:
3158
break;
3159
default:
3160
abort();
3161
break;
3162
}
3163
}
3164
3165
if (ost->disposition) {
3166
static const AVOption opts[] = {
3167
{ "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3168
{ "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3169
{ "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3170
{ "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3171
{ "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3172
{ "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3173
{ "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3174
{ "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3175
{ "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3176
{ "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3177
{ "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3178
{ "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3179
{ "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3180
{ "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3181
{ NULL },
3182
};
3183
static const AVClass class = {
3184
.class_name = "",
3185
.item_name = av_default_item_name,
3186
.option = opts,
3187
.version = LIBAVUTIL_VERSION_INT,
3188
};
3189
const AVClass *pclass = &class;
3190
3191
ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3192
if (ret < 0)
3193
goto dump_format;
3194
}
3195
}
3196
3197
/* open each encoder */
3198
for (i = 0; i < nb_output_streams; i++) {
3199
ret = init_output_stream(output_streams[i], error, sizeof(error));
3200
if (ret < 0)
3201
goto dump_format;
3202
}
3203
3204
/* init input streams */
3205
for (i = 0; i < nb_input_streams; i++)
3206
if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
3207
for (i = 0; i < nb_output_streams; i++) {
3208
ost = output_streams[i];
3209
avcodec_close(ost->enc_ctx);
3210
}
3211
goto dump_format;
3212
}
3213
3214
/* discard unused programs */
3215
for (i = 0; i < nb_input_files; i++) {
3216
InputFile *ifile = input_files[i];
3217
for (j = 0; j < ifile->ctx->nb_programs; j++) {
3218
AVProgram *p = ifile->ctx->programs[j];
3219
int discard = AVDISCARD_ALL;
3220
3221
for (k = 0; k < p->nb_stream_indexes; k++)
3222
if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3223
discard = AVDISCARD_DEFAULT;
3224
break;
3225
}
3226
p->discard = discard;
3227
}
3228
}
3229
3230
/* open files and write file headers */
3231
for (i = 0; i < nb_output_files; i++) {
3232
oc = output_files[i]->ctx;
3233
oc->interrupt_callback = int_cb;
3234
if ((ret = avformat_write_header(oc, &output_files[i]->opts)) < 0) {
3235
snprintf(error, sizeof(error),
3236
"Could not write header for output file #%d "
3237
"(incorrect codec parameters ?): %s",
3238
i, av_err2str(ret));
3239
ret = AVERROR(EINVAL);
3240
goto dump_format;
3241
}
3242
// assert_avoptions(output_files[i]->opts);
3243
if (strcmp(oc->oformat->name, "rtp")) {
3244
want_sdp = 0;
3245
}
3246
}
3247
3248
dump_format:
3249
/* dump the file output parameters - cannot be done before in case
3250
of stream copy */
3251
for (i = 0; i < nb_output_files; i++) {
3252
av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);
3253
}
3254
3255
/* dump the stream mapping */
3256
av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3257
for (i = 0; i < nb_input_streams; i++) {
3258
ist = input_streams[i];
3259
3260
for (j = 0; j < ist->nb_filters; j++) {
3261
if (ist->filters[j]->graph->graph_desc) {
3262
av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3263
ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3264
ist->filters[j]->name);
3265
if (nb_filtergraphs > 1)
3266
av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3267
av_log(NULL, AV_LOG_INFO, "\n");
3268
}
3269
}
3270
}
3271
3272
for (i = 0; i < nb_output_streams; i++) {
3273
ost = output_streams[i];
3274
3275
if (ost->attachment_filename) {
3276
/* an attached file */
3277
av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3278
ost->attachment_filename, ost->file_index, ost->index);
3279
continue;
3280
}
3281
3282
if (ost->filter && ost->filter->graph->graph_desc) {
3283
/* output from a complex graph */
3284
av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3285
if (nb_filtergraphs > 1)
3286
av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3287
3288
av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3289
ost->index, ost->enc ? ost->enc->name : "?");
3290
continue;
3291
}
3292
3293
av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3294
input_streams[ost->source_index]->file_index,
3295
input_streams[ost->source_index]->st->index,
3296
ost->file_index,
3297
ost->index);
3298
if (ost->sync_ist != input_streams[ost->source_index])
3299
av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3300
ost->sync_ist->file_index,
3301
ost->sync_ist->st->index);
3302
if (ost->stream_copy)
3303
av_log(NULL, AV_LOG_INFO, " (copy)");
3304
else {
3305
const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3306
const AVCodec *out_codec = ost->enc;
3307
const char *decoder_name = "?";
3308
const char *in_codec_name = "?";
3309
const char *encoder_name = "?";
3310
const char *out_codec_name = "?";
3311
const AVCodecDescriptor *desc;
3312
3313
if (in_codec) {
3314
decoder_name = in_codec->name;
3315
desc = avcodec_descriptor_get(in_codec->id);
3316
if (desc)
3317
in_codec_name = desc->name;
3318
if (!strcmp(decoder_name, in_codec_name))
3319
decoder_name = "native";
3320
}
3321
3322
if (out_codec) {
3323
encoder_name = out_codec->name;
3324
desc = avcodec_descriptor_get(out_codec->id);
3325
if (desc)
3326
out_codec_name = desc->name;
3327
if (!strcmp(encoder_name, out_codec_name))
3328
encoder_name = "native";
3329
}
3330
3331
av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3332
in_codec_name, decoder_name,
3333
out_codec_name, encoder_name);
3334
}
3335
av_log(NULL, AV_LOG_INFO, "\n");
3336
}
3337
3338
if (ret) {
3339
av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3340
return ret;
3341
}
3342
3343
if (sdp_filename || want_sdp) {
3344
print_sdp();
3345
}
3346
3347
transcode_init_done = 1;
3348
3349
return 0;
3350
}
3351
3352
/* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3353
static int need_output(void)
3354
{
3355
int i;
3356
3357
for (i = 0; i < nb_output_streams; i++) {
3358
OutputStream *ost = output_streams[i];
3359
OutputFile *of = output_files[ost->file_index];
3360
AVFormatContext *os = output_files[ost->file_index]->ctx;
3361
3362
if (ost->finished ||
3363
(os->pb && avio_tell(os->pb) >= of->limit_filesize))
3364
continue;
3365
if (ost->frame_number >= ost->max_frames) {
3366
int j;
3367
for (j = 0; j < of->ctx->nb_streams; j++)
3368
close_output_stream(output_streams[of->ost_index + j]);
3369
continue;
3370
}
3371
3372
return 1;
3373
}
3374
3375
return 0;
3376
}
3377
3378
/**
3379
* Select the output stream to process.
3380
*
3381
* @return selected output stream, or NULL if none available
3382
*/
3383
static OutputStream *choose_output(void)
3384
{
3385
int i;
3386
int64_t opts_min = INT64_MAX;
3387
OutputStream *ost_min = NULL;
3388
3389
for (i = 0; i < nb_output_streams; i++) {
3390
OutputStream *ost = output_streams[i];
3391
int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3392
av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3393
AV_TIME_BASE_Q);
3394
if (ost->st->cur_dts == AV_NOPTS_VALUE)
3395
av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");
3396
3397
if (!ost->finished && opts < opts_min) {
3398
opts_min = opts;
3399
ost_min = ost->unavailable ? NULL : ost;
3400
}
3401
}
3402
return ost_min;
3403
}
3404
3405
static void set_tty_echo(int on)
3406
{
3407
#if HAVE_TERMIOS_H
3408
struct termios tty;
3409
if (tcgetattr(0, &tty) == 0) {
3410
if (on) tty.c_lflag |= ECHO;
3411
else tty.c_lflag &= ~ECHO;
3412
tcsetattr(0, TCSANOW, &tty);
3413
}
3414
#endif
3415
}
3416
3417
static int check_keyboard_interaction(int64_t cur_time)
3418
{
3419
int i, ret, key;
3420
static int64_t last_time;
3421
if (received_nb_signals)
3422
return AVERROR_EXIT;
3423
/* read_key() returns 0 on EOF */
3424
if(cur_time - last_time >= 100000 && !run_as_daemon){
3425
key = read_key();
3426
last_time = cur_time;
3427
}else
3428
key = -1;
3429
if (key == 'q')
3430
return AVERROR_EXIT;
3431
if (key == '+') av_log_set_level(av_log_get_level()+10);
3432
if (key == '-') av_log_set_level(av_log_get_level()-10);
3433
if (key == 's') qp_hist ^= 1;
3434
if (key == 'h'){
3435
if (do_hex_dump){
3436
do_hex_dump = do_pkt_dump = 0;
3437
} else if(do_pkt_dump){
3438
do_hex_dump = 1;
3439
} else
3440
do_pkt_dump = 1;
3441
av_log_set_level(AV_LOG_DEBUG);
3442
}
3443
if (key == 'c' || key == 'C'){
3444
char buf[4096], target[64], command[256], arg[256] = {0};
3445
double time;
3446
int k, n = 0;
3447
fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3448
i = 0;
3449
set_tty_echo(1);
3450
while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3451
if (k > 0)
3452
buf[i++] = k;
3453
buf[i] = 0;
3454
set_tty_echo(0);
3455
fprintf(stderr, "\n");
3456
if (k > 0 &&
3457
(n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3458
av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3459
target, time, command, arg);
3460
for (i = 0; i < nb_filtergraphs; i++) {
3461
FilterGraph *fg = filtergraphs[i];
3462
if (fg->graph) {
3463
if (time < 0) {
3464
ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3465
key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3466
fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3467
} else if (key == 'c') {
3468
fprintf(stderr, "Queing commands only on filters supporting the specific command is unsupported\n");
3469
ret = AVERROR_PATCHWELCOME;
3470
} else {
3471
ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3472
if (ret < 0)
3473
fprintf(stderr, "Queing command failed with error %s\n", av_err2str(ret));
3474
}
3475
}
3476
}
3477
} else {
3478
av_log(NULL, AV_LOG_ERROR,
3479
"Parse error, at least 3 arguments were expected, "
3480
"only %d given in string '%s'\n", n, buf);
3481
}
3482
}
3483
if (key == 'd' || key == 'D'){
3484
int debug=0;
3485
if(key == 'D') {
3486
debug = input_streams[0]->st->codec->debug<<1;
3487
if(!debug) debug = 1;
3488
while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
3489
debug += debug;
3490
}else{
3491
char buf[32];
3492
int k = 0;
3493
i = 0;
3494
set_tty_echo(1);
3495
while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3496
if (k > 0)
3497
buf[i++] = k;
3498
buf[i] = 0;
3499
set_tty_echo(0);
3500
fprintf(stderr, "\n");
3501
if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3502
fprintf(stderr,"error parsing debug value\n");
3503
}
3504
for(i=0;i<nb_input_streams;i++) {
3505
input_streams[i]->st->codec->debug = debug;
3506
}
3507
for(i=0;i<nb_output_streams;i++) {
3508
OutputStream *ost = output_streams[i];
3509
ost->enc_ctx->debug = debug;
3510
}
3511
if(debug) av_log_set_level(AV_LOG_DEBUG);
3512
fprintf(stderr,"debug=%d\n", debug);
3513
}
3514
if (key == '?'){
3515
fprintf(stderr, "key function\n"
3516
"? show this help\n"
3517
"+ increase verbosity\n"
3518
"- decrease verbosity\n"
3519
"c Send command to first matching filter supporting it\n"
3520
"C Send/Que command to all matching filters\n"
3521
"D cycle through available debug modes\n"
3522
"h dump packets/hex press to cycle through the 3 states\n"
3523
"q quit\n"
3524
"s Show QP histogram\n"
3525
);
3526
}
3527
return 0;
3528
}
3529
3530
#if HAVE_PTHREADS
3531
static void *input_thread(void *arg)
3532
{
3533
InputFile *f = arg;
3534
unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3535
int ret = 0;
3536
3537
while (1) {
3538
AVPacket pkt;
3539
ret = av_read_frame(f->ctx, &pkt);
3540
3541
if (ret == AVERROR(EAGAIN)) {
3542
av_usleep(10000);
3543
continue;
3544
}
3545
if (ret < 0) {
3546
av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3547
break;
3548
}
3549
ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3550
if (flags && ret == AVERROR(EAGAIN)) {
3551
flags = 0;
3552
ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3553
av_log(f->ctx, AV_LOG_WARNING,
3554
"Thread message queue blocking; consider raising the "
3555
"thread_queue_size option (current value: %d)\n",
3556
f->thread_queue_size);
3557
}
3558
if (ret < 0) {
3559
if (ret != AVERROR_EOF)
3560
av_log(f->ctx, AV_LOG_ERROR,
3561
"Unable to send packet to main thread: %s\n",
3562
av_err2str(ret));
3563
av_packet_unref(&pkt);
3564
av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3565
break;
3566
}
3567
}
3568
3569
return NULL;
3570
}
3571
3572
static void free_input_threads(void)
3573
{
3574
int i;
3575
3576
for (i = 0; i < nb_input_files; i++) {
3577
InputFile *f = input_files[i];
3578
AVPacket pkt;
3579
3580
if (!f || !f->in_thread_queue)
3581
continue;
3582
av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
3583
while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
3584
av_packet_unref(&pkt);
3585
3586
pthread_join(f->thread, NULL);
3587
f->joined = 1;
3588
av_thread_message_queue_free(&f->in_thread_queue);
3589
}
3590
}
3591
3592
static int init_input_threads(void)
3593
{
3594
int i, ret;
3595
3596
if (nb_input_files == 1)
3597
return 0;
3598
3599
for (i = 0; i < nb_input_files; i++) {
3600
InputFile *f = input_files[i];
3601
3602
if (f->ctx->pb ? !f->ctx->pb->seekable :
3603
strcmp(f->ctx->iformat->name, "lavfi"))
3604
f->non_blocking = 1;
3605
ret = av_thread_message_queue_alloc(&f->in_thread_queue,
3606
f->thread_queue_size, sizeof(AVPacket));
3607
if (ret < 0)
3608
return ret;
3609
3610
if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
3611
av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
3612
av_thread_message_queue_free(&f->in_thread_queue);
3613
return AVERROR(ret);
3614
}
3615
}
3616
return 0;
3617
}
3618
3619
/* Fetch one packet from the file's demuxer-thread queue; non-blocking if
 * the input was flagged as such. */
static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
{
    unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;

    return av_thread_message_queue_recv(f->in_thread_queue, pkt, flags);
}
3625
#endif
3626
3627
/* Read the next packet from an input file, honouring -re rate emulation
 * and routing through the per-file demuxer thread when several inputs
 * are open. */
static int get_input_packet(InputFile *f, AVPacket *pkt)
{
    if (f->rate_emu) {
        /* with -re, refuse to read ahead of realtime for any stream */
        int idx;
        for (idx = 0; idx < f->nb_streams; idx++) {
            InputStream *ist = input_streams[f->ist_index + idx];
            int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
            int64_t now = av_gettime_relative() - ist->start;
            if (pts > now)
                return AVERROR(EAGAIN);
        }
    }

#if HAVE_PTHREADS
    if (nb_input_files > 1)
        return get_input_packet_mt(f, pkt);
#endif
    return av_read_frame(f->ctx, pkt);
}
3646
3647
static int got_eagain(void)
3648
{
3649
int i;
3650
for (i = 0; i < nb_output_streams; i++)
3651
if (output_streams[i]->unavailable)
3652
return 1;
3653
return 0;
3654
}
3655
3656
static void reset_eagain(void)
3657
{
3658
int i;
3659
for (i = 0; i < nb_input_files; i++)
3660
input_files[i]->eagain = 0;
3661
for (i = 0; i < nb_output_streams; i++)
3662
output_streams[i]->unavailable = 0;
3663
}
3664
3665
// set duration to max(tmp, duration) in a proper time base and return duration's time_base
3666
static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
3667
AVRational time_base)
3668
{
3669
int ret;
3670
3671
if (!*duration) {
3672
*duration = tmp;
3673
return tmp_time_base;
3674
}
3675
3676
ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
3677
if (ret < 0) {
3678
*duration = tmp;
3679
return tmp_time_base;
3680
}
3681
3682
return time_base;
3683
}
3684
3685
/* Rewind an input file to its start for -stream_loop handling: flush the
 * decoders, recompute the file duration (used to offset timestamps on the
 * following loop iterations) and decrement the remaining loop count.
 * Returns the av_seek_frame() result. */
static int seek_to_start(InputFile *ifile, AVFormatContext *is)
{
    InputStream *ist;
    AVCodecContext *avctx;
    int i, ret, has_audio = 0;
    int64_t duration = 0;

    ret = av_seek_frame(is, -1, is->start_time, 0);
    if (ret < 0)
        return ret;

    for (i = 0; i < ifile->nb_streams; i++) {
        ist   = input_streams[ifile->ist_index + i];
        avctx = ist->dec_ctx;

        /* flush decoders */
        if (ist->decoding_needed) {
            process_input_packet(ist, NULL, 1);
            avcodec_flush_buffers(avctx);
        }

        /* duration is the length of the last frame in a stream
         * when audio stream is present we don't care about
         * last video frame length because it's not defined exactly */
        if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
            has_audio = 1;
    }

    for (i = 0; i < ifile->nb_streams; i++) {
        ist   = input_streams[ifile->ist_index + i];
        avctx = ist->dec_ctx;

        if (has_audio) {
            /* only audio streams with decoded samples contribute */
            if (avctx->codec_type != AVMEDIA_TYPE_AUDIO || !ist->nb_samples)
                continue;
            {
                AVRational sample_rate = {1, avctx->sample_rate};

                duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
            }
        } else if (ist->framerate.num) {
            duration = av_rescale_q(1, ist->framerate, ist->st->time_base);
        } else if (ist->st->avg_frame_rate.num) {
            duration = av_rescale_q(1, ist->st->avg_frame_rate, ist->st->time_base);
        } else {
            duration = 1;
        }

        if (!ifile->duration)
            ifile->time_base = ist->st->time_base;
        /* max_pts - min_pts is the stream duration without its last frame,
         * so add the last-frame duration computed above */
        duration += ist->max_pts - ist->min_pts;
        ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
                                        ifile->time_base);
    }

    if (ifile->loop > 0)
        ifile->loop--;

    return ret;
}
3745
3746
/*
3747
* Return
3748
* - 0 -- one packet was read and processed
3749
* - AVERROR(EAGAIN) -- no packets were available for selected file,
3750
* this function should be called again
3751
* - AVERROR_EOF -- this function should not be called again
3752
*/
3753
static int process_input(int file_index)
3754
{
3755
InputFile *ifile = input_files[file_index];
3756
AVFormatContext *is;
3757
InputStream *ist;
3758
AVPacket pkt;
3759
int ret, i, j;
3760
int64_t duration;
3761
int64_t pkt_dts;
3762
3763
is = ifile->ctx;
3764
ret = get_input_packet(ifile, &pkt);
3765
3766
if (ret == AVERROR(EAGAIN)) {
3767
ifile->eagain = 1;
3768
return ret;
3769
}
3770
if (ret < 0 && ifile->loop) {
3771
if ((ret = seek_to_start(ifile, is)) < 0)
3772
return ret;
3773
ret = get_input_packet(ifile, &pkt);
3774
}
3775
if (ret < 0) {
3776
if (ret != AVERROR_EOF) {
3777
print_error(is->filename, ret);
3778
if (exit_on_error)
3779
exit_program(1);
3780
}
3781
3782
for (i = 0; i < ifile->nb_streams; i++) {
3783
ist = input_streams[ifile->ist_index + i];
3784
if (ist->decoding_needed) {
3785
ret = process_input_packet(ist, NULL, 0);
3786
if (ret>0)
3787
return 0;
3788
}
3789
3790
/* mark all outputs that don't go through lavfi as finished */
3791
for (j = 0; j < nb_output_streams; j++) {
3792
OutputStream *ost = output_streams[j];
3793
3794
if (ost->source_index == ifile->ist_index + i &&
3795
(ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
3796
finish_output_stream(ost);
3797
}
3798
}
3799
3800
ifile->eof_reached = 1;
3801
return AVERROR(EAGAIN);
3802
}
3803
3804
reset_eagain();
3805
3806
if (do_pkt_dump) {
3807
av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
3808
is->streams[pkt.stream_index]);
3809
}
3810
/* the following test is needed in case new streams appear
3811
dynamically in stream : we ignore them */
3812
if (pkt.stream_index >= ifile->nb_streams) {
3813
report_new_stream(file_index, &pkt);
3814
goto discard_packet;
3815
}
3816
3817
ist = input_streams[ifile->ist_index + pkt.stream_index];
3818
3819
ist->data_size += pkt.size;
3820
ist->nb_packets++;
3821
3822
if (ist->discard)
3823
goto discard_packet;
3824
3825
if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
3826
av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->filename, pkt.stream_index);
3827
exit_program(1);
3828
}
3829
3830
if (debug_ts) {
3831
av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
3832
"next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
3833
ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
3834
av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
3835
av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
3836
av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3837
av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3838
av_ts2str(input_files[ist->file_index]->ts_offset),
3839
av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
3840
}
3841
3842
if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
3843
int64_t stime, stime2;
3844
// Correcting starttime based on the enabled streams
3845
// FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
3846
// so we instead do it here as part of discontinuity handling
3847
if ( ist->next_dts == AV_NOPTS_VALUE
3848
&& ifile->ts_offset == -is->start_time
3849
&& (is->iformat->flags & AVFMT_TS_DISCONT)) {
3850
int64_t new_start_time = INT64_MAX;
3851
for (i=0; i<is->nb_streams; i++) {
3852
AVStream *st = is->streams[i];
3853
if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
3854
continue;
3855
new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
3856
}
3857
if (new_start_time > is->start_time) {
3858
av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
3859
ifile->ts_offset = -new_start_time;
3860
}
3861
}
3862
3863
stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
3864
stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
3865
ist->wrap_correction_done = 1;
3866
3867
if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3868
pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
3869
ist->wrap_correction_done = 0;
3870
}
3871
if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3872
pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
3873
ist->wrap_correction_done = 0;
3874
}
3875
}
3876
3877
/* add the stream-global side data to the first packet */
3878
if (ist->nb_packets == 1) {
3879
if (ist->st->nb_side_data)
3880
av_packet_split_side_data(&pkt);
3881
for (i = 0; i < ist->st->nb_side_data; i++) {
3882
AVPacketSideData *src_sd = &ist->st->side_data[i];
3883
uint8_t *dst_data;
3884
3885
if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
3886
continue;
3887
if (ist->autorotate && src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3888
continue;
3889
3890
dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
3891
if (!dst_data)
3892
exit_program(1);
3893
3894
memcpy(dst_data, src_sd->data, src_sd->size);
3895
}
3896
}
3897
3898
if (pkt.dts != AV_NOPTS_VALUE)
3899
pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3900
if (pkt.pts != AV_NOPTS_VALUE)
3901
pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3902
3903
if (pkt.pts != AV_NOPTS_VALUE)
3904
pkt.pts *= ist->ts_scale;
3905
if (pkt.dts != AV_NOPTS_VALUE)
3906
pkt.dts *= ist->ts_scale;
3907
3908
pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
3909
if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3910
ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3911
pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
3912
&& (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
3913
int64_t delta = pkt_dts - ifile->last_ts;
3914
if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3915
delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
3916
ifile->ts_offset -= delta;
3917
av_log(NULL, AV_LOG_DEBUG,
3918
"Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3919
delta, ifile->ts_offset);
3920
pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3921
if (pkt.pts != AV_NOPTS_VALUE)
3922
pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3923
}
3924
}
3925
3926
duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
3927
if (pkt.pts != AV_NOPTS_VALUE) {
3928
pkt.pts += duration;
3929
ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
3930
ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
3931
}
3932
3933
if (pkt.dts != AV_NOPTS_VALUE)
3934
pkt.dts += duration;
3935
3936
pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
3937
if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3938
ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3939
pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
3940
!copy_ts) {
3941
int64_t delta = pkt_dts - ist->next_dts;
3942
if (is->iformat->flags & AVFMT_TS_DISCONT) {
3943
if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3944
delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
3945
pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
3946
ifile->ts_offset -= delta;
3947
av_log(NULL, AV_LOG_DEBUG,
3948
"timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3949
delta, ifile->ts_offset);
3950
pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3951
if (pkt.pts != AV_NOPTS_VALUE)
3952
pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3953
}
3954
} else {
3955
if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3956
delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
3957
av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
3958
pkt.dts = AV_NOPTS_VALUE;
3959
}
3960
if (pkt.pts != AV_NOPTS_VALUE){
3961
int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
3962
delta = pkt_pts - ist->next_dts;
3963
if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3964
delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
3965
av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
3966
pkt.pts = AV_NOPTS_VALUE;
3967
}
3968
}
3969
}
3970
}
3971
3972
if (pkt.dts != AV_NOPTS_VALUE)
3973
ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3974
3975
if (debug_ts) {
3976
av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
3977
ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
3978
av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3979
av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3980
av_ts2str(input_files[ist->file_index]->ts_offset),
3981
av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
3982
}
3983
3984
sub2video_heartbeat(ist, pkt.pts);
3985
3986
process_input_packet(ist, &pkt, 0);
3987
3988
discard_packet:
3989
av_packet_unref(&pkt);
3990
3991
return 0;
3992
}
3993
3994
/**
3995
* Perform a step of transcoding for the specified filter graph.
3996
*
3997
* @param[in] graph filter graph to consider
3998
* @param[out] best_ist input stream where a frame would allow to continue
3999
* @return 0 for success, <0 for error
4000
*/
4001
static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4002
{
4003
int i, ret;
4004
int nb_requests, nb_requests_max = 0;
4005
InputFilter *ifilter;
4006
InputStream *ist;
4007
4008
*best_ist = NULL;
4009
ret = avfilter_graph_request_oldest(graph->graph);
4010
if (ret >= 0)
4011
return reap_filters(0);
4012
4013
if (ret == AVERROR_EOF) {
4014
ret = reap_filters(1);
4015
for (i = 0; i < graph->nb_outputs; i++)
4016
close_output_stream(graph->outputs[i]->ost);
4017
return ret;
4018
}
4019
if (ret != AVERROR(EAGAIN))
4020
return ret;
4021
4022
for (i = 0; i < graph->nb_inputs; i++) {
4023
ifilter = graph->inputs[i];
4024
ist = ifilter->ist;
4025
if (input_files[ist->file_index]->eagain ||
4026
input_files[ist->file_index]->eof_reached)
4027
continue;
4028
nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4029
if (nb_requests > nb_requests_max) {
4030
nb_requests_max = nb_requests;
4031
*best_ist = ist;
4032
}
4033
}
4034
4035
if (!*best_ist)
4036
for (i = 0; i < graph->nb_outputs; i++)
4037
graph->outputs[i]->ost->unavailable = 1;
4038
4039
return 0;
4040
}
4041
4042
/**
4043
* Run a single step of transcoding.
4044
*
4045
* @return 0 for success, <0 for error
4046
*/
4047
static int transcode_step(void)
4048
{
4049
OutputStream *ost;
4050
InputStream *ist;
4051
int ret;
4052
4053
ost = choose_output();
4054
if (!ost) {
4055
if (got_eagain()) {
4056
reset_eagain();
4057
av_usleep(10000);
4058
return 0;
4059
}
4060
av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
4061
return AVERROR_EOF;
4062
}
4063
4064
if (ost->filter) {
4065
if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
4066
return ret;
4067
if (!ist)
4068
return 0;
4069
} else {
4070
av_assert0(ost->source_index >= 0);
4071
ist = input_streams[ost->source_index];
4072
}
4073
4074
ret = process_input(ist->file_index);
4075
if (ret == AVERROR(EAGAIN)) {
4076
if (input_files[ist->file_index]->eagain)
4077
ost->unavailable = 1;
4078
return 0;
4079
}
4080
4081
if (ret < 0)
4082
return ret == AVERROR_EOF ? 0 : ret;
4083
4084
return reap_filters(0);
4085
}
4086
4087
/*
4088
* The following code is the main loop of the file converter
4089
*/
4090
static int transcode(void)
4091
{
4092
int ret, i;
4093
AVFormatContext *os;
4094
OutputStream *ost;
4095
InputStream *ist;
4096
int64_t timer_start;
4097
int64_t total_packets_written = 0;
4098
4099
ret = transcode_init();
4100
if (ret < 0)
4101
goto fail;
4102
4103
if (stdin_interaction) {
4104
av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4105
}
4106
4107
timer_start = av_gettime_relative();
4108
4109
#if HAVE_PTHREADS
4110
if ((ret = init_input_threads()) < 0)
4111
goto fail;
4112
#endif
4113
4114
while (!received_sigterm) {
4115
int64_t cur_time= av_gettime_relative();
4116
4117
/* if 'q' pressed, exits */
4118
if (stdin_interaction)
4119
if (check_keyboard_interaction(cur_time) < 0)
4120
break;
4121
4122
/* check if there's any stream where output is still needed */
4123
if (!need_output()) {
4124
av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4125
break;
4126
}
4127
4128
ret = transcode_step();
4129
if (ret < 0 && ret != AVERROR_EOF) {
4130
char errbuf[128];
4131
av_strerror(ret, errbuf, sizeof(errbuf));
4132
4133
av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
4134
break;
4135
}
4136
4137
/* dump report by using the output first video and audio streams */
4138
print_report(0, timer_start, cur_time);
4139
}
4140
#if HAVE_PTHREADS
4141
free_input_threads();
4142
#endif
4143
4144
/* at the end of stream, we must flush the decoder buffers */
4145
for (i = 0; i < nb_input_streams; i++) {
4146
ist = input_streams[i];
4147
if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
4148
process_input_packet(ist, NULL, 0);
4149
}
4150
}
4151
flush_encoders();
4152
4153
term_exit();
4154
4155
/* write the trailer if needed and close file */
4156
for (i = 0; i < nb_output_files; i++) {
4157
os = output_files[i]->ctx;
4158
if ((ret = av_write_trailer(os)) < 0) {
4159
av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s", os->filename, av_err2str(ret));
4160
if (exit_on_error)
4161
exit_program(1);
4162
}
4163
}
4164
4165
/* dump report by using the first video and audio streams */
4166
print_report(1, timer_start, av_gettime_relative());
4167
4168
/* close each encoder */
4169
for (i = 0; i < nb_output_streams; i++) {
4170
ost = output_streams[i];
4171
if (ost->encoding_needed) {
4172
av_freep(&ost->enc_ctx->stats_in);
4173
}
4174
total_packets_written += ost->packets_written;
4175
}
4176
4177
if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4178
av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4179
exit_program(1);
4180
}
4181
4182
/* close each decoder */
4183
for (i = 0; i < nb_input_streams; i++) {
4184
ist = input_streams[i];
4185
if (ist->decoding_needed) {
4186
avcodec_close(ist->dec_ctx);
4187
if (ist->hwaccel_uninit)
4188
ist->hwaccel_uninit(ist->dec_ctx);
4189
}
4190
}
4191
4192
/* finished ! */
4193
ret = 0;
4194
4195
fail:
4196
#if HAVE_PTHREADS
4197
free_input_threads();
4198
#endif
4199
4200
if (output_streams) {
4201
for (i = 0; i < nb_output_streams; i++) {
4202
ost = output_streams[i];
4203
if (ost) {
4204
if (ost->logfile) {
4205
if (fclose(ost->logfile))
4206
av_log(NULL, AV_LOG_ERROR,
4207
"Error closing logfile, loss of information possible: %s\n",
4208
av_err2str(AVERROR(errno)));
4209
ost->logfile = NULL;
4210
}
4211
av_freep(&ost->forced_kf_pts);
4212
av_freep(&ost->apad);
4213
av_freep(&ost->disposition);
4214
av_dict_free(&ost->encoder_opts);
4215
av_dict_free(&ost->sws_dict);
4216
av_dict_free(&ost->swr_opts);
4217
av_dict_free(&ost->resample_opts);
4218
}
4219
}
4220
}
4221
return ret;
4222
}
4223
4224
4225
/* Return the user CPU time consumed by this process, in microseconds.
 * Falls back to relative wall-clock time where no usage API is available. */
static int64_t getutime(void)
{
#if HAVE_GETRUSAGE
    struct rusage ru;

    getrusage(RUSAGE_SELF, &ru);
    return ru.ru_utime.tv_sec * 1000000LL + ru.ru_utime.tv_usec;
#elif HAVE_GETPROCESSTIMES
    FILETIME ct, et, kt, ut;
    HANDLE self = GetCurrentProcess();

    GetProcessTimes(self, &ct, &et, &kt, &ut);
    /* FILETIME counts 100ns ticks; merge the halves and convert to us */
    return ((int64_t)ut.dwHighDateTime << 32 | ut.dwLowDateTime) / 10;
#else
    return av_gettime_relative();
#endif
}
4242
4243
/* Return the peak memory usage of this process, in bytes (0 if unknown). */
static int64_t getmaxrss(void)
{
#if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
    struct rusage ru;

    getrusage(RUSAGE_SELF, &ru);
    /* this path assumes ru_maxrss is reported in kilobytes */
    return (int64_t)ru.ru_maxrss * 1024;
#elif HAVE_GETPROCESSMEMORYINFO
    PROCESS_MEMORY_COUNTERS mc;
    HANDLE self = GetCurrentProcess();

    mc.cb = sizeof(mc);
    GetProcessMemoryInfo(self, &mc, sizeof(mc));
    return mc.PeakPagefileUsage;
#else
    return 0;
#endif
}
}
4260
4261
/* Log callback that discards every message; installed by main() when running
 * in daemon mode (-d) so nothing is written to the detached terminal. */
static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
{
}
4264
4265
/* Program entry point: registers cleanup, parses options, opens all
 * inputs/outputs, runs transcode() and reports timing / error statistics. */
int main(int argc, char **argv)
{
    int ret;
    int64_t ti;

    register_exit(ffmpeg_cleanup);

    setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */

    av_log_set_flags(AV_LOG_SKIP_REPEATED);
    parse_loglevel(argc, argv, options);

    /* "-d" as the very first argument selects daemon mode: silence the log
     * and drop the flag before regular option parsing */
    if(argc>1 && !strcmp(argv[1], "-d")){
        run_as_daemon=1;
        av_log_set_callback(log_callback_null);
        argc--;
        argv++;
    }

    /* register all codecs, (optionally) devices, filters and (de)muxers */
    avcodec_register_all();
#if CONFIG_AVDEVICE
    avdevice_register_all();
#endif
    avfilter_register_all();
    av_register_all();
    avformat_network_init();

    show_banner(argc, argv, options);

    term_init();

    /* parse options and open all input/output files */
    ret = ffmpeg_parse_options(argc, argv);
    if (ret < 0)
        exit_program(1);

    if (nb_output_files <= 0 && nb_input_files == 0) {
        show_usage();
        av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
        exit_program(1);
    }

    /* file converter / grab */
    if (nb_output_files <= 0) {
        av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
        exit_program(1);
    }

//     if (nb_input_files == 0) {
//         av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
//         exit_program(1);
//     }

    /* run the conversion, measuring consumed user CPU time around it */
    current_time = ti = getutime();
    if (transcode() < 0)
        exit_program(1);
    ti = getutime() - ti;
    if (do_benchmark) {
        av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
    }
    av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
           decode_error_stat[0], decode_error_stat[1]);
    /* abort with a distinctive code when the decode error rate exceeds the
     * user-configured maximum */
    if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
        exit_program(69);

    /* exit_program() does not return; 255 signals interruption by a signal */
    exit_program(received_nb_signals ? 255 : main_return_code);
    return main_return_code;
}
4333
4334