/*
 * Copyright (c) 2011 Nicolas George <[email protected]>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Audio merging filter
 */
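
/*
 * Illustrative usage (file names are placeholders): the filter is normally
 * driven from a filtergraph, e.g.
 *
 *   ffmpeg -i left.wav -i right.wav \
 *          -filter_complex "[0:a][1:a]amerge=inputs=2[a]" \
 *          -map "[a]" merged.wav
 *
 * The "inputs" option defaults to 2 (see amerge_options below).
 */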

#include "libavutil/avstring.h"
#include "libavutil/bprint.h"
#include "libavutil/channel_layout.h"
#include "libavutil/opt.h"
#include "avfilter.h"
#include "audio.h"
#include "bufferqueue.h"
#include "internal.h"

#define SWR_CH_MAX 64

typedef struct {
    const AVClass *class;
    int nb_inputs;
    int route[SWR_CH_MAX]; /**< channels routing, see copy_samples */
    int bps;
    struct amerge_input {
        struct FFBufQueue queue;
        int nb_ch;         /**< number of channels for the input */
        int nb_samples;
        int pos;
    } *in;
} AMergeContext;

#define OFFSET(x) offsetof(AMergeContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

static const AVOption amerge_options[] = {
    { "inputs", "specify the number of inputs", OFFSET(nb_inputs),
      AV_OPT_TYPE_INT, { .i64 = 2 }, 2, SWR_CH_MAX, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(amerge);

static av_cold void uninit(AVFilterContext *ctx)
{
    AMergeContext *s = ctx->priv;
    int i;

    for (i = 0; i < s->nb_inputs; i++) {
        if (s->in)
            ff_bufqueue_discard_all(&s->in[i].queue);
        if (ctx->input_pads)
            av_freep(&ctx->input_pads[i].name);
    }
    av_freep(&s->in);
}

static int query_formats(AVFilterContext *ctx)
{
    AMergeContext *s = ctx->priv;
    int64_t inlayout[SWR_CH_MAX], outlayout = 0;
    AVFilterFormats *formats;
    AVFilterChannelLayouts *layouts;
    int i, ret, overlap = 0, nb_ch = 0;

    for (i = 0; i < s->nb_inputs; i++) {
        if (!ctx->inputs[i]->in_channel_layouts ||
            !ctx->inputs[i]->in_channel_layouts->nb_channel_layouts) {
            av_log(ctx, AV_LOG_WARNING,
                   "No channel layout for input %d\n", i + 1);
            return AVERROR(EAGAIN);
        }
        inlayout[i] = ctx->inputs[i]->in_channel_layouts->channel_layouts[0];
        if (ctx->inputs[i]->in_channel_layouts->nb_channel_layouts > 1) {
            char buf[256];
            av_get_channel_layout_string(buf, sizeof(buf), 0, inlayout[i]);
            av_log(ctx, AV_LOG_INFO, "Using \"%s\" for input %d\n", buf, i + 1);
        }
        s->in[i].nb_ch = av_get_channel_layout_nb_channels(inlayout[i]);
        if (outlayout & inlayout[i])
            overlap++;
        outlayout |= inlayout[i];
        nb_ch += s->in[i].nb_ch;
    }
    if (nb_ch > SWR_CH_MAX) {
        av_log(ctx, AV_LOG_ERROR, "Too many channels (max %d)\n", SWR_CH_MAX);
        return AVERROR(EINVAL);
    }
    if (overlap) {
        av_log(ctx, AV_LOG_WARNING,
               "Input channel layouts overlap: "
               "output layout will be determined by the number of distinct input channels\n");
        for (i = 0; i < nb_ch; i++)
            s->route[i] = i;
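        /* Prefer a standard default layout for nb_ch channels; if none
         * exists, fall back to a layout with the nb_ch lowest channel
         * bits set. */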
        outlayout = av_get_default_channel_layout(nb_ch);
        if (!outlayout && nb_ch)
            outlayout = 0xFFFFFFFFFFFFFFFFULL >> (64 - nb_ch);
    } else {
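        /* Non-overlapping layouts: route[] is split into one sub-array per
         * input, and each input channel is assigned its index in the merged
         * layout, so output channels follow the combined channel-layout bit
         * order. */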
        int *route[SWR_CH_MAX];
        int c, out_ch_number = 0;

        route[0] = s->route;
        for (i = 1; i < s->nb_inputs; i++)
            route[i] = route[i - 1] + s->in[i - 1].nb_ch;
        for (c = 0; c < 64; c++)
            for (i = 0; i < s->nb_inputs; i++)
                if ((inlayout[i] >> c) & 1)
                    *(route[i]++) = out_ch_number++;
    }
    formats = ff_make_format_list(ff_packed_sample_fmts_array);
    if ((ret = ff_set_common_formats(ctx, formats)) < 0)
        return ret;
    for (i = 0; i < s->nb_inputs; i++) {
        layouts = NULL;
        if ((ret = ff_add_channel_layout(&layouts, inlayout[i])) < 0)
            return ret;
        if ((ret = ff_channel_layouts_ref(layouts, &ctx->inputs[i]->out_channel_layouts)) < 0)
            return ret;
    }
    layouts = NULL;
    if ((ret = ff_add_channel_layout(&layouts, outlayout)) < 0)
        return ret;
    if ((ret = ff_channel_layouts_ref(layouts, &ctx->outputs[0]->in_channel_layouts)) < 0)
        return ret;

    return ff_set_common_samplerates(ctx, ff_all_samplerates());
}

static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AMergeContext *s = ctx->priv;
    AVBPrint bp;
    int i;

    for (i = 1; i < s->nb_inputs; i++) {
        if (ctx->inputs[i]->sample_rate != ctx->inputs[0]->sample_rate) {
            av_log(ctx, AV_LOG_ERROR,
                   "Inputs must have the same sample rate "
                   "%d for in%d vs %d\n",
                   ctx->inputs[i]->sample_rate, i, ctx->inputs[0]->sample_rate);
            return AVERROR(EINVAL);
        }
    }
    s->bps = av_get_bytes_per_sample(ctx->outputs[0]->format);
    outlink->sample_rate = ctx->inputs[0]->sample_rate;
    outlink->time_base   = ctx->inputs[0]->time_base;

    av_bprint_init(&bp, 0, 1);
    for (i = 0; i < s->nb_inputs; i++) {
        av_bprintf(&bp, "%sin%d:", i ? " + " : "", i);
        av_bprint_channel_layout(&bp, -1, ctx->inputs[i]->channel_layout);
    }
    av_bprintf(&bp, " -> out:");
    av_bprint_channel_layout(&bp, -1, ctx->outputs[0]->channel_layout);
    av_log(ctx, AV_LOG_VERBOSE, "%s\n", bp.str);

    return 0;
}

static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AMergeContext *s = ctx->priv;
    int i, ret;

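    /* Ask each input whose queue currently holds no samples for more data,
     * so that frames covering all inputs can eventually be emitted. */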
    for (i = 0; i < s->nb_inputs; i++)
        if (!s->in[i].nb_samples)
            if ((ret = ff_request_frame(ctx->inputs[i])) < 0)
                return ret;
    return 0;
}

/**
 * Copy samples from several input streams to one output stream.
 * @param nb_inputs number of inputs
 * @param in        inputs; used only for the nb_ch field
 * @param route     routing values;
 *                  input channel i goes to output channel route[i];
 *                  i <  in[0].nb_ch are the channels from the first input;
 *                  i >= in[0].nb_ch are the channels from the second input
 * @param ins       pointers to the samples of each input, in packed format;
 *                  will be left at the end of the copied samples
 * @param outs      pointer to the samples of the output, in packed format;
 *                  must point to a buffer big enough;
 *                  will be left at the end of the copied samples
 * @param ns        number of samples to copy
 * @param bps       bytes per sample
 */
static inline void copy_samples(int nb_inputs, struct amerge_input in[],
                                int *route, uint8_t *ins[],
                                uint8_t **outs, int ns, int bps)
{
    int *route_cur;
    int i, c, nb_ch = 0;

    for (i = 0; i < nb_inputs; i++)
        nb_ch += in[i].nb_ch;
    while (ns--) {
        route_cur = route;
        for (i = 0; i < nb_inputs; i++) {
            for (c = 0; c < in[i].nb_ch; c++) {
                memcpy((*outs) + bps * *(route_cur++), ins[i], bps);
                ins[i] += bps;
            }
        }
        *outs += nb_ch * bps;
    }
}

static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
{
    AVFilterContext *ctx = inlink->dst;
    AMergeContext *s = ctx->priv;
    AVFilterLink *const outlink = ctx->outputs[0];
    int input_number;
    int nb_samples, ns, i;
    AVFrame *outbuf, *inbuf[SWR_CH_MAX];
    uint8_t *ins[SWR_CH_MAX], *outs;

    for (input_number = 0; input_number < s->nb_inputs; input_number++)
        if (inlink == ctx->inputs[input_number])
            break;
    av_assert1(input_number < s->nb_inputs);
    if (ff_bufqueue_is_full(&s->in[input_number].queue)) {
        av_frame_free(&insamples);
        return AVERROR(ENOMEM);
    }
    ff_bufqueue_add(ctx, &s->in[input_number].queue, av_frame_clone(insamples));
    s->in[input_number].nb_samples += insamples->nb_samples;
    av_frame_free(&insamples);
    nb_samples = s->in[0].nb_samples;
    for (i = 1; i < s->nb_inputs; i++)
        nb_samples = FFMIN(nb_samples, s->in[i].nb_samples);
    if (!nb_samples)
        return 0;

    outbuf = ff_get_audio_buffer(ctx->outputs[0], nb_samples);
    if (!outbuf)
        return AVERROR(ENOMEM);
    outs = outbuf->data[0];
    for (i = 0; i < s->nb_inputs; i++) {
        inbuf[i] = ff_bufqueue_peek(&s->in[i].queue, 0);
        ins[i] = inbuf[i]->data[0] +
                 s->in[i].pos * s->in[i].nb_ch * s->bps;
    }
    av_frame_copy_props(outbuf, inbuf[0]);
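    /* The output pts is the first input's pts shifted by the number of
     * samples already consumed from that frame (s->in[0].pos), rescaled
     * from a per-sample time base to the output time base. */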
    outbuf->pts = inbuf[0]->pts == AV_NOPTS_VALUE ? AV_NOPTS_VALUE :
                  inbuf[0]->pts +
                  av_rescale_q(s->in[0].pos,
                               av_make_q(1, ctx->inputs[0]->sample_rate),
                               ctx->outputs[0]->time_base);

    outbuf->nb_samples     = nb_samples;
    outbuf->channel_layout = outlink->channel_layout;
    av_frame_set_channels(outbuf, outlink->channels);

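    /* Consume the queued input frames in lock-step: each pass copies ns
     * interleaved samples, where ns is bounded by the input frame with the
     * fewest samples left; fully consumed frames are popped from their
     * queue. */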
    while (nb_samples) {
        ns = nb_samples;
        for (i = 0; i < s->nb_inputs; i++)
            ns = FFMIN(ns, inbuf[i]->nb_samples - s->in[i].pos);
        /* Unroll the most common sample formats: speed +~350% for the loop,
           +~13% overall (including two common decoders) */
        switch (s->bps) {
        case 1:
            copy_samples(s->nb_inputs, s->in, s->route, ins, &outs, ns, 1);
            break;
        case 2:
            copy_samples(s->nb_inputs, s->in, s->route, ins, &outs, ns, 2);
            break;
        case 4:
            copy_samples(s->nb_inputs, s->in, s->route, ins, &outs, ns, 4);
            break;
        default:
            copy_samples(s->nb_inputs, s->in, s->route, ins, &outs, ns, s->bps);
            break;
        }

        nb_samples -= ns;
        for (i = 0; i < s->nb_inputs; i++) {
            s->in[i].nb_samples -= ns;
            s->in[i].pos += ns;
            if (s->in[i].pos == inbuf[i]->nb_samples) {
                s->in[i].pos = 0;
                av_frame_free(&inbuf[i]);
                ff_bufqueue_get(&s->in[i].queue);
                inbuf[i] = ff_bufqueue_peek(&s->in[i].queue, 0);
                ins[i] = inbuf[i] ? inbuf[i]->data[0] : NULL;
            }
        }
    }
    return ff_filter_frame(ctx->outputs[0], outbuf);
}

static av_cold int init(AVFilterContext *ctx)
{
    AMergeContext *s = ctx->priv;
    int i;

    s->in = av_calloc(s->nb_inputs, sizeof(*s->in));
    if (!s->in)
        return AVERROR(ENOMEM);
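    /* Create one input pad per requested input; pads are named "in0",
     * "in1", ... and all share the same filter_frame callback. */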
    for (i = 0; i < s->nb_inputs; i++) {
        char *name = av_asprintf("in%d", i);
        AVFilterPad pad = {
            .name             = name,
            .type             = AVMEDIA_TYPE_AUDIO,
            .filter_frame     = filter_frame,
        };
        if (!name)
            return AVERROR(ENOMEM);
        ff_insert_inpad(ctx, i, &pad);
    }
    return 0;
}

static const AVFilterPad amerge_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_AUDIO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};

AVFilter ff_af_amerge = {
    .name          = "amerge",
    .description   = NULL_IF_CONFIG_SMALL("Merge two or more audio streams into "
                                          "a single multi-channel stream."),
    .priv_size     = sizeof(AMergeContext),
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = NULL,
    .outputs       = amerge_outputs,
    .priv_class    = &amerge_class,
    .flags         = AVFILTER_FLAG_DYNAMIC_INPUTS,
};