Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/sound/soc/intel/avs/path.c
29270 views
1
// SPDX-License-Identifier: GPL-2.0-only
2
//
3
// Copyright(c) 2021 Intel Corporation
4
//
5
// Authors: Cezary Rojewski <[email protected]>
6
// Amadeusz Slawinski <[email protected]>
7
//
8
9
#include <linux/acpi.h>
10
#include <acpi/nhlt.h>
11
#include <sound/pcm_params.h>
12
#include <sound/soc.h>
13
#include "avs.h"
14
#include "control.h"
15
#include "path.h"
16
#include "topology.h"
17
18
/* Must be called with adev->comp_list_mutex held. */
19
static struct avs_tplg *
20
avs_path_find_tplg(struct avs_dev *adev, const char *name)
21
{
22
struct avs_soc_component *acomp;
23
24
list_for_each_entry(acomp, &adev->comp_list, node)
25
if (!strcmp(acomp->tplg->name, name))
26
return acomp->tplg;
27
return NULL;
28
}
29
30
static struct avs_path_module *
31
avs_path_find_module(struct avs_path_pipeline *ppl, u32 template_id)
32
{
33
struct avs_path_module *mod;
34
35
list_for_each_entry(mod, &ppl->mod_list, node)
36
if (mod->template->id == template_id)
37
return mod;
38
return NULL;
39
}
40
41
static struct avs_path_pipeline *
42
avs_path_find_pipeline(struct avs_path *path, u32 template_id)
43
{
44
struct avs_path_pipeline *ppl;
45
46
list_for_each_entry(ppl, &path->ppl_list, node)
47
if (ppl->template->id == template_id)
48
return ppl;
49
return NULL;
50
}
51
52
/*
 * Find an instantiated path by topology name and path-template id.
 *
 * Resolves @name to a topology, finds the template with @template_id in it,
 * then scans the device's active path list for an instance of that template.
 * Returns the path instance, or NULL if the topology, template or instance
 * does not exist.
 */
static struct avs_path *
avs_path_find_path(struct avs_dev *adev, const char *name, u32 template_id)
{
	struct avs_tplg_path_template *pos, *template = NULL;
	struct avs_tplg *tplg;
	struct avs_path *path;

	tplg = avs_path_find_tplg(adev, name);
	if (!tplg)
		return NULL;

	list_for_each_entry(pos, &tplg->path_tmpl_list, node) {
		if (pos->id == template_id) {
			template = pos;
			break;
		}
	}
	if (!template)
		return NULL;

	spin_lock(&adev->path_list_lock);
	/* Only one variant of given path template may be instantiated at a time. */
	list_for_each_entry(path, &adev->path_list, node) {
		if (path->template->owner == template) {
			spin_unlock(&adev->path_list_lock);
			return path;
		}
	}

	spin_unlock(&adev->path_list_lock);
	return NULL;
}
84
85
static bool avs_test_hw_params(struct snd_pcm_hw_params *params,
86
struct avs_audio_format *fmt)
87
{
88
return (params_rate(params) == fmt->sampling_freq &&
89
params_channels(params) == fmt->num_channels &&
90
params_physical_width(params) == fmt->bit_depth &&
91
snd_pcm_hw_params_bits(params) == fmt->valid_bit_depth);
92
}
93
94
static struct avs_tplg_path *
95
avs_path_find_variant(struct avs_dev *adev,
96
struct avs_tplg_path_template *template,
97
struct snd_pcm_hw_params *fe_params,
98
struct snd_pcm_hw_params *be_params)
99
{
100
struct avs_tplg_path *variant;
101
102
list_for_each_entry(variant, &template->path_list, node) {
103
dev_dbg(adev->dev, "check FE rate %d chn %d vbd %d bd %d\n",
104
variant->fe_fmt->sampling_freq, variant->fe_fmt->num_channels,
105
variant->fe_fmt->valid_bit_depth, variant->fe_fmt->bit_depth);
106
dev_dbg(adev->dev, "check BE rate %d chn %d vbd %d bd %d\n",
107
variant->be_fmt->sampling_freq, variant->be_fmt->num_channels,
108
variant->be_fmt->valid_bit_depth, variant->be_fmt->bit_depth);
109
110
if (variant->fe_fmt && avs_test_hw_params(fe_params, variant->fe_fmt) &&
111
variant->be_fmt && avs_test_hw_params(be_params, variant->be_fmt))
112
return variant;
113
}
114
115
return NULL;
116
}
117
118
/*
 * Select the conditional-path variant that links @source to @sink.
 *
 * Matches variants by the topology path ids of the source and sink path
 * instances. Returns NULL when no variant connects the two.
 */
static struct avs_tplg_path *avs_condpath_find_variant(struct avs_dev *adev,
						       struct avs_tplg_path_template *template,
						       struct avs_path *source,
						       struct avs_path *sink)
{
	struct avs_tplg_path *variant;

	list_for_each_entry(variant, &template->path_list, node) {
		if (variant->source_path_id == source->template->id &&
		    variant->sink_path_id == sink->template->id)
			return variant;
	}

	return NULL;
}
133
134
static bool avs_tplg_path_template_id_equal(struct avs_tplg_path_template_id *id,
135
struct avs_tplg_path_template_id *id2)
136
{
137
return id->id == id2->id && !strcmp(id->tplg_name, id2->tplg_name);
138
}
139
140
/*
 * Given one endpoint @path of a conditional path template, find the other.
 *
 * @dir selects which end of the condpath @path is expected to be: when set,
 * @path is matched against the template's source and the sink is looked up,
 * otherwise the roles are swapped. Returns the counterpart path, @path itself
 * for unidirectional condpaths (source == sink), or NULL when @path is not an
 * endpoint of this template or the counterpart is not instantiated.
 */
static struct avs_path *avs_condpath_find_match(struct avs_dev *adev,
						struct avs_tplg_path_template *template,
						struct avs_path *path, int dir)
{
	struct avs_tplg_path_template_id *id, *id2;

	if (dir) {
		id = &template->source;
		id2 = &template->sink;
	} else {
		id = &template->sink;
		id2 = &template->source;
	}

	/* Check whether this path is either source or sink of condpath template. */
	if (id->id != path->template->owner->id ||
	    strcmp(id->tplg_name, path->template->owner->owner->name))
		return NULL;

	/* Unidirectional condpaths are allowed. */
	if (avs_tplg_path_template_id_equal(id, id2))
		return path;

	/* Now find the counterpart. */
	return avs_path_find_path(adev, id2->tplg_name, id2->id);
}
166
167
static struct acpi_nhlt_config *
168
avs_nhlt_config_or_default(struct avs_dev *adev, struct avs_tplg_module *t);
169
170
int avs_path_set_constraint(struct avs_dev *adev, struct avs_tplg_path_template *template,
171
struct snd_pcm_hw_constraint_list *rate_list,
172
struct snd_pcm_hw_constraint_list *channels_list,
173
struct snd_pcm_hw_constraint_list *sample_bits_list)
174
{
175
struct avs_tplg_path *path_template;
176
unsigned int *rlist, *clist, *slist;
177
size_t i;
178
179
i = 0;
180
list_for_each_entry(path_template, &template->path_list, node)
181
i++;
182
183
rlist = kcalloc(i, sizeof(*rlist), GFP_KERNEL);
184
clist = kcalloc(i, sizeof(*clist), GFP_KERNEL);
185
slist = kcalloc(i, sizeof(*slist), GFP_KERNEL);
186
if (!rlist || !clist || !slist)
187
return -ENOMEM;
188
189
i = 0;
190
list_for_each_entry(path_template, &template->path_list, node) {
191
struct avs_tplg_pipeline *pipeline_template;
192
193
list_for_each_entry(pipeline_template, &path_template->ppl_list, node) {
194
struct avs_tplg_module *module_template;
195
196
list_for_each_entry(module_template, &pipeline_template->mod_list, node) {
197
const guid_t *type = &module_template->cfg_ext->type;
198
struct acpi_nhlt_config *blob;
199
200
if (!guid_equal(type, &AVS_COPIER_MOD_UUID) &&
201
!guid_equal(type, &AVS_WOVHOSTM_MOD_UUID))
202
continue;
203
204
switch (module_template->cfg_ext->copier.dma_type) {
205
case AVS_DMA_DMIC_LINK_INPUT:
206
case AVS_DMA_I2S_LINK_OUTPUT:
207
case AVS_DMA_I2S_LINK_INPUT:
208
break;
209
default:
210
continue;
211
}
212
213
blob = avs_nhlt_config_or_default(adev, module_template);
214
if (IS_ERR(blob))
215
continue;
216
217
rlist[i] = path_template->fe_fmt->sampling_freq;
218
clist[i] = path_template->fe_fmt->num_channels;
219
slist[i] = path_template->fe_fmt->bit_depth;
220
i++;
221
}
222
}
223
}
224
225
if (i) {
226
rate_list->count = i;
227
rate_list->list = rlist;
228
channels_list->count = i;
229
channels_list->list = clist;
230
sample_bits_list->count = i;
231
sample_bits_list->list = slist;
232
} else {
233
kfree(rlist);
234
kfree(clist);
235
kfree(slist);
236
}
237
238
return i;
239
}
240
241
/*
 * Initialize a gateway connector node id from topology configuration.
 *
 * The virtual index is taken verbatim from the topology for link-side DMIC
 * and I2S gateways, replaced by @dma_id for HDA host gateways, and OR-ed
 * with @dma_id for HDA link gateways. Unknown DMA types yield
 * INVALID_NODE_ID.
 */
static void avs_init_node_id(union avs_connector_node_id *node_id,
			     struct avs_tplg_modcfg_ext *te, u32 dma_id)
{
	node_id->val = 0;
	node_id->dma_type = te->copier.dma_type;

	switch (node_id->dma_type) {
	case AVS_DMA_DMIC_LINK_INPUT:
	case AVS_DMA_I2S_LINK_OUTPUT:
	case AVS_DMA_I2S_LINK_INPUT:
		/* Gateway's virtual index is statically assigned in the topology. */
		node_id->vindex = te->copier.vindex.val;
		break;

	case AVS_DMA_HDA_HOST_OUTPUT:
	case AVS_DMA_HDA_HOST_INPUT:
		/* Gateway's virtual index is dynamically assigned with DMA ID */
		node_id->vindex = dma_id;
		break;

	case AVS_DMA_HDA_LINK_OUTPUT:
	case AVS_DMA_HDA_LINK_INPUT:
		node_id->vindex = te->copier.vindex.val | dma_id;
		break;

	default:
		*node_id = INVALID_NODE_ID;
		break;
	}
}
271
272
/* Every BLOB contains at least gateway attributes. */
/* Anonymous u32[2] = { size-in-bytes (4), zeroed attributes } cast to an NHLT config. */
static struct acpi_nhlt_config *default_blob = (struct acpi_nhlt_config *)&(u32[2]) {4};
274
275
/*
 * Look up the NHLT endpoint configuration blob for a module, or fall back.
 *
 * For I2S and DMIC gateways, translates the module's copier configuration
 * into an acpi_nhlt_find_fmtcfg() query; other DMA types get the minimal
 * default_blob. Returns ERR_PTR(-ENOENT) when no endpoint format matches,
 * ERR_PTR(-ETOOSMALL)/-EINVAL for malformed blobs.
 */
static struct acpi_nhlt_config *
avs_nhlt_config_or_default(struct avs_dev *adev, struct avs_tplg_module *t)
{
	struct acpi_nhlt_format_config *fmtcfg;
	struct avs_tplg_modcfg_ext *te;
	struct avs_audio_format *fmt;
	int link_type, dev_type;
	int bus_id, dir;

	te = t->cfg_ext;

	switch (te->copier.dma_type) {
	case AVS_DMA_I2S_LINK_OUTPUT:
		link_type = ACPI_NHLT_LINKTYPE_SSP;
		dev_type = ACPI_NHLT_DEVICETYPE_CODEC;
		bus_id = te->copier.vindex.i2s.instance;
		dir = SNDRV_PCM_STREAM_PLAYBACK;
		/* Playback: match against the copier's output format. */
		fmt = te->copier.out_fmt;
		break;

	case AVS_DMA_I2S_LINK_INPUT:
		link_type = ACPI_NHLT_LINKTYPE_SSP;
		dev_type = ACPI_NHLT_DEVICETYPE_CODEC;
		bus_id = te->copier.vindex.i2s.instance;
		dir = SNDRV_PCM_STREAM_CAPTURE;
		/* Capture: match against the module's input format. */
		fmt = t->in_fmt;
		break;

	case AVS_DMA_DMIC_LINK_INPUT:
		link_type = ACPI_NHLT_LINKTYPE_PDM;
		dev_type = -1; /* ignored */
		bus_id = 0;
		dir = SNDRV_PCM_STREAM_CAPTURE;
		fmt = t->in_fmt;
		break;

	default:
		return default_blob;
	}

	/* Override format selection if necessary. */
	if (te->copier.blob_fmt)
		fmt = te->copier.blob_fmt;

	fmtcfg = acpi_nhlt_find_fmtcfg(link_type, dev_type, dir, bus_id,
				       fmt->num_channels, fmt->sampling_freq, fmt->valid_bit_depth,
				       fmt->bit_depth);
	if (!fmtcfg) {
		dev_warn(adev->dev, "Endpoint format configuration not found.\n");
		return ERR_PTR(-ENOENT);
	}

	/* A valid blob is never smaller than the bare gateway attributes. */
	if (fmtcfg->config.capabilities_size < default_blob->capabilities_size)
		return ERR_PTR(-ETOOSMALL);
	/* The firmware expects the payload to be DWORD-aligned. */
	if (fmtcfg->config.capabilities_size % sizeof(u32))
		return ERR_PTR(-EINVAL);

	return &fmtcfg->config;
}
335
336
/*
 * Append a DMA-configuration TLV to a copier gateway blob.
 *
 * Only applies on ALTHDA platforms and only for non-HDA gateway types; in
 * all other cases returns 0 without touching the blob. On success both
 * gtw->config_length (in DWORDs) and *cfg_size (in bytes) are advanced by
 * the TLV size. Returns -E2BIG when the TLV would overflow the mailbox.
 */
static int avs_append_dma_cfg(struct avs_dev *adev, struct avs_copier_gtw_cfg *gtw,
			      struct avs_tplg_module *t, u32 dma_id, size_t *cfg_size)
{
	u32 dma_type = t->cfg_ext->copier.dma_type;
	struct avs_dma_cfg *dma;
	struct avs_tlv *tlv;
	size_t tlv_size;

	if (!avs_platattr_test(adev, ALTHDA))
		return 0;

	switch (dma_type) {
	case AVS_DMA_HDA_HOST_OUTPUT:
	case AVS_DMA_HDA_HOST_INPUT:
	case AVS_DMA_HDA_LINK_OUTPUT:
	case AVS_DMA_HDA_LINK_INPUT:
		/* HDA gateways need no extra DMA configuration. */
		return 0;
	default:
		break;
	}

	tlv_size = sizeof(*tlv) + sizeof(*dma);
	if (*cfg_size + tlv_size > AVS_MAILBOX_SIZE)
		return -E2BIG;

	/* DMA config is a TLV tailing the existing payload. */
	tlv = (struct avs_tlv *)&gtw->config.blob[gtw->config_length];
	tlv->type = AVS_GTW_DMA_CONFIG_ID;
	tlv->length = sizeof(*dma);

	dma = (struct avs_dma_cfg *)tlv->value;
	memset(dma, 0, sizeof(*dma));
	dma->dma_method = AVS_DMA_METHOD_HDA;
	dma->pre_allocated = true;
	dma->dma_channel_id = dma_id;
	dma->stream_id = dma_id + 1;

	/* config_length is expressed in DWORDs, cfg_size in bytes. */
	gtw->config_length += tlv_size / sizeof(u32);
	*cfg_size += tlv_size;

	return 0;
}
378
379
/*
 * Copy the module's NHLT blob into the gateway configuration and append
 * any platform-required DMA TLV. Updates *cfg_size with bytes consumed.
 * Returns 0 on success, -E2BIG if the blob overflows the mailbox, or the
 * error from NHLT blob lookup.
 */
static int avs_fill_gtw_config(struct avs_dev *adev, struct avs_copier_gtw_cfg *gtw,
			       struct avs_tplg_module *t, u32 dma_id, size_t *cfg_size)
{
	struct acpi_nhlt_config *blob;
	size_t gtw_size;

	blob = avs_nhlt_config_or_default(adev, t);
	if (IS_ERR(blob))
		return PTR_ERR(blob);

	gtw_size = blob->capabilities_size;
	if (*cfg_size + gtw_size > AVS_MAILBOX_SIZE)
		return -E2BIG;

	/* Firmware expects the length in DWORDs. */
	gtw->config_length = gtw_size / sizeof(u32);
	memcpy(gtw->config.blob, blob->capabilities, blob->capabilities_size);
	*cfg_size += gtw_size;

	return avs_append_dma_cfg(adev, gtw, t, dma_id, cfg_size);
}
399
400
/*
 * Instantiate a Copier module on the DSP.
 *
 * Builds the copier configuration (base config, output format, gateway
 * node id and NHLT blob) in the shared mailbox buffer and sends it via
 * avs_dsp_init_module(). Returns 0 on success or a negative error code.
 */
static int avs_copier_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	struct avs_tplg_module *t = mod->template;
	struct avs_tplg_modcfg_ext *te;
	struct avs_copier_cfg *cfg;
	size_t cfg_size;
	u32 dma_id;
	int ret;

	te = t->cfg_ext;
	cfg = adev->modcfg_buf;
	dma_id = mod->owner->owner->dma_id;
	/* Size up to, but excluding, the variable-length gateway blob. */
	cfg_size = offsetof(struct avs_copier_cfg, gtw_cfg.config);

	/* Fill the gateway blob first; it extends cfg_size. */
	ret = avs_fill_gtw_config(adev, &cfg->gtw_cfg, t, dma_id, &cfg_size);
	if (ret)
		return ret;

	cfg->base.cpc = t->cfg_base->cpc;
	cfg->base.ibs = t->cfg_base->ibs;
	cfg->base.obs = t->cfg_base->obs;
	cfg->base.is_pages = t->cfg_base->is_pages;
	cfg->base.audio_fmt = *t->in_fmt;
	cfg->out_fmt = *te->copier.out_fmt;
	cfg->feature_mask = te->copier.feature_mask;
	avs_init_node_id(&cfg->gtw_cfg.node_id, te, dma_id);
	cfg->gtw_cfg.dma_buffer_size = te->copier.dma_buffer_size;
	mod->gtw_attrs = cfg->gtw_cfg.config.attrs;

	ret = avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id, t->core_id,
				  t->domain, cfg, cfg_size, &mod->instance_id);
	return ret;
}
433
434
/*
 * Instantiate a WoV Host Module (WHM) on the DSP.
 *
 * Mirrors avs_copier_create() but uses the WHM-specific configuration:
 * reference format, output format and wake tick period.
 */
static int avs_whm_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	struct avs_tplg_module *t = mod->template;
	struct avs_tplg_modcfg_ext *te;
	struct avs_whm_cfg *cfg;
	size_t cfg_size;
	u32 dma_id;
	int ret;

	te = t->cfg_ext;
	cfg = adev->modcfg_buf;
	dma_id = mod->owner->owner->dma_id;
	/* Size up to, but excluding, the variable-length gateway blob. */
	cfg_size = offsetof(struct avs_whm_cfg, gtw_cfg.config);

	ret = avs_fill_gtw_config(adev, &cfg->gtw_cfg, t, dma_id, &cfg_size);
	if (ret)
		return ret;

	cfg->base.cpc = t->cfg_base->cpc;
	cfg->base.ibs = t->cfg_base->ibs;
	cfg->base.obs = t->cfg_base->obs;
	cfg->base.is_pages = t->cfg_base->is_pages;
	cfg->base.audio_fmt = *t->in_fmt;
	cfg->ref_fmt = *te->whm.ref_fmt;
	cfg->out_fmt = *te->whm.out_fmt;
	cfg->wake_tick_period = te->whm.wake_tick_period;
	avs_init_node_id(&cfg->gtw_cfg.node_id, te, dma_id);
	cfg->gtw_cfg.dma_buffer_size = te->whm.dma_buffer_size;
	mod->gtw_attrs = cfg->gtw_cfg.config.attrs;

	ret = avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id, t->core_id,
				  t->domain, cfg, cfg_size, &mod->instance_id);
	return ret;
}
468
469
/*
 * Find the mixer control attached to @mod whose name contains @name.
 *
 * Walks the kcontrols of the DAPM widget belonging to the module's path
 * template and matches on both the module's ctl_id and a substring of the
 * kcontrol name (e.g. "Volume", "Switch"). Returns NULL when none match.
 */
static struct soc_mixer_control *avs_get_module_control(struct avs_path_module *mod,
							const char *name)
{
	struct avs_tplg_module *t = mod->template;
	struct avs_tplg_path_template *path_tmpl;
	struct snd_soc_dapm_widget *w;
	int i;

	/* module -> pipeline -> path -> path template owns the widget. */
	path_tmpl = t->owner->owner->owner;
	w = path_tmpl->w;

	for (i = 0; i < w->num_kcontrols; i++) {
		struct avs_control_data *ctl_data;
		struct soc_mixer_control *mc;

		mc = (struct soc_mixer_control *)w->kcontrols[i]->private_value;
		ctl_data = (struct avs_control_data *)mc->dobj.private;
		if (ctl_data->id == t->ctl_id && strstr(w->kcontrols[i]->id.name, name))
			return mc;
	}

	return NULL;
}
492
493
/*
 * Program volume levels on a peakvol/gain module instance.
 *
 * @input: per-channel target volumes; when NULL, the cached values from the
 *         control's avs_control_data are used.
 *
 * With per-channel controls (mc->num_channels set) each channel is
 * programmed individually; otherwise a single entry targeting
 * AVS_ALL_CHANNELS_MASK is sent. Returns 0 or a negative error translated
 * by AVS_IPC_RET().
 */
int avs_peakvol_set_volume(struct avs_dev *adev, struct avs_path_module *mod,
			   struct soc_mixer_control *mc, long *input)
{
	struct avs_volume_cfg vols[SND_SOC_TPLG_MAX_CHAN] = {{0}};
	struct avs_control_data *ctl_data;
	struct avs_tplg_module *t;
	int ret, i;

	ctl_data = mc->dobj.private;
	t = mod->template;
	if (!input)
		input = ctl_data->values;

	if (mc->num_channels) {
		for (i = 0; i < mc->num_channels; i++) {
			vols[i].channel_id = i;
			vols[i].target_volume = input[i];
			vols[i].curve_type = t->cfg_ext->peakvol.curve_type;
			vols[i].curve_duration = t->cfg_ext->peakvol.curve_duration;
		}

		ret = avs_ipc_peakvol_set_volumes(adev, mod->module_id, mod->instance_id, vols,
						  mc->num_channels);
		return AVS_IPC_RET(ret);
	}

	/* Target all channels if no individual selected. */
	vols[0].channel_id = AVS_ALL_CHANNELS_MASK;
	vols[0].target_volume = input[0];
	vols[0].curve_type = t->cfg_ext->peakvol.curve_type;
	vols[0].curve_duration = t->cfg_ext->peakvol.curve_duration;

	ret = avs_ipc_peakvol_set_volume(adev, mod->module_id, mod->instance_id, &vols[0]);
	return AVS_IPC_RET(ret);
}
528
529
/*
 * Program mute state on a peakvol/gain module instance.
 *
 * Counterpart of avs_peakvol_set_volume(); note the inversion: a zero
 * @input value means muted (mute = !input[i]). When @input is NULL the
 * cached control values are used. Returns 0 or a negative error translated
 * by AVS_IPC_RET().
 */
int avs_peakvol_set_mute(struct avs_dev *adev, struct avs_path_module *mod,
			 struct soc_mixer_control *mc, long *input)
{
	struct avs_mute_cfg mutes[SND_SOC_TPLG_MAX_CHAN] = {{0}};
	struct avs_control_data *ctl_data;
	struct avs_tplg_module *t;
	int ret, i;

	ctl_data = mc->dobj.private;
	t = mod->template;
	if (!input)
		input = ctl_data->values;

	if (mc->num_channels) {
		for (i = 0; i < mc->num_channels; i++) {
			mutes[i].channel_id = i;
			mutes[i].mute = !input[i];
			mutes[i].curve_type = t->cfg_ext->peakvol.curve_type;
			mutes[i].curve_duration = t->cfg_ext->peakvol.curve_duration;
		}

		ret = avs_ipc_peakvol_set_mutes(adev, mod->module_id, mod->instance_id, mutes,
						mc->num_channels);
		return AVS_IPC_RET(ret);
	}

	/* Target all channels if no individual selected. */
	mutes[0].channel_id = AVS_ALL_CHANNELS_MASK;
	mutes[0].mute = !input[0];
	mutes[0].curve_type = t->cfg_ext->peakvol.curve_type;
	mutes[0].curve_duration = t->cfg_ext->peakvol.curve_duration;

	ret = avs_ipc_peakvol_set_mute(adev, mod->module_id, mod->instance_id, &mutes[0]);
	return AVS_IPC_RET(ret);
}
564
565
/*
 * Instantiate a peakvol/gain module, then apply the associated VOLUME and
 * MUTE controls (if the widget exposes them). The initial config targets
 * all channels at maximum volume (S32_MAX).
 */
static int avs_peakvol_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	struct avs_tplg_module *t = mod->template;
	struct soc_mixer_control *mc;
	struct avs_peakvol_cfg *cfg;
	size_t cfg_size;
	int ret;

	/* One volume entry covering all channels. */
	cfg_size = struct_size(cfg, vols, 1);
	if (cfg_size > AVS_MAILBOX_SIZE)
		return -EINVAL;

	cfg = adev->modcfg_buf;
	memset(cfg, 0, cfg_size);
	cfg->base.cpc = t->cfg_base->cpc;
	cfg->base.ibs = t->cfg_base->ibs;
	cfg->base.obs = t->cfg_base->obs;
	cfg->base.is_pages = t->cfg_base->is_pages;
	cfg->base.audio_fmt = *t->in_fmt;
	cfg->vols[0].channel_id = AVS_ALL_CHANNELS_MASK;
	cfg->vols[0].target_volume = S32_MAX;
	cfg->vols[0].curve_type = t->cfg_ext->peakvol.curve_type;
	cfg->vols[0].curve_duration = t->cfg_ext->peakvol.curve_duration;

	ret = avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id, t->core_id,
				  t->domain, cfg, cfg_size, &mod->instance_id);
	if (ret)
		return ret;

	/* Now configure both VOLUME and MUTE parameters. */
	mc = avs_get_module_control(mod, "Volume");
	if (mc) {
		ret = avs_peakvol_set_volume(adev, mod, mc, NULL);
		if (ret)
			return ret;
	}

	mc = avs_get_module_control(mod, "Switch");
	if (mc)
		return avs_peakvol_set_mute(adev, mod, mc, NULL);
	return 0;
}
607
608
/* Instantiate an up/down-mixer module from its topology configuration. */
static int avs_updown_mix_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	struct avs_tplg_module *t = mod->template;
	struct avs_updown_mixer_cfg cfg;
	int i;

	/* NOTE(review): cfg is not memset first (unlike avs_asrc_create) — confirm
	 * the struct has no padding the firmware could observe. */
	cfg.base.cpc = t->cfg_base->cpc;
	cfg.base.ibs = t->cfg_base->ibs;
	cfg.base.obs = t->cfg_base->obs;
	cfg.base.is_pages = t->cfg_base->is_pages;
	cfg.base.audio_fmt = *t->in_fmt;
	cfg.out_channel_config = t->cfg_ext->updown_mix.out_channel_config;
	cfg.coefficients_select = t->cfg_ext->updown_mix.coefficients_select;
	for (i = 0; i < AVS_COEFF_CHANNELS_MAX; i++)
		cfg.coefficients[i] = t->cfg_ext->updown_mix.coefficients[i];
	cfg.channel_map = t->cfg_ext->updown_mix.channel_map;

	return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
				   t->core_id, t->domain, &cfg, sizeof(cfg),
				   &mod->instance_id);
}
629
630
/* Instantiate a sample-rate-converter (SRC) module. */
static int avs_src_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	struct avs_tplg_module *t = mod->template;
	struct avs_src_cfg cfg;

	cfg.base.cpc = t->cfg_base->cpc;
	cfg.base.ibs = t->cfg_base->ibs;
	cfg.base.obs = t->cfg_base->obs;
	cfg.base.is_pages = t->cfg_base->is_pages;
	cfg.base.audio_fmt = *t->in_fmt;
	cfg.out_freq = t->cfg_ext->src.out_freq;

	return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
				   t->core_id, t->domain, &cfg, sizeof(cfg),
				   &mod->instance_id);
}
646
647
/* Instantiate an asynchronous sample-rate-converter (ASRC) module. */
static int avs_asrc_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	struct avs_tplg_module *t = mod->template;
	struct avs_asrc_cfg cfg;

	/* Zero the whole config; avs_asrc_cfg has fields not set below. */
	memset(&cfg, 0, sizeof(cfg));
	cfg.base.cpc = t->cfg_base->cpc;
	cfg.base.ibs = t->cfg_base->ibs;
	cfg.base.obs = t->cfg_base->obs;
	cfg.base.is_pages = t->cfg_base->is_pages;
	cfg.base.audio_fmt = *t->in_fmt;
	cfg.out_freq = t->cfg_ext->asrc.out_freq;
	cfg.mode = t->cfg_ext->asrc.mode;
	cfg.disable_jitter_buffer = t->cfg_ext->asrc.disable_jitter_buffer;

	return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
				   t->core_id, t->domain, &cfg, sizeof(cfg),
				   &mod->instance_id);
}
666
667
/* Instantiate an acoustic-echo-canceller (AEC) module. */
static int avs_aec_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	struct avs_tplg_module *t = mod->template;
	struct avs_aec_cfg cfg;

	cfg.base.cpc = t->cfg_base->cpc;
	cfg.base.ibs = t->cfg_base->ibs;
	cfg.base.obs = t->cfg_base->obs;
	cfg.base.is_pages = t->cfg_base->is_pages;
	cfg.base.audio_fmt = *t->in_fmt;
	cfg.ref_fmt = *t->cfg_ext->aec.ref_fmt;
	cfg.out_fmt = *t->cfg_ext->aec.out_fmt;
	cfg.cpc_lp_mode = t->cfg_ext->aec.cpc_lp_mode;

	return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
				   t->core_id, t->domain, &cfg, sizeof(cfg),
				   &mod->instance_id);
}
685
686
/* Instantiate a MUX module. */
static int avs_mux_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	struct avs_tplg_module *t = mod->template;
	struct avs_mux_cfg cfg;

	cfg.base.cpc = t->cfg_base->cpc;
	cfg.base.ibs = t->cfg_base->ibs;
	cfg.base.obs = t->cfg_base->obs;
	cfg.base.is_pages = t->cfg_base->is_pages;
	cfg.base.audio_fmt = *t->in_fmt;
	cfg.ref_fmt = *t->cfg_ext->mux.ref_fmt;
	cfg.out_fmt = *t->cfg_ext->mux.out_fmt;

	return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
				   t->core_id, t->domain, &cfg, sizeof(cfg),
				   &mod->instance_id);
}
703
704
/* Instantiate a Wake-on-Voice (WoV) module. */
static int avs_wov_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	struct avs_tplg_module *t = mod->template;
	struct avs_wov_cfg cfg;

	cfg.base.cpc = t->cfg_base->cpc;
	cfg.base.ibs = t->cfg_base->ibs;
	cfg.base.obs = t->cfg_base->obs;
	cfg.base.is_pages = t->cfg_base->is_pages;
	cfg.base.audio_fmt = *t->in_fmt;
	cfg.cpc_lp_mode = t->cfg_ext->wov.cpc_lp_mode;

	return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
				   t->core_id, t->domain, &cfg, sizeof(cfg),
				   &mod->instance_id);
}
720
721
/* Instantiate a microphone-select (micsel) module. */
static int avs_micsel_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	struct avs_tplg_module *t = mod->template;
	struct avs_micsel_cfg cfg;

	cfg.base.cpc = t->cfg_base->cpc;
	cfg.base.ibs = t->cfg_base->ibs;
	cfg.base.obs = t->cfg_base->obs;
	cfg.base.is_pages = t->cfg_base->is_pages;
	cfg.base.audio_fmt = *t->in_fmt;
	cfg.out_fmt = *t->cfg_ext->micsel.out_fmt;

	return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
				   t->core_id, t->domain, &cfg, sizeof(cfg),
				   &mod->instance_id);
}
737
738
/* Instantiate a module that needs only the base configuration (mixin/mixout/kpb). */
static int avs_modbase_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	struct avs_tplg_module *t = mod->template;
	struct avs_modcfg_base cfg;

	cfg.cpc = t->cfg_base->cpc;
	cfg.ibs = t->cfg_base->ibs;
	cfg.obs = t->cfg_base->obs;
	cfg.is_pages = t->cfg_base->is_pages;
	cfg.audio_fmt = *t->in_fmt;

	return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
				   t->core_id, t->domain, &cfg, sizeof(cfg),
				   &mod->instance_id);
}
753
754
/*
 * Instantiate a module of unrecognized type using the generic extended
 * configuration: base config plus explicit per-pin formats for all input
 * and output pins. Fallback used by avs_path_module_type_create().
 */
static int avs_modext_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	struct avs_tplg_module *t = mod->template;
	struct avs_tplg_modcfg_ext *tcfg = t->cfg_ext;
	struct avs_modcfg_ext *cfg;
	size_t cfg_size, num_pins;
	int ret, i;

	num_pins = tcfg->generic.num_input_pins + tcfg->generic.num_output_pins;
	cfg_size = struct_size(cfg, pin_fmts, num_pins);

	if (cfg_size > AVS_MAILBOX_SIZE)
		return -EINVAL;

	cfg = adev->modcfg_buf;
	memset(cfg, 0, cfg_size);
	cfg->base.cpc = t->cfg_base->cpc;
	cfg->base.ibs = t->cfg_base->ibs;
	cfg->base.obs = t->cfg_base->obs;
	cfg->base.is_pages = t->cfg_base->is_pages;
	cfg->base.audio_fmt = *t->in_fmt;
	cfg->num_input_pins = tcfg->generic.num_input_pins;
	cfg->num_output_pins = tcfg->generic.num_output_pins;

	/* configure pin formats */
	for (i = 0; i < num_pins; i++) {
		struct avs_tplg_pin_format *tpin = &tcfg->generic.pin_fmts[i];
		struct avs_pin_format *pin = &cfg->pin_fmts[i];

		pin->pin_index = tpin->pin_index;
		pin->iobs = tpin->iobs;
		pin->audio_fmt = *tpin->fmt;
	}

	ret = avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
				  t->core_id, t->domain, cfg, cfg_size,
				  &mod->instance_id);
	return ret;
}
793
794
static int avs_probe_create(struct avs_dev *adev, struct avs_path_module *mod)
795
{
796
dev_err(adev->dev, "Probe module can't be instantiated by topology");
797
return -EINVAL;
798
}
799
800
/* Maps a module-type UUID to its DSP instantiation handler. */
struct avs_module_create {
	guid_t *guid;			/* module type identifier from topology */
	int (*create)(struct avs_dev *adev, struct avs_path_module *mod);
};
804
805
/* Dispatch table consulted by avs_path_module_type_create(); unlisted types
 * fall back to the generic extended-config handler. */
static struct avs_module_create avs_module_create[] = {
	{ &AVS_MIXIN_MOD_UUID, avs_modbase_create },
	{ &AVS_MIXOUT_MOD_UUID, avs_modbase_create },
	{ &AVS_KPBUFF_MOD_UUID, avs_modbase_create },
	{ &AVS_COPIER_MOD_UUID, avs_copier_create },
	{ &AVS_PEAKVOL_MOD_UUID, avs_peakvol_create },
	{ &AVS_GAIN_MOD_UUID, avs_peakvol_create },
	{ &AVS_MICSEL_MOD_UUID, avs_micsel_create },
	{ &AVS_MUX_MOD_UUID, avs_mux_create },
	{ &AVS_UPDWMIX_MOD_UUID, avs_updown_mix_create },
	{ &AVS_SRCINTC_MOD_UUID, avs_src_create },
	{ &AVS_AEC_MOD_UUID, avs_aec_create },
	{ &AVS_ASRC_MOD_UUID, avs_asrc_create },
	{ &AVS_INTELWOV_MOD_UUID, avs_wov_create },
	{ &AVS_PROBE_MOD_UUID, avs_probe_create },
	{ &AVS_WOVHOSTM_MOD_UUID, avs_whm_create },
};
822
823
static int avs_path_module_type_create(struct avs_dev *adev, struct avs_path_module *mod)
824
{
825
const guid_t *type = &mod->template->cfg_ext->type;
826
827
for (int i = 0; i < ARRAY_SIZE(avs_module_create); i++)
828
if (guid_equal(type, avs_module_create[i].guid))
829
return avs_module_create[i].create(adev, mod);
830
831
return avs_modext_create(adev, mod);
832
}
833
834
/*
 * Send all topology-specified initial configuration payloads to a freshly
 * created module instance via SET_LARGE_CONFIG IPCs. Returns 0 or the
 * translated IPC error of the first failing transfer.
 */
static int avs_path_module_send_init_configs(struct avs_dev *adev, struct avs_path_module *mod)
{
	struct avs_soc_component *acomp;

	/* module -> pipeline -> path -> path template -> template owner component. */
	acomp = to_avs_soc_component(mod->template->owner->owner->owner->owner->comp);

	u32 num_ids = mod->template->num_config_ids;
	u32 *ids = mod->template->config_ids;

	for (int i = 0; i < num_ids; i++) {
		struct avs_tplg_init_config *config = &acomp->tplg->init_configs[ids[i]];
		size_t len = config->length;
		void *data = config->data;
		u32 param = config->param;
		int ret;

		ret = avs_ipc_set_large_config(adev, mod->module_id, mod->instance_id,
					       param, data, len);
		if (ret) {
			dev_err(adev->dev, "send initial module config failed: %d\n", ret);
			return AVS_IPC_RET(ret);
		}
	}

	return 0;
}
860
861
/* Release host-side bookkeeping for a module instance (DSP-side teardown
 * happens in avs_path_pipeline_free()). */
static void avs_path_module_free(struct avs_dev *adev, struct avs_path_module *mod)
{
	kfree(mod);
}
865
866
static struct avs_path_module *
867
avs_path_module_create(struct avs_dev *adev,
868
struct avs_path_pipeline *owner,
869
struct avs_tplg_module *template)
870
{
871
struct avs_path_module *mod;
872
int module_id, ret;
873
874
module_id = avs_get_module_id(adev, &template->cfg_ext->type);
875
if (module_id < 0)
876
return ERR_PTR(module_id);
877
878
mod = kzalloc(sizeof(*mod), GFP_KERNEL);
879
if (!mod)
880
return ERR_PTR(-ENOMEM);
881
882
mod->template = template;
883
mod->module_id = module_id;
884
mod->owner = owner;
885
INIT_LIST_HEAD(&mod->node);
886
887
ret = avs_path_module_type_create(adev, mod);
888
if (ret) {
889
dev_err(adev->dev, "module-type create failed: %d\n", ret);
890
kfree(mod);
891
return ERR_PTR(ret);
892
}
893
894
ret = avs_path_module_send_init_configs(adev, mod);
895
if (ret) {
896
kfree(mod);
897
return ERR_PTR(ret);
898
}
899
900
return mod;
901
}
902
903
/*
 * Resolve a binding's endpoints into concrete module instances.
 *
 * Looks up the local module within the owning pipeline and the target
 * module in the (possibly different) target path/pipeline, then assigns
 * source/sink roles and pins according to the template's is_sink flag.
 * Returns -EINVAL when any endpoint cannot be found.
 */
static int avs_path_binding_arm(struct avs_dev *adev, struct avs_path_binding *binding)
{
	struct avs_path_module *this_mod, *target_mod;
	struct avs_path_pipeline *target_ppl;
	struct avs_path *target_path;
	struct avs_tplg_binding *t;

	t = binding->template;
	this_mod = avs_path_find_module(binding->owner,
					t->mod_id);
	if (!this_mod) {
		dev_err(adev->dev, "path mod %d not found\n", t->mod_id);
		return -EINVAL;
	}

	/* update with target_tplg_name too */
	target_path = avs_path_find_path(adev, t->target_tplg_name,
					 t->target_path_tmpl_id);
	if (!target_path) {
		dev_err(adev->dev, "target path %s:%d not found\n",
			t->target_tplg_name, t->target_path_tmpl_id);
		return -EINVAL;
	}

	target_ppl = avs_path_find_pipeline(target_path,
					    t->target_ppl_id);
	if (!target_ppl) {
		dev_err(adev->dev, "target ppl %d not found\n", t->target_ppl_id);
		return -EINVAL;
	}

	target_mod = avs_path_find_module(target_ppl, t->target_mod_id);
	if (!target_mod) {
		dev_err(adev->dev, "target mod %d not found\n", t->target_mod_id);
		return -EINVAL;
	}

	/* is_sink tells whether the local module receives or produces data. */
	if (t->is_sink) {
		binding->sink = this_mod;
		binding->sink_pin = t->mod_pin;
		binding->source = target_mod;
		binding->source_pin = t->target_mod_pin;
	} else {
		binding->sink = target_mod;
		binding->sink_pin = t->target_mod_pin;
		binding->source = this_mod;
		binding->source_pin = t->mod_pin;
	}

	return 0;
}
954
955
/* Release a binding descriptor (modules it references are owned elsewhere). */
static void avs_path_binding_free(struct avs_dev *adev, struct avs_path_binding *binding)
{
	kfree(binding);
}
959
960
static struct avs_path_binding *avs_path_binding_create(struct avs_dev *adev,
961
struct avs_path_pipeline *owner,
962
struct avs_tplg_binding *t)
963
{
964
struct avs_path_binding *binding;
965
966
binding = kzalloc(sizeof(*binding), GFP_KERNEL);
967
if (!binding)
968
return ERR_PTR(-ENOMEM);
969
970
binding->template = t;
971
binding->owner = owner;
972
INIT_LIST_HEAD(&binding->node);
973
974
return binding;
975
}
976
977
/*
 * Bind each module in a pipeline to its successor on the module list,
 * forming a linear processing chain (pin 0 to pin 0). Returns 0 or the
 * translated IPC error of the first failing bind.
 */
static int avs_path_pipeline_arm(struct avs_dev *adev,
				 struct avs_path_pipeline *ppl)
{
	struct avs_path_module *mod;

	list_for_each_entry(mod, &ppl->mod_list, node) {
		struct avs_path_module *source, *sink;
		int ret;

		/*
		 * Only one module (so it's implicitly last) or it is the last
		 * one, either way we don't have next module to bind it to.
		 */
		if (mod == list_last_entry(&ppl->mod_list,
					   struct avs_path_module, node))
			break;

		/* bind current module to next module on list */
		source = mod;
		sink = list_next_entry(mod, node);

		ret = avs_ipc_bind(adev, source->module_id, source->instance_id,
				   sink->module_id, sink->instance_id, 0, 0);
		if (ret)
			return AVS_IPC_RET(ret);
	}

	return 0;
}
1006
1007
/*
 * Tear down a pipeline: free bindings, delete the DSP pipeline, delete and
 * free all owned module instances, then unlink and free the pipeline itself.
 */
static void avs_path_pipeline_free(struct avs_dev *adev,
				   struct avs_path_pipeline *ppl)
{
	struct avs_path_binding *binding, *bsave;
	struct avs_path_module *mod, *save;

	list_for_each_entry_safe(binding, bsave, &ppl->binding_list, node) {
		list_del(&binding->node);
		avs_path_binding_free(adev, binding);
	}

	/* Delete the DSP-side pipeline before releasing its modules. */
	avs_dsp_delete_pipeline(adev, ppl->instance_id);

	/* Unload resources occupied by owned modules */
	list_for_each_entry_safe(mod, save, &ppl->mod_list, node) {
		avs_dsp_delete_module(adev, mod->module_id, mod->instance_id,
				      mod->owner->instance_id,
				      mod->template->core_id);
		avs_path_module_free(adev, mod);
	}

	list_del(&ppl->node);
	kfree(ppl);
}
1031
1032
static struct avs_path_pipeline *
1033
avs_path_pipeline_create(struct avs_dev *adev, struct avs_path *owner,
1034
struct avs_tplg_pipeline *template)
1035
{
1036
struct avs_path_pipeline *ppl;
1037
struct avs_tplg_pplcfg *cfg = template->cfg;
1038
struct avs_tplg_module *tmod;
1039
int ret, i;
1040
1041
ppl = kzalloc(sizeof(*ppl), GFP_KERNEL);
1042
if (!ppl)
1043
return ERR_PTR(-ENOMEM);
1044
1045
ppl->template = template;
1046
ppl->owner = owner;
1047
INIT_LIST_HEAD(&ppl->binding_list);
1048
INIT_LIST_HEAD(&ppl->mod_list);
1049
INIT_LIST_HEAD(&ppl->node);
1050
1051
ret = avs_dsp_create_pipeline(adev, cfg->req_size, cfg->priority,
1052
cfg->lp, cfg->attributes,
1053
&ppl->instance_id);
1054
if (ret) {
1055
dev_err(adev->dev, "error creating pipeline %d\n", ret);
1056
kfree(ppl);
1057
return ERR_PTR(ret);
1058
}
1059
1060
list_for_each_entry(tmod, &template->mod_list, node) {
1061
struct avs_path_module *mod;
1062
1063
mod = avs_path_module_create(adev, ppl, tmod);
1064
if (IS_ERR(mod)) {
1065
ret = PTR_ERR(mod);
1066
dev_err(adev->dev, "error creating module %d\n", ret);
1067
goto init_err;
1068
}
1069
1070
list_add_tail(&mod->node, &ppl->mod_list);
1071
}
1072
1073
for (i = 0; i < template->num_bindings; i++) {
1074
struct avs_path_binding *binding;
1075
1076
binding = avs_path_binding_create(adev, ppl, template->bindings[i]);
1077
if (IS_ERR(binding)) {
1078
ret = PTR_ERR(binding);
1079
dev_err(adev->dev, "error creating binding %d\n", ret);
1080
goto init_err;
1081
}
1082
1083
list_add_tail(&binding->node, &ppl->binding_list);
1084
}
1085
1086
return ppl;
1087
1088
init_err:
1089
avs_path_pipeline_free(adev, ppl);
1090
return ERR_PTR(ret);
1091
}
1092
1093
/*
 * First stage of path construction: initialize @path, create all pipelines
 * described by @template and publish the path on adev->path_list.
 *
 * On pipeline-creation failure the partially built path is left as-is for
 * the caller to dispose of (see avs_path_create_unlocked()).
 */
static int avs_path_init(struct avs_dev *adev, struct avs_path *path,
			 struct avs_tplg_path *template, u32 dma_id)
{
	struct avs_tplg_pipeline *tppl;

	path->owner = adev;
	path->template = template;
	path->dma_id = dma_id;
	INIT_LIST_HEAD(&path->ppl_list);
	INIT_LIST_HEAD(&path->node);
	INIT_LIST_HEAD(&path->source_list);
	INIT_LIST_HEAD(&path->sink_list);
	INIT_LIST_HEAD(&path->source_node);
	INIT_LIST_HEAD(&path->sink_node);

	/* create all the pipelines */
	list_for_each_entry(tppl, &template->ppl_list, node) {
		struct avs_path_pipeline *ppl;

		ppl = avs_path_pipeline_create(adev, path, tppl);
		if (IS_ERR(ppl))
			return PTR_ERR(ppl);

		list_add_tail(&ppl->node, &path->ppl_list);
	}

	spin_lock(&adev->path_list_lock);
	list_add_tail(&path->node, &adev->path_list);
	spin_unlock(&adev->path_list_lock);

	return 0;
}
1125
1126
static int avs_path_arm(struct avs_dev *adev, struct avs_path *path)
1127
{
1128
struct avs_path_pipeline *ppl;
1129
struct avs_path_binding *binding;
1130
int ret;
1131
1132
list_for_each_entry(ppl, &path->ppl_list, node) {
1133
/*
1134
* Arm all ppl bindings before binding internal modules
1135
* as it costs no IPCs which isn't true for the latter.
1136
*/
1137
list_for_each_entry(binding, &ppl->binding_list, node) {
1138
ret = avs_path_binding_arm(adev, binding);
1139
if (ret < 0)
1140
return ret;
1141
}
1142
1143
ret = avs_path_pipeline_arm(adev, ppl);
1144
if (ret < 0)
1145
return ret;
1146
}
1147
1148
return 0;
1149
}
1150
1151
static void avs_path_free_unlocked(struct avs_path *path)
1152
{
1153
struct avs_path_pipeline *ppl, *save;
1154
1155
spin_lock(&path->owner->path_list_lock);
1156
list_del(&path->node);
1157
spin_unlock(&path->owner->path_list_lock);
1158
1159
list_for_each_entry_safe(ppl, save, &path->ppl_list, node)
1160
avs_path_pipeline_free(path->owner, ppl);
1161
1162
kfree(path);
1163
}
1164
1165
/*
 * Allocate, initialize and arm a path instance for @template.
 *
 * Returns a valid path or an ERR_PTR(). The new path starts out in the
 * INVALID state; callers transition it via reset/pause/run. Locking is the
 * caller's responsibility (see avs_path_create()).
 */
static struct avs_path *avs_path_create_unlocked(struct avs_dev *adev, u32 dma_id,
						 struct avs_tplg_path *template)
{
	struct avs_path *path;
	int ret;

	path = kzalloc(sizeof(*path), GFP_KERNEL);
	if (!path)
		return ERR_PTR(-ENOMEM);

	ret = avs_path_init(adev, path, template, dma_id);
	if (ret < 0)
		goto err;

	ret = avs_path_arm(adev, path);
	if (ret < 0)
		goto err;

	path->state = AVS_PPL_STATE_INVALID;
	return path;
err:
	/* Safe even for a partially initialized path; frees what was built. */
	avs_path_free_unlocked(path);
	return ERR_PTR(ret);
}
1189
1190
/*
 * Detach a conditional path from its source and sink parents, stop it
 * (reset + unbind) and free it. Errors during stop are only logged as the
 * teardown must proceed regardless.
 *
 * NOTE(review): presumably called with adev->path_mutex held, as it walks
 * lists guarded by it elsewhere — confirm against callers.
 */
static void avs_condpath_free(struct avs_dev *adev, struct avs_path *path)
{
	int ret;

	list_del(&path->source_node);
	list_del(&path->sink_node);

	ret = avs_path_reset(path);
	if (ret < 0)
		dev_err(adev->dev, "reset condpath failed: %d\n", ret);

	ret = avs_path_unbind(path);
	if (ret < 0)
		dev_err(adev->dev, "unbind condpath failed: %d\n", ret);

	avs_path_free_unlocked(path);
}
1207
1208
/*
 * Create a conditional path connecting @source and @sink paths: instantiate
 * the path (dma_id is 0 as condpaths carry no host DMA stream), bind and
 * reset it, then register it on both parents' lists so their teardown and
 * trigger handling can find it.
 *
 * Returns a valid path or an ERR_PTR().
 */
static struct avs_path *avs_condpath_create(struct avs_dev *adev,
					    struct avs_tplg_path *template,
					    struct avs_path *source,
					    struct avs_path *sink)
{
	struct avs_path *path;
	int ret;

	path = avs_path_create_unlocked(adev, 0, template);
	if (IS_ERR(path))
		return path;

	ret = avs_path_bind(path);
	if (ret)
		goto err_bind;

	ret = avs_path_reset(path);
	if (ret)
		goto err_reset;

	path->source = source;
	path->sink = sink;
	list_add_tail(&path->source_node, &source->source_list);
	list_add_tail(&path->sink_node, &sink->sink_list);

	return path;

err_reset:
	avs_path_unbind(path);
err_bind:
	avs_path_free_unlocked(path);
	return ERR_PTR(ret);
}
1241
1242
/*
 * Scan all loaded topologies for conditional-path templates that match
 * @path acting as source (@dir != 0) or sink (@dir == 0), and instantiate
 * a condpath for every match found.
 *
 * Exactly one of source/sink aliases @path; the other is filled in through
 * @other once avs_condpath_find_match() locates a counterpart, so both are
 * initialized by the time they are used below.
 */
static int avs_condpaths_walk(struct avs_dev *adev, struct avs_path *path, int dir)
{
	struct avs_soc_component *acomp;
	struct avs_path *source, *sink;
	struct avs_path **other;

	if (dir) {
		source = path;
		other = &sink;
	} else {
		sink = path;
		other = &source;
	}

	list_for_each_entry(acomp, &adev->comp_list, node) {
		for (int i = 0; i < acomp->tplg->num_condpath_tmpls; i++) {
			struct avs_tplg_path_template *template;
			struct avs_tplg_path *variant;
			struct avs_path *cpath;

			template = &acomp->tplg->condpath_tmpls[i];

			/* Do not create unidirectional condpaths twice. */
			if (avs_tplg_path_template_id_equal(&template->source,
							    &template->sink) && dir)
				continue;

			*other = avs_condpath_find_match(adev, template, path, dir);
			if (!*other)
				continue;

			variant = avs_condpath_find_variant(adev, template, source, sink);
			if (!variant)
				continue;

			cpath = avs_condpath_create(adev, variant, source, sink);
			if (IS_ERR(cpath))
				return PTR_ERR(cpath);
		}
	}

	return 0;
}
1285
1286
/* Caller responsible for holding adev->path_mutex. */
1287
static int avs_condpaths_walk_all(struct avs_dev *adev, struct avs_path *path)
1288
{
1289
int ret;
1290
1291
ret = avs_condpaths_walk(adev, path, SNDRV_PCM_STREAM_CAPTURE);
1292
if (ret)
1293
return ret;
1294
1295
return avs_condpaths_walk(adev, path, SNDRV_PCM_STREAM_PLAYBACK);
1296
}
1297
1298
/*
 * Public teardown entry point: under adev->path_mutex, free every
 * conditional path attached to @path (in either role) and then @path
 * itself.
 */
void avs_path_free(struct avs_path *path)
{
	struct avs_path *cpath, *csave;
	struct avs_dev *adev = path->owner;

	mutex_lock(&adev->path_mutex);

	/* Free all condpaths this path spawned. */
	list_for_each_entry_safe(cpath, csave, &path->source_list, source_node)
		avs_condpath_free(path->owner, cpath);
	list_for_each_entry_safe(cpath, csave, &path->sink_list, sink_node)
		avs_condpath_free(path->owner, cpath);

	avs_path_free_unlocked(path);

	mutex_unlock(&adev->path_mutex);
}
1315
1316
/*
 * Public creation entry point: pick the template variant matching the FE/BE
 * hw_params, build the path and spawn any conditional paths it enables.
 *
 * Lock order: path_mutex (serializes path construction) before
 * comp_list_mutex (needed by avs_path_find_tplg() during the condpath walk).
 *
 * Returns a valid path or an ERR_PTR().
 */
struct avs_path *avs_path_create(struct avs_dev *adev, u32 dma_id,
				 struct avs_tplg_path_template *template,
				 struct snd_pcm_hw_params *fe_params,
				 struct snd_pcm_hw_params *be_params)
{
	struct avs_tplg_path *variant;
	struct avs_path *path;
	int ret;

	variant = avs_path_find_variant(adev, template, fe_params, be_params);
	if (!variant) {
		dev_err(adev->dev, "no matching variant found\n");
		return ERR_PTR(-ENOENT);
	}

	/* Serialize path and its components creation. */
	mutex_lock(&adev->path_mutex);
	/* Satisfy needs of avs_path_find_tplg(). */
	mutex_lock(&adev->comp_list_mutex);

	path = avs_path_create_unlocked(adev, dma_id, variant);
	if (IS_ERR(path))
		goto exit;

	ret = avs_condpaths_walk_all(adev, path);
	if (ret) {
		/* Condpath failure invalidates the whole path. */
		avs_path_free_unlocked(path);
		path = ERR_PTR(ret);
	}

exit:
	mutex_unlock(&adev->comp_list_mutex);
	mutex_unlock(&adev->path_mutex);

	return path;
}
1352
1353
/*
 * Pre-bind fixup: a copier module about to be bound through an output pin
 * other than 0 must have that sink pin's format configured first. All other
 * bindings need no preparation and return 0 immediately.
 */
static int avs_path_bind_prepare(struct avs_dev *adev,
				 struct avs_path_binding *binding)
{
	const struct avs_audio_format *src_fmt, *sink_fmt;
	struct avs_tplg_module *tsource = binding->source->template;
	struct avs_path_module *source = binding->source;
	int ret;

	/*
	 * only copier modules about to be bound
	 * to output pin other than 0 need preparation
	 */
	if (!binding->source_pin)
		return 0;
	if (!guid_equal(&tsource->cfg_ext->type, &AVS_COPIER_MOD_UUID))
		return 0;

	src_fmt = tsource->in_fmt;
	sink_fmt = binding->sink->template->in_fmt;

	ret = avs_ipc_copier_set_sink_format(adev, source->module_id,
					     source->instance_id, binding->source_pin,
					     src_fmt, sink_fmt);
	if (ret) {
		dev_err(adev->dev, "config copier failed: %d\n", ret);
		return AVS_IPC_RET(ret);
	}

	return 0;
}
1383
1384
/*
 * Establish every module connection described by the binding descriptors of
 * all pipelines in @path. Each binding is prepared (copier sink-format
 * fixup) before the actual IPC bind is issued.
 */
int avs_path_bind(struct avs_path *path)
{
	struct avs_path_pipeline *ppl;
	struct avs_dev *adev = path->owner;
	int ret;

	list_for_each_entry(ppl, &path->ppl_list, node) {
		struct avs_path_binding *binding;

		list_for_each_entry(binding, &ppl->binding_list, node) {
			struct avs_path_module *source, *sink;

			source = binding->source;
			sink = binding->sink;

			ret = avs_path_bind_prepare(adev, binding);
			if (ret < 0)
				return ret;

			ret = avs_ipc_bind(adev, source->module_id,
					   source->instance_id, sink->module_id,
					   sink->instance_id, binding->sink_pin,
					   binding->source_pin);
			if (ret) {
				dev_err(adev->dev, "bind path failed: %d\n", ret);
				return AVS_IPC_RET(ret);
			}
		}
	}

	return 0;
}
1416
1417
int avs_path_unbind(struct avs_path *path)
1418
{
1419
struct avs_path_pipeline *ppl;
1420
struct avs_dev *adev = path->owner;
1421
int ret;
1422
1423
list_for_each_entry(ppl, &path->ppl_list, node) {
1424
struct avs_path_binding *binding;
1425
1426
list_for_each_entry(binding, &ppl->binding_list, node) {
1427
struct avs_path_module *source, *sink;
1428
1429
source = binding->source;
1430
sink = binding->sink;
1431
1432
ret = avs_ipc_unbind(adev, source->module_id,
1433
source->instance_id, sink->module_id,
1434
sink->instance_id, binding->sink_pin,
1435
binding->source_pin);
1436
if (ret) {
1437
dev_err(adev->dev, "unbind path failed: %d\n", ret);
1438
return AVS_IPC_RET(ret);
1439
}
1440
}
1441
}
1442
1443
return 0;
1444
}
1445
1446
/*
 * Transition every pipeline of @path to the RESET state. No-op if the path
 * is already reset; on IPC failure the path is marked INVALID.
 */
int avs_path_reset(struct avs_path *path)
{
	struct avs_path_pipeline *ppl;
	struct avs_dev *adev = path->owner;
	int ret;

	if (path->state == AVS_PPL_STATE_RESET)
		return 0;

	list_for_each_entry(ppl, &path->ppl_list, node) {
		ret = avs_ipc_set_pipeline_state(adev, ppl->instance_id,
						 AVS_PPL_STATE_RESET);
		if (ret) {
			dev_err(adev->dev, "reset path failed: %d\n", ret);
			path->state = AVS_PPL_STATE_INVALID;
			return AVS_IPC_RET(ret);
		}
	}

	path->state = AVS_PPL_STATE_RESET;
	return 0;
}
1468
1469
/*
 * Pause a conditional path, walking its pipelines in reverse creation order.
 * No-op if already paused; on IPC failure the condpath is marked INVALID.
 */
static int avs_condpath_pause(struct avs_dev *adev, struct avs_path *cpath)
{
	struct avs_path_pipeline *ppl;
	int ret;

	if (cpath->state == AVS_PPL_STATE_PAUSED)
		return 0;

	list_for_each_entry_reverse(ppl, &cpath->ppl_list, node) {
		ret = avs_ipc_set_pipeline_state(adev, ppl->instance_id, AVS_PPL_STATE_PAUSED);
		if (ret) {
			dev_err(adev->dev, "pause cpath failed: %d\n", ret);
			cpath->state = AVS_PPL_STATE_INVALID;
			return AVS_IPC_RET(ret);
		}
	}

	cpath->state = AVS_PPL_STATE_PAUSED;
	return 0;
}
1489
1490
/*
 * Pause all conditional paths attached to @path, whether it acts as their
 * source or their sink. Takes adev->path_mutex to guard the condpath lists;
 * individual pause failures are logged by avs_condpath_pause() and ignored.
 */
static void avs_condpaths_pause(struct avs_dev *adev, struct avs_path *path)
{
	struct avs_path *cpath;

	mutex_lock(&adev->path_mutex);

	/* If either source or sink stops, so do the attached conditional paths. */
	list_for_each_entry(cpath, &path->source_list, source_node)
		avs_condpath_pause(adev, cpath);
	list_for_each_entry(cpath, &path->sink_list, sink_node)
		avs_condpath_pause(adev, cpath);

	mutex_unlock(&adev->path_mutex);
}
1504
1505
/*
 * Pause @path: first its attached conditional paths, then its own pipelines
 * in reverse creation order. No-op if already paused; on IPC failure the
 * path is marked INVALID.
 */
int avs_path_pause(struct avs_path *path)
{
	struct avs_path_pipeline *ppl;
	struct avs_dev *adev = path->owner;
	int ret;

	if (path->state == AVS_PPL_STATE_PAUSED)
		return 0;

	avs_condpaths_pause(adev, path);

	list_for_each_entry_reverse(ppl, &path->ppl_list, node) {
		ret = avs_ipc_set_pipeline_state(adev, ppl->instance_id,
						 AVS_PPL_STATE_PAUSED);
		if (ret) {
			dev_err(adev->dev, "pause path failed: %d\n", ret);
			path->state = AVS_PPL_STATE_INVALID;
			return AVS_IPC_RET(ret);
		}
	}

	path->state = AVS_PPL_STATE_PAUSED;
	return 0;
}
1529
1530
/*
 * Start a conditional path, transitioning only those pipelines whose
 * configured trigger matches @trigger. No-op if already running; on IPC
 * failure the condpath is marked INVALID.
 */
static int avs_condpath_run(struct avs_dev *adev, struct avs_path *cpath, int trigger)
{
	struct avs_path_pipeline *ppl;
	int ret;

	if (cpath->state == AVS_PPL_STATE_RUNNING)
		return 0;

	list_for_each_entry(ppl, &cpath->ppl_list, node) {
		/* Skip pipelines armed for a different trigger type. */
		if (ppl->template->cfg->trigger != trigger)
			continue;

		ret = avs_ipc_set_pipeline_state(adev, ppl->instance_id, AVS_PPL_STATE_RUNNING);
		if (ret) {
			dev_err(adev->dev, "run cpath failed: %d\n", ret);
			cpath->state = AVS_PPL_STATE_INVALID;
			return AVS_IPC_RET(ret);
		}
	}

	cpath->state = AVS_PPL_STATE_RUNNING;
	return 0;
}
1553
1554
/*
 * Start all conditional paths attached to @path whose source AND sink paths
 * are both running. Takes adev->path_mutex to guard the condpath lists;
 * individual run failures are logged by avs_condpath_run() and ignored.
 */
static void avs_condpaths_run(struct avs_dev *adev, struct avs_path *path, int trigger)
{
	struct avs_path *cpath;

	mutex_lock(&adev->path_mutex);

	/* Run conditional paths only if source and sink are both running. */
	list_for_each_entry(cpath, &path->source_list, source_node)
		if (cpath->source->state == AVS_PPL_STATE_RUNNING &&
		    cpath->sink->state == AVS_PPL_STATE_RUNNING)
			avs_condpath_run(adev, cpath, trigger);

	list_for_each_entry(cpath, &path->sink_list, sink_node)
		if (cpath->source->state == AVS_PPL_STATE_RUNNING &&
		    cpath->sink->state == AVS_PPL_STATE_RUNNING)
			avs_condpath_run(adev, cpath, trigger);

	mutex_unlock(&adev->path_mutex);
}
1573
1574
/*
 * Start @path: transition pipelines whose configured trigger matches
 * @trigger to RUNNING and, for AVS_TPLG_TRIGGER_AUTO, kick attached
 * conditional paths as well. Already-running paths are a no-op only for
 * the AUTO trigger, so granular (non-AUTO) triggers always re-walk the
 * pipelines. On IPC failure the path is marked INVALID.
 */
int avs_path_run(struct avs_path *path, int trigger)
{
	struct avs_path_pipeline *ppl;
	struct avs_dev *adev = path->owner;
	int ret;

	if (path->state == AVS_PPL_STATE_RUNNING && trigger == AVS_TPLG_TRIGGER_AUTO)
		return 0;

	list_for_each_entry(ppl, &path->ppl_list, node) {
		/* Skip pipelines armed for a different trigger type. */
		if (ppl->template->cfg->trigger != trigger)
			continue;

		ret = avs_ipc_set_pipeline_state(adev, ppl->instance_id,
						 AVS_PPL_STATE_RUNNING);
		if (ret) {
			dev_err(adev->dev, "run path failed: %d\n", ret);
			path->state = AVS_PPL_STATE_INVALID;
			return AVS_IPC_RET(ret);
		}
	}

	path->state = AVS_PPL_STATE_RUNNING;

	/* Granular pipeline triggering not intended for conditional paths. */
	if (trigger == AVS_TPLG_TRIGGER_AUTO)
		avs_condpaths_run(adev, path, trigger);

	return 0;
}
1604
1605