GitHub Repository: torvalds/linux
Path: blob/master/sound/soc/intel/avs/core.c
// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright(c) 2021-2022 Intel Corporation
//
// Authors: Cezary Rojewski <[email protected]>
//          Amadeusz Slawinski <[email protected]>
//
// Special thanks to:
//    Krzysztof Hejmowski <[email protected]>
//    Michal Sienkiewicz <[email protected]>
//    Filip Proborszcz
//
// for sharing Intel AudioDSP expertise and helping shape the very
// foundation of this driver
//

#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <acpi/nhlt.h>
#include <sound/hda_codec.h>
#include <sound/hda_i915.h>
#include <sound/hda_register.h>
#include <sound/hdaudio.h>
#include <sound/hdaudio_ext.h>
#include <sound/intel-dsp-config.h>
#include "../../codecs/hda.h"
#include "avs.h"
#include "cldma.h"
#include "debug.h"
#include "messages.h"
#include "pcm.h"

static u32 pgctl_mask = AZX_PGCTL_LSRMD_MASK;
module_param(pgctl_mask, uint, 0444);
MODULE_PARM_DESC(pgctl_mask, "PCI PGCTL policy override");

static u32 cgctl_mask = AZX_CGCTL_MISCBDCGE_MASK;
module_param(cgctl_mask, uint, 0444);
MODULE_PARM_DESC(cgctl_mask, "PCI CGCTL policy override");

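/* Read-modify-write of a dword in the HDA controller's PCI config space. */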
static void
avs_hda_update_config_dword(struct hdac_bus *bus, u32 reg, u32 mask, u32 value)
{
	struct pci_dev *pci = to_pci_dev(bus->dev);
	u32 data;

	pci_read_config_dword(pci, reg, &data);
	data &= ~mask;
	data |= (value & mask);
	pci_write_config_dword(pci, reg, data);
}

void avs_hda_power_gating_enable(struct avs_dev *adev, bool enable)
{
	u32 value = enable ? 0 : pgctl_mask;

	if (!avs_platattr_test(adev, ACE))
		avs_hda_update_config_dword(&adev->base.core, AZX_PCIREG_PGCTL, pgctl_mask, value);
}

static void avs_hdac_clock_gating_enable(struct hdac_bus *bus, bool enable)
{
	struct avs_dev *adev = hdac_to_avs(bus);
	u32 value = enable ? cgctl_mask : 0;

	if (!avs_platattr_test(adev, ACE))
		avs_hda_update_config_dword(bus, AZX_PCIREG_CGCTL, cgctl_mask, value);
}

void avs_hda_clock_gating_enable(struct avs_dev *adev, bool enable)
{
	avs_hdac_clock_gating_enable(&adev->base.core, enable);
}

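/*
 * Reference-counted toggle of the L1SEN bit in the vendor-specific EM2
 * register so that concurrent requests do not override each other.
 */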
void avs_hda_l1sen_enable(struct avs_dev *adev, bool enable)
{
	if (avs_platattr_test(adev, ACE))
		return;
	if (enable) {
		if (atomic_inc_and_test(&adev->l1sen_counter))
			snd_hdac_chip_updatel(&adev->base.core, VS_EM2, AZX_VS_EM2_L1SEN,
					      AZX_VS_EM2_L1SEN);
	} else {
		if (atomic_dec_return(&adev->l1sen_counter) == -1)
			snd_hdac_chip_updatel(&adev->base.core, VS_EM2, AZX_VS_EM2_L1SEN, 0);
	}
}

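/*
 * Read stream counts from GCAP, initialize the extended stream descriptors
 * and allocate BDL and position buffer pages for them.
 */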
static int avs_hdac_bus_init_streams(struct hdac_bus *bus)
{
	unsigned int cp_streams, pb_streams;
	unsigned int gcap;

	gcap = snd_hdac_chip_readw(bus, GCAP);
	cp_streams = (gcap >> 8) & 0x0F;
	pb_streams = (gcap >> 12) & 0x0F;
	bus->num_streams = cp_streams + pb_streams;

	snd_hdac_ext_stream_init_all(bus, 0, cp_streams, SNDRV_PCM_STREAM_CAPTURE);
	snd_hdac_ext_stream_init_all(bus, cp_streams, pb_streams, SNDRV_PCM_STREAM_PLAYBACK);

	return snd_hdac_bus_alloc_stream_pages(bus);
}

static bool avs_hdac_bus_init_chip(struct hdac_bus *bus, bool full_reset)
{
	struct avs_dev *adev = hdac_to_avs(bus);
	struct hdac_ext_link *hlink;
	bool ret;

	avs_hdac_clock_gating_enable(bus, false);
	ret = snd_hdac_bus_init_chip(bus, full_reset);

	/* Reset stream-to-link mapping */
	list_for_each_entry(hlink, &bus->hlink_list, list)
		writel(0, hlink->ml_addr + AZX_REG_ML_LOSIDV);

	avs_hdac_clock_gating_enable(bus, true);

	/* Set DUM bit to address incorrect position reporting for capture
	 * streams. In order to do so, CTRL needs to be out of reset state
	 */
	if (!avs_platattr_test(adev, ACE))
		snd_hdac_chip_updatel(bus, VS_EM2, AZX_VS_EM2_DUM, AZX_VS_EM2_DUM);

	return ret;
}

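/*
 * Verify that a codec responds at the given address, then create and
 * configure its HDA codec device.
 */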
static int probe_codec(struct hdac_bus *bus, int addr)
{
	struct hda_codec *codec;
	unsigned int cmd = (addr << 28) | (AC_NODE_ROOT << 20) |
			   (AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
	unsigned int res = -1;
	int ret;

	mutex_lock(&bus->cmd_mutex);
	snd_hdac_bus_send_cmd(bus, cmd);
	snd_hdac_bus_get_response(bus, addr, &res);
	mutex_unlock(&bus->cmd_mutex);
	if (res == -1)
		return -EIO;

	dev_dbg(bus->dev, "codec #%d probed OK: 0x%x\n", addr, res);

	codec = snd_hda_codec_device_init(to_hda_bus(bus), addr, "hdaudioB%dD%d", bus->idx, addr);
	if (IS_ERR(codec)) {
		dev_err(bus->dev, "init codec failed: %ld\n", PTR_ERR(codec));
		return PTR_ERR(codec);
	}
	/*
	 * Allow avs_core suspend by forcing suspended state on all
	 * of its codec child devices. Component interested in
	 * dealing with hda codecs directly takes pm responsibilities
	 */
	pm_runtime_set_suspended(hda_codec_dev(codec));

	/* configure effectively creates new ASoC component */
	ret = snd_hda_codec_configure(codec);
	if (ret < 0) {
		dev_warn(bus->dev, "failed to config codec #%d: %d\n", addr, ret);
		return ret;
	}

	return 0;
}

static void avs_hdac_bus_probe_codecs(struct hdac_bus *bus)
{
	int ret, c;

	/* First try to probe all given codec slots */
	for (c = 0; c < HDA_MAX_CODECS; c++) {
		if (!(bus->codec_mask & BIT(c)))
			continue;

		ret = probe_codec(bus, c);
		/* Ignore codecs with no supporting driver. */
		if (!ret || ret == -ENODEV)
			continue;

		/*
		 * Some BIOSes report codec addresses that don't actually exist
		 */
		dev_warn(bus->dev, "Codec #%d probe error; disabling it...\n", c);
		bus->codec_mask &= ~BIT(c);
		/*
		 * Worse, accessing a non-existent codec often screws up the
		 * controller bus and disturbs further communication. Thus, if
		 * an error occurs during probing, reset the controller bus to
		 * get back to a sane state.
		 */
		snd_hdac_bus_stop_chip(bus);
		avs_hdac_bus_init_chip(bus, true);
	}
}

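/*
 * Deferred part of the PCI probe: initialize the controller, probe codecs,
 * boot the base firmware, register machine boards and enable runtime PM.
 */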
static void avs_hda_probe_work(struct work_struct *work)
{
	struct avs_dev *adev = container_of(work, struct avs_dev, probe_work);
	struct hdac_bus *bus = &adev->base.core;
	struct hdac_ext_link *hlink;
	int ret;

	pm_runtime_set_active(bus->dev); /* clear runtime_error flag */

	snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, true);
	avs_hdac_bus_init_chip(bus, true);
	avs_hdac_bus_probe_codecs(bus);
	snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);

	/* with all codecs probed, links can be powered down */
	list_for_each_entry(hlink, &bus->hlink_list, list)
		snd_hdac_ext_bus_link_put(bus, hlink);

	snd_hdac_ext_bus_ppcap_enable(bus, true);
	snd_hdac_ext_bus_ppcap_int_enable(bus, true);
	avs_debugfs_init(adev);

	ret = avs_dsp_first_boot_firmware(adev);
	if (ret < 0)
		return;

	acpi_nhlt_get_gbl_table();

	avs_register_all_boards(adev);

	/* configure PM */
	pm_runtime_set_autosuspend_delay(bus->dev, 2000);
	pm_runtime_use_autosuspend(bus->dev);
	pm_runtime_put_autosuspend(bus->dev);
	pm_runtime_allow(bus->dev);
}

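/*
 * Advance the stream's cumulative position by the number of bytes consumed
 * since the previous update, accounting for ring buffer wrap-around.
 */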
static void hdac_stream_update_pos(struct hdac_stream *stream, u64 buffer_size)
{
	u64 prev_pos, pos, num_bytes;

	div64_u64_rem(stream->curr_pos, buffer_size, &prev_pos);
	pos = snd_hdac_stream_get_pos_posbuf(stream);

	if (pos < prev_pos)
		num_bytes = (buffer_size - prev_pos) + pos;
	else
		num_bytes = pos - prev_pos;

	stream->curr_pos += num_bytes;
}

/* called from IRQ */
static void hdac_update_stream(struct hdac_bus *bus, struct hdac_stream *stream)
{
	if (stream->substream) {
		avs_period_elapsed(stream->substream);
	} else if (stream->cstream) {
		u64 buffer_size = stream->cstream->runtime->buffer_size;

		hdac_stream_update_pos(stream, buffer_size);
		snd_compr_fragment_elapsed(stream->cstream);
	}
}

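/* Process stream completions and RIRB responses signalled by the controller. */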
static irqreturn_t avs_hda_interrupt(struct hdac_bus *bus)
{
	irqreturn_t ret = IRQ_NONE;
	u32 status;

	status = snd_hdac_chip_readl(bus, INTSTS);
	if (snd_hdac_bus_handle_stream_irq(bus, status, hdac_update_stream))
		ret = IRQ_HANDLED;

	spin_lock_irq(&bus->reg_lock);
	/* Clear RIRB interrupt. */
	status = snd_hdac_chip_readb(bus, RIRBSTS);
	if (status & RIRB_INT_MASK) {
		if (status & RIRB_INT_RESPONSE)
			snd_hdac_bus_update_rirb(bus);
		snd_hdac_chip_writeb(bus, RIRBSTS, RIRB_INT_MASK);
		ret = IRQ_HANDLED;
	}

	spin_unlock_irq(&bus->reg_lock);
	return ret;
}

static irqreturn_t avs_hda_irq_handler(int irq, void *dev_id)
{
	struct hdac_bus *bus = dev_id;
	u32 intsts;

	intsts = snd_hdac_chip_readl(bus, INTSTS);
	if (intsts == UINT_MAX || !(intsts & AZX_INT_GLOBAL_EN))
		return IRQ_NONE;

	/* Mask GIE, unmasked in irq_thread(). */
	snd_hdac_chip_updatel(bus, INTCTL, AZX_INT_GLOBAL_EN, 0);

	return IRQ_WAKE_THREAD;
}

static irqreturn_t avs_hda_irq_thread(int irq, void *dev_id)
{
	struct hdac_bus *bus = dev_id;
	u32 status;

	status = snd_hdac_chip_readl(bus, INTSTS);
	if (status & ~AZX_INT_GLOBAL_EN)
		avs_hda_interrupt(bus);

	/* Unmask GIE, masked in irq_handler(). */
	snd_hdac_chip_updatel(bus, INTCTL, AZX_INT_GLOBAL_EN, AZX_INT_GLOBAL_EN);

	return IRQ_HANDLED;
}

static irqreturn_t avs_dsp_irq_handler(int irq, void *dev_id)
{
	struct avs_dev *adev = dev_id;

	return avs_hda_irq_handler(irq, &adev->base.core);
}

static irqreturn_t avs_dsp_irq_thread(int irq, void *dev_id)
{
	struct avs_dev *adev = dev_id;
	struct hdac_bus *bus = &adev->base.core;
	u32 status;

	status = readl(bus->ppcap + AZX_REG_PP_PPSTS);
	if (status & AZX_PPCTL_PIE)
		avs_dsp_op(adev, dsp_interrupt);

	/* Unmask GIE, masked in irq_handler(). */
	snd_hdac_chip_updatel(bus, INTCTL, AZX_INT_GLOBAL_EN, AZX_INT_GLOBAL_EN);

	return IRQ_HANDLED;
}

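/*
 * Allocate a single MSI or INTx vector and attach both the HDA stream and
 * the DSP (IPC) interrupt handlers to it.
 */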
static int avs_hdac_acquire_irq(struct avs_dev *adev)
{
	struct hdac_bus *bus = &adev->base.core;
	struct pci_dev *pci = to_pci_dev(bus->dev);
	int ret;

	/* request one and check that we only got one interrupt */
	ret = pci_alloc_irq_vectors(pci, 1, 1, PCI_IRQ_MSI | PCI_IRQ_INTX);
	if (ret != 1) {
		dev_err(adev->dev, "Failed to allocate IRQ vector: %d\n", ret);
		return ret;
	}

	ret = pci_request_irq(pci, 0, avs_hda_irq_handler, avs_hda_irq_thread, bus,
			      KBUILD_MODNAME);
	if (ret < 0) {
		dev_err(adev->dev, "Failed to request stream IRQ handler: %d\n", ret);
		goto free_vector;
	}

	ret = pci_request_irq(pci, 0, avs_dsp_irq_handler, avs_dsp_irq_thread, adev,
			      KBUILD_MODNAME);
	if (ret < 0) {
		dev_err(adev->dev, "Failed to request IPC IRQ handler: %d\n", ret);
		goto free_stream_irq;
	}

	return 0;

free_stream_irq:
	pci_free_irq(pci, 0, bus);
free_vector:
	pci_free_irq_vectors(pci);
	return ret;
}

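/*
 * One-time setup of the HDA bus, IPC machinery and avs_dev bookkeeping,
 * performed during PCI probe.
 */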
static int avs_bus_init(struct avs_dev *adev, struct pci_dev *pci, const struct pci_device_id *id)
{
	struct hda_bus *bus = &adev->base;
	struct avs_ipc *ipc;
	struct device *dev = &pci->dev;
	int ret;

	ret = snd_hdac_ext_bus_init(&bus->core, dev, NULL, &soc_hda_ext_bus_ops);
	if (ret < 0)
		return ret;

	bus->core.use_posbuf = 1;
	bus->core.bdl_pos_adj = 0;
	bus->core.sync_write = 1;
	bus->pci = pci;
	bus->mixer_assigned = -1;
	mutex_init(&bus->prepare_mutex);

	ipc = devm_kzalloc(dev, sizeof(*ipc), GFP_KERNEL);
	if (!ipc)
		return -ENOMEM;
	ret = avs_ipc_init(ipc, dev);
	if (ret < 0)
		return ret;

	adev->modcfg_buf = devm_kzalloc(dev, AVS_MAILBOX_SIZE, GFP_KERNEL);
	if (!adev->modcfg_buf)
		return -ENOMEM;

	adev->dev = dev;
	adev->spec = (const struct avs_spec *)id->driver_data;
	adev->ipc = ipc;
	adev->hw_cfg.dsp_cores = hweight_long(AVS_MAIN_CORE_MASK);
	INIT_WORK(&adev->probe_work, avs_hda_probe_work);
	INIT_LIST_HEAD(&adev->comp_list);
	INIT_LIST_HEAD(&adev->path_list);
	INIT_LIST_HEAD(&adev->fw_list);
	init_completion(&adev->fw_ready);
	spin_lock_init(&adev->path_list_lock);
	mutex_init(&adev->modres_mutex);
	mutex_init(&adev->comp_list_mutex);
	mutex_init(&adev->path_mutex);

	return 0;
}

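/*
 * Map the HDA and DSP BARs, set up streams and interrupts, then hand the
 * remaining initialization over to the deferred probe_work.
 */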
static int avs_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
{
	struct hdac_bus *bus;
	struct avs_dev *adev;
	struct device *dev = &pci->dev;
	int ret;

	ret = snd_intel_dsp_driver_probe(pci);
	switch (ret) {
	case SND_INTEL_DSP_DRIVER_ANY:
	case SND_INTEL_DSP_DRIVER_SST:
	case SND_INTEL_DSP_DRIVER_AVS:
		break;
	default:
		return -ENODEV;
	}

	ret = pcim_enable_device(pci);
	if (ret < 0)
		return ret;

	adev = devm_kzalloc(dev, sizeof(*adev), GFP_KERNEL);
	if (!adev)
		return -ENOMEM;
	bus = &adev->base.core;

	ret = avs_bus_init(adev, pci, id);
	if (ret < 0) {
		dev_err(dev, "failed to init avs bus: %d\n", ret);
		return ret;
	}

	ret = pcim_request_all_regions(pci, "AVS HDAudio");
	if (ret < 0)
		return ret;

	bus->addr = pci_resource_start(pci, 0);
	bus->remap_addr = pci_ioremap_bar(pci, 0);
	if (!bus->remap_addr) {
		dev_err(bus->dev, "ioremap error\n");
		return -ENXIO;
	}

	adev->dsp_ba = pci_ioremap_bar(pci, 4);
	if (!adev->dsp_ba) {
		dev_err(bus->dev, "ioremap error\n");
		ret = -ENXIO;
		goto err_remap_bar4;
	}

	snd_hdac_bus_parse_capabilities(bus);
	if (bus->mlcap)
		snd_hdac_ext_bus_get_ml_capabilities(bus);

	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	dma_set_max_seg_size(dev, UINT_MAX);

	ret = avs_hdac_bus_init_streams(bus);
	if (ret < 0) {
		dev_err(dev, "failed to init streams: %d\n", ret);
		goto err_init_streams;
	}

	ret = avs_hdac_acquire_irq(adev);
	if (ret < 0) {
		dev_err(bus->dev, "failed to acquire irq: %d\n", ret);
		goto err_acquire_irq;
	}

	pci_set_master(pci);
	pci_set_drvdata(pci, bus);
	device_disable_async_suspend(dev);

	ret = snd_hdac_i915_init(bus);
	if (ret == -EPROBE_DEFER)
		goto err_i915_init;
	else if (ret < 0)
		dev_info(bus->dev, "i915 init unsuccessful: %d\n", ret);

	schedule_work(&adev->probe_work);

	return 0;

err_i915_init:
	pci_free_irq(pci, 0, adev);
	pci_free_irq(pci, 0, bus);
	pci_free_irq_vectors(pci);
	pci_clear_master(pci);
	pci_set_drvdata(pci, NULL);
err_acquire_irq:
	snd_hdac_bus_free_stream_pages(bus);
	snd_hdac_ext_stream_free_all(bus);
err_init_streams:
	iounmap(adev->dsp_ba);
err_remap_bar4:
	iounmap(bus->remap_addr);
	return ret;
}

static void avs_pci_shutdown(struct pci_dev *pci)
{
	struct hdac_bus *bus = pci_get_drvdata(pci);
	struct avs_dev *adev = hdac_to_avs(bus);

	cancel_work_sync(&adev->probe_work);
	avs_ipc_block(adev->ipc);

	snd_hdac_stop_streams(bus);
	avs_dsp_op(adev, int_control, false);
	snd_hdac_ext_bus_ppcap_int_enable(bus, false);
	snd_hdac_ext_bus_link_power_down_all(bus);

	snd_hdac_bus_stop_chip(bus);
	snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);

	pci_free_irq(pci, 0, adev);
	pci_free_irq(pci, 0, bus);
	pci_free_irq_vectors(pci);
}

static void avs_pci_remove(struct pci_dev *pci)
{
	struct hdac_device *hdev, *save;
	struct hdac_bus *bus = pci_get_drvdata(pci);
	struct avs_dev *adev = hdac_to_avs(bus);

	cancel_work_sync(&adev->probe_work);
	avs_ipc_block(adev->ipc);

	avs_unregister_all_boards(adev);

	acpi_nhlt_put_gbl_table();
	avs_debugfs_exit(adev);

	if (avs_platattr_test(adev, CLDMA))
		hda_cldma_free(&code_loader);

	snd_hdac_stop_streams_and_chip(bus);
	avs_dsp_op(adev, int_control, false);
	snd_hdac_ext_bus_ppcap_int_enable(bus, false);

	/* it is safe to remove all codecs from the system now */
	list_for_each_entry_safe(hdev, save, &bus->codec_list, list)
		snd_hda_codec_unregister(hdac_to_hda_codec(hdev));

	snd_hdac_bus_free_stream_pages(bus);
	snd_hdac_ext_stream_free_all(bus);
	/* reverse ml_capabilities */
	snd_hdac_ext_link_free_all(bus);
	snd_hdac_ext_bus_exit(bus);

	avs_dsp_core_disable(adev, GENMASK(adev->hw_cfg.dsp_cores - 1, 0));
	snd_hdac_ext_bus_ppcap_enable(bus, false);

	/* snd_hdac_stop_streams_and_chip does that already? */
	snd_hdac_bus_stop_chip(bus);
	snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);
	if (bus->audio_component)
		snd_hdac_i915_exit(bus);

	avs_module_info_free(adev);
	pci_free_irq(pci, 0, adev);
	pci_free_irq(pci, 0, bus);
	pci_free_irq_vectors(pci);
	iounmap(bus->remap_addr);
	iounmap(adev->dsp_ba);

	/* Firmware is not needed anymore */
	avs_release_firmwares(adev);

	/* pm_runtime_forbid() can rpm_resume() which we do not want */
	pm_runtime_disable(&pci->dev);
	pm_runtime_forbid(&pci->dev);
	pm_runtime_enable(&pci->dev);
	pm_runtime_get_noresume(&pci->dev);
}

static int avs_suspend_standby(struct avs_dev *adev)
{
	struct hdac_bus *bus = &adev->base.core;
	struct pci_dev *pci = adev->base.pci;

	if (bus->cmd_dma_state)
		snd_hdac_bus_stop_cmd_io(bus);

	snd_hdac_ext_bus_link_power_down_all(bus);

	enable_irq_wake(pci->irq);
	pci_save_state(pci);

	return 0;
}

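/*
 * Common suspend path: request a low-power DSP state over IPC, disable the
 * cores, then stop the controller and enter link reset. If low-power paths
 * are in use, only the lighter standby variant is performed.
 */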
static int avs_suspend_common(struct avs_dev *adev, bool low_power)
{
	struct hdac_bus *bus = &adev->base.core;
	int ret;

	flush_work(&adev->probe_work);
	if (low_power && adev->num_lp_paths)
		return avs_suspend_standby(adev);

	snd_hdac_ext_bus_link_power_down_all(bus);

	ret = avs_ipc_set_dx(adev, AVS_MAIN_CORE_MASK, false);
	/*
	 * pm_runtime is blocked on DSP failure but system-wide suspend is not.
	 * Do not block entire system from suspending if that's the case.
	 */
	if (ret && ret != -EPERM) {
		dev_err(adev->dev, "set dx failed: %d\n", ret);
		return AVS_IPC_RET(ret);
	}

	avs_ipc_block(adev->ipc);
	avs_dsp_op(adev, int_control, false);
	snd_hdac_ext_bus_ppcap_int_enable(bus, false);

	ret = avs_dsp_core_disable(adev, AVS_MAIN_CORE_MASK);
	if (ret < 0) {
		dev_err(adev->dev, "core_mask %ld disable failed: %d\n", AVS_MAIN_CORE_MASK, ret);
		return ret;
	}

	snd_hdac_ext_bus_ppcap_enable(bus, false);
	/* disable LP SRAM retention */
	avs_hda_power_gating_enable(adev, false);
	snd_hdac_bus_stop_chip(bus);
	/* disable CG when putting controller to reset */
	avs_hdac_clock_gating_enable(bus, false);
	snd_hdac_bus_enter_link_reset(bus);
	avs_hdac_clock_gating_enable(bus, true);

	snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);

	return 0;
}

static int avs_resume_standby(struct avs_dev *adev)
{
	struct hdac_bus *bus = &adev->base.core;
	struct pci_dev *pci = adev->base.pci;

	pci_restore_state(pci);
	disable_irq_wake(pci->irq);

	snd_hdac_ext_bus_link_power_up_all(bus);

	if (bus->cmd_dma_state)
		snd_hdac_bus_init_cmd_io(bus);

	return 0;
}

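/*
 * Common resume path: re-initialize the controller and boot the DSP firmware
 * again, with or without purging the previously loaded image.
 */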
static int avs_resume_common(struct avs_dev *adev, bool low_power, bool purge)
{
	struct hdac_bus *bus = &adev->base.core;
	int ret;

	if (low_power && adev->num_lp_paths)
		return avs_resume_standby(adev);

	snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, true);
	avs_hdac_bus_init_chip(bus, true);

	snd_hdac_ext_bus_ppcap_enable(bus, true);
	snd_hdac_ext_bus_ppcap_int_enable(bus, true);

	ret = avs_dsp_boot_firmware(adev, purge);
	if (ret < 0) {
		dev_err(adev->dev, "firmware boot failed: %d\n", ret);
		return ret;
	}

	return 0;
}

static int avs_suspend(struct device *dev)
{
	return avs_suspend_common(to_avs_dev(dev), true);
}

static int avs_resume(struct device *dev)
{
	return avs_resume_common(to_avs_dev(dev), true, true);
}

static int avs_runtime_suspend(struct device *dev)
{
	return avs_suspend_common(to_avs_dev(dev), true);
}

static int avs_runtime_resume(struct device *dev)
{
	return avs_resume_common(to_avs_dev(dev), true, false);
}

static int avs_freeze(struct device *dev)
{
	return avs_suspend_common(to_avs_dev(dev), false);
}
static int avs_thaw(struct device *dev)
{
	return avs_resume_common(to_avs_dev(dev), false, true);
}

static int avs_poweroff(struct device *dev)
{
	return avs_suspend_common(to_avs_dev(dev), false);
}

static int avs_restore(struct device *dev)
{
	return avs_resume_common(to_avs_dev(dev), false, true);
}

static const struct dev_pm_ops avs_dev_pm = {
	.suspend = avs_suspend,
	.resume = avs_resume,
	.freeze = avs_freeze,
	.thaw = avs_thaw,
	.poweroff = avs_poweroff,
	.restore = avs_restore,
	RUNTIME_PM_OPS(avs_runtime_suspend, avs_runtime_resume, NULL)
};

static const struct avs_sram_spec skl_sram_spec = {
	.base_offset = SKL_ADSP_SRAM_BASE_OFFSET,
	.window_size = SKL_ADSP_SRAM_WINDOW_SIZE,
};

static const struct avs_sram_spec apl_sram_spec = {
	.base_offset = APL_ADSP_SRAM_BASE_OFFSET,
	.window_size = APL_ADSP_SRAM_WINDOW_SIZE,
};

static const struct avs_sram_spec mtl_sram_spec = {
	.base_offset = MTL_ADSP_SRAM_BASE_OFFSET,
	.window_size = MTL_ADSP_SRAM_WINDOW_SIZE,
};

static const struct avs_hipc_spec skl_hipc_spec = {
	.req_offset = SKL_ADSP_REG_HIPCI,
	.req_ext_offset = SKL_ADSP_REG_HIPCIE,
	.req_busy_mask = SKL_ADSP_HIPCI_BUSY,
	.ack_offset = SKL_ADSP_REG_HIPCIE,
	.ack_done_mask = SKL_ADSP_HIPCIE_DONE,
	.rsp_offset = SKL_ADSP_REG_HIPCT,
	.rsp_busy_mask = SKL_ADSP_HIPCT_BUSY,
	.ctl_offset = SKL_ADSP_REG_HIPCCTL,
	.sts_offset = SKL_ADSP_SRAM_BASE_OFFSET,
};

static const struct avs_hipc_spec apl_hipc_spec = {
	.req_offset = SKL_ADSP_REG_HIPCI,
	.req_ext_offset = SKL_ADSP_REG_HIPCIE,
	.req_busy_mask = SKL_ADSP_HIPCI_BUSY,
	.ack_offset = SKL_ADSP_REG_HIPCIE,
	.ack_done_mask = SKL_ADSP_HIPCIE_DONE,
	.rsp_offset = SKL_ADSP_REG_HIPCT,
	.rsp_busy_mask = SKL_ADSP_HIPCT_BUSY,
	.ctl_offset = SKL_ADSP_REG_HIPCCTL,
	.sts_offset = APL_ADSP_SRAM_BASE_OFFSET,
};

static const struct avs_hipc_spec cnl_hipc_spec = {
	.req_offset = CNL_ADSP_REG_HIPCIDR,
	.req_ext_offset = CNL_ADSP_REG_HIPCIDD,
	.req_busy_mask = CNL_ADSP_HIPCIDR_BUSY,
	.ack_offset = CNL_ADSP_REG_HIPCIDA,
	.ack_done_mask = CNL_ADSP_HIPCIDA_DONE,
	.rsp_offset = CNL_ADSP_REG_HIPCTDR,
	.rsp_busy_mask = CNL_ADSP_HIPCTDR_BUSY,
	.ctl_offset = CNL_ADSP_REG_HIPCCTL,
	.sts_offset = APL_ADSP_SRAM_BASE_OFFSET,
};

static const struct avs_hipc_spec lnl_hipc_spec = {
	.req_offset = MTL_REG_HfIPCxIDR,
	.req_ext_offset = MTL_REG_HfIPCxIDD,
	.req_busy_mask = MTL_HfIPCxIDR_BUSY,
	.ack_offset = MTL_REG_HfIPCxIDA,
	.ack_done_mask = MTL_HfIPCxIDA_DONE,
	.rsp_offset = MTL_REG_HfIPCxTDR,
	.rsp_busy_mask = MTL_HfIPCxTDR_BUSY,
	.ctl_offset = MTL_REG_HfIPCxCTL,
	.sts_offset = LNL_REG_HfDFR(0),
};

static const struct avs_spec skl_desc = {
	.name = "skl",
	.min_fw_version = { 9, 21, 0, 4732 },
	.dsp_ops = &avs_skl_dsp_ops,
	.core_init_mask = 1,
	.attributes = AVS_PLATATTR_CLDMA,
	.sram = &skl_sram_spec,
	.hipc = &skl_hipc_spec,
};

static const struct avs_spec apl_desc = {
	.name = "apl",
	.min_fw_version = { 9, 22, 1, 4323 },
	.dsp_ops = &avs_apl_dsp_ops,
	.core_init_mask = 3,
	.attributes = AVS_PLATATTR_IMR,
	.sram = &apl_sram_spec,
	.hipc = &apl_hipc_spec,
};

static const struct avs_spec cnl_desc = {
	.name = "cnl",
	.min_fw_version = { 10, 23, 0, 5314 },
	.dsp_ops = &avs_cnl_dsp_ops,
	.core_init_mask = 1,
	.attributes = AVS_PLATATTR_IMR,
	.sram = &apl_sram_spec,
	.hipc = &cnl_hipc_spec,
};

static const struct avs_spec icl_desc = {
	.name = "icl",
	.min_fw_version = { 10, 23, 0, 5040 },
	.dsp_ops = &avs_icl_dsp_ops,
	.core_init_mask = 1,
	.attributes = AVS_PLATATTR_IMR,
	.sram = &apl_sram_spec,
	.hipc = &cnl_hipc_spec,
};

static const struct avs_spec jsl_desc = {
	.name = "jsl",
	.min_fw_version = { 10, 26, 0, 5872 },
	.dsp_ops = &avs_icl_dsp_ops,
	.core_init_mask = 1,
	.attributes = AVS_PLATATTR_IMR,
	.sram = &apl_sram_spec,
	.hipc = &cnl_hipc_spec,
};

#define AVS_TGL_BASED_SPEC(sname, min) \
static const struct avs_spec sname##_desc = { \
	.name = #sname, \
	.min_fw_version = { 10, min, 0, 5646 }, \
	.dsp_ops = &avs_tgl_dsp_ops, \
	.core_init_mask = 1, \
	.attributes = AVS_PLATATTR_IMR, \
	.sram = &apl_sram_spec, \
	.hipc = &cnl_hipc_spec, \
}

AVS_TGL_BASED_SPEC(lkf, 28);
AVS_TGL_BASED_SPEC(tgl, 29);
AVS_TGL_BASED_SPEC(ehl, 30);
AVS_TGL_BASED_SPEC(adl, 35);
AVS_TGL_BASED_SPEC(adl_n, 35);

static const struct avs_spec fcl_desc = {
	.name = "fcl",
	.min_fw_version = { 0 },
	.dsp_ops = &avs_ptl_dsp_ops,
	.core_init_mask = 1,
	.attributes = AVS_PLATATTR_IMR | AVS_PLATATTR_ACE | AVS_PLATATTR_ALTHDA,
	.sram = &mtl_sram_spec,
	.hipc = &lnl_hipc_spec,
};

static const struct pci_device_id avs_ids[] = {
	{ PCI_DEVICE_DATA(INTEL, HDA_SKL_LP, &skl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_SKL, &skl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_KBL_LP, &skl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_KBL, &skl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_KBL_H, &skl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_CML_S, &skl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_APL, &apl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_GML, &apl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_CNL_LP, &cnl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_CNL_H, &cnl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_CML_LP, &cnl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_CML_H, &cnl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_RKL_S, &cnl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_ICL_LP, &icl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_ICL_N, &icl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_ICL_H, &icl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_JSL_N, &jsl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_LKF, &lkf_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_TGL_LP, &tgl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_TGL_H, &tgl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_CML_R, &tgl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_EHL_0, &ehl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_EHL_3, &ehl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_ADL_S, &adl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_ADL_P, &adl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_ADL_PS, &adl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_ADL_M, &adl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_ADL_PX, &adl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_ADL_N, &adl_n_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_RPL_S, &adl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_RPL_P_0, &adl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_RPL_P_1, &adl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_RPL_M, &adl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_RPL_PX, &adl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_FCL, &fcl_desc) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, avs_ids);

static struct pci_driver avs_pci_driver = {
	.name = KBUILD_MODNAME,
	.id_table = avs_ids,
	.probe = avs_pci_probe,
	.remove = avs_pci_remove,
	.shutdown = avs_pci_shutdown,
	.dev_groups = avs_attr_groups,
	.driver = {
		.pm = pm_ptr(&avs_dev_pm),
	},
};
module_pci_driver(avs_pci_driver);

MODULE_AUTHOR("Cezary Rojewski <[email protected]>");
MODULE_AUTHOR("Amadeusz Slawinski <[email protected]>");
MODULE_DESCRIPTION("Intel cAVS sound driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE("intel/avs/skl/dsp_basefw.bin");
MODULE_FIRMWARE("intel/avs/apl/dsp_basefw.bin");
MODULE_FIRMWARE("intel/avs/cnl/dsp_basefw.bin");
MODULE_FIRMWARE("intel/avs/icl/dsp_basefw.bin");
MODULE_FIRMWARE("intel/avs/jsl/dsp_basefw.bin");
MODULE_FIRMWARE("intel/avs/lkf/dsp_basefw.bin");
MODULE_FIRMWARE("intel/avs/tgl/dsp_basefw.bin");
MODULE_FIRMWARE("intel/avs/ehl/dsp_basefw.bin");
MODULE_FIRMWARE("intel/avs/adl/dsp_basefw.bin");
MODULE_FIRMWARE("intel/avs/adl_n/dsp_basefw.bin");
MODULE_FIRMWARE("intel/fcl/dsp_basefw.bin");