GitHub Repository: torvalds/linux
Path: blob/master/sound/soc/intel/avs/ipc.c
// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright(c) 2021-2022 Intel Corporation
//
// Authors: Cezary Rojewski <[email protected]>
//          Amadeusz Slawinski <[email protected]>
//

#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/slab.h>
#include <sound/hdaudio_ext.h>
#include "avs.h"
#include "debug.h"
#include "messages.h"
#include "registers.h"
#include "trace.h"

#define AVS_IPC_TIMEOUT_MS	300
#define AVS_D0IX_DELAY_MS	300

static int
avs_dsp_set_d0ix(struct avs_dev *adev, bool enable)
{
        struct avs_ipc *ipc = adev->ipc;
        int ret;

        /* Is transition required? */
        if (ipc->in_d0ix == enable)
                return 0;

        ret = avs_dsp_op(adev, set_d0ix, enable);
        if (ret) {
                /* Prevent further d0ix attempts on conscious IPC failure. */
                if (ret == -AVS_EIPC)
                        atomic_inc(&ipc->d0ix_disable_depth);

                ipc->in_d0ix = false;
                return ret;
        }

        ipc->in_d0ix = enable;
        return 0;
}

static void avs_dsp_schedule_d0ix(struct avs_dev *adev, struct avs_ipc_msg *tx)
{
        if (atomic_read(&adev->ipc->d0ix_disable_depth))
                return;

        mod_delayed_work(system_power_efficient_wq, &adev->ipc->d0ix_work,
                         msecs_to_jiffies(AVS_D0IX_DELAY_MS));
}

static void avs_dsp_d0ix_work(struct work_struct *work)
{
        struct avs_ipc *ipc = container_of(work, struct avs_ipc, d0ix_work.work);

        avs_dsp_set_d0ix(to_avs_dev(ipc->dev), true);
}

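/* Exit D0ix before an IPC transfer unless D0ix is currently disabled (depth > 0). */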
static int avs_dsp_wake_d0i0(struct avs_dev *adev, struct avs_ipc_msg *tx)
{
        struct avs_ipc *ipc = adev->ipc;

        if (!atomic_read(&ipc->d0ix_disable_depth)) {
                cancel_delayed_work_sync(&ipc->d0ix_work);
                return avs_dsp_set_d0ix(adev, false);
        }

        return 0;
}

int avs_dsp_disable_d0ix(struct avs_dev *adev)
{
        struct avs_ipc *ipc = adev->ipc;

        /* Prevent PG only on the first disable. */
        if (atomic_inc_return(&ipc->d0ix_disable_depth) == 1) {
                cancel_delayed_work_sync(&ipc->d0ix_work);
                return avs_dsp_set_d0ix(adev, false);
        }

        return 0;
}

int avs_dsp_enable_d0ix(struct avs_dev *adev)
{
        struct avs_ipc *ipc = adev->ipc;

        if (atomic_dec_and_test(&ipc->d0ix_disable_depth))
                queue_delayed_work(system_power_efficient_wq, &ipc->d0ix_work,
                                   msecs_to_jiffies(AVS_D0IX_DELAY_MS));
        return 0;
}

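/* Stop all active PCM streams, shut down every DSP core and reboot the base firmware. */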
static void avs_dsp_recovery(struct avs_dev *adev)
{
        struct avs_soc_component *acomp;
        unsigned int core_mask;
        int ret;

        mutex_lock(&adev->comp_list_mutex);
        /* disconnect all running streams */
        list_for_each_entry(acomp, &adev->comp_list, node) {
                struct snd_soc_pcm_runtime *rtd;
                struct snd_soc_card *card;

                card = acomp->base.card;
                if (!card)
                        continue;

                for_each_card_rtds(card, rtd) {
                        struct snd_pcm *pcm;
                        int dir;

                        pcm = rtd->pcm;
                        if (!pcm || rtd->dai_link->no_pcm)
                                continue;

                        for_each_pcm_streams(dir) {
                                struct snd_pcm_substream *substream;

                                substream = pcm->streams[dir].substream;
                                if (!substream || !substream->runtime)
                                        continue;

                                /* No need for _irq() as we are in nonatomic context. */
                                snd_pcm_stream_lock(substream);
                                snd_pcm_stop(substream, SNDRV_PCM_STATE_DISCONNECTED);
                                snd_pcm_stream_unlock(substream);
                        }
                }
        }
        mutex_unlock(&adev->comp_list_mutex);

        /* forcibly shutdown all cores */
        core_mask = GENMASK(adev->hw_cfg.dsp_cores - 1, 0);
        avs_dsp_core_disable(adev, core_mask);

        /* attempt dsp reboot */
        ret = avs_dsp_boot_firmware(adev, true);
        if (ret < 0)
                dev_err(adev->dev, "dsp reboot failed: %d\n", ret);

        pm_runtime_enable(adev->dev);
        pm_request_autosuspend(adev->dev);

        atomic_set(&adev->ipc->recovering, 0);
}

static void avs_dsp_recovery_work(struct work_struct *work)
{
        struct avs_ipc *ipc = container_of(work, struct avs_ipc, recovery_work);

        avs_dsp_recovery(to_avs_dev(ipc->dev));
}

static void avs_dsp_exception_caught(struct avs_dev *adev, union avs_notify_msg *msg)
{
        struct avs_ipc *ipc = adev->ipc;

        /* Account for the double-exception case. */
        ipc->ready = false;

        if (!atomic_add_unless(&ipc->recovering, 1, 1)) {
                dev_err(adev->dev, "dsp recovery is already in progress\n");
                return;
        }

        dev_crit(adev->dev, "communication severed, rebooting dsp..\n");

        /* Avoid deadlock as the exception may be the response to SET_D0IX. */
        if (current_work() != &ipc->d0ix_work.work)
                cancel_delayed_work_sync(&ipc->d0ix_work);
        ipc->in_d0ix = false;
        /* Re-enabled on recovery completion. */
        pm_runtime_disable(adev->dev);

        /* Process received notification. */
        avs_dsp_op(adev, coredump, msg);

        schedule_work(&ipc->recovery_work);
}

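/* Caller holds ipc->rx_lock. Store the reply header and, if the request succeeded, copy its payload from the uplink mailbox. */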
static void avs_dsp_receive_rx(struct avs_dev *adev, u64 header)
{
        struct avs_ipc *ipc = adev->ipc;
        union avs_reply_msg msg = AVS_MSG(header);
        u32 sts, lec;

        sts = snd_hdac_adsp_readl(adev, AVS_FW_REG_STATUS(adev));
        lec = snd_hdac_adsp_readl(adev, AVS_FW_REG_ERROR(adev));
        trace_avs_ipc_reply_msg(header, sts, lec);

        ipc->rx.header = header;
        /* Abort copying payload if request processing was unsuccessful. */
        if (!msg.status) {
                /* update size in case of LARGE_CONFIG_GET */
                if (msg.msg_target == AVS_MOD_MSG &&
                    msg.global_msg_type == AVS_MOD_LARGE_CONFIG_GET)
                        ipc->rx.size = min_t(u32, AVS_MAILBOX_SIZE,
                                             msg.ext.large_config.data_off_size);

                memcpy_fromio(ipc->rx.data, avs_uplink_addr(adev), ipc->rx.size);
                trace_avs_msg_payload(ipc->rx.data, ipc->rx.size);
        }
}

static void avs_dsp_process_notification(struct avs_dev *adev, u64 header)
{
        struct avs_notify_mod_data mod_data;
        union avs_notify_msg msg = AVS_MSG(header);
        size_t data_size = 0;
        void *data = NULL;
        u32 sts, lec;

        sts = snd_hdac_adsp_readl(adev, AVS_FW_REG_STATUS(adev));
        lec = snd_hdac_adsp_readl(adev, AVS_FW_REG_ERROR(adev));
        trace_avs_ipc_notify_msg(header, sts, lec);

        /* Ignore spurious notifications until handshake is established. */
        if (!adev->ipc->ready && msg.notify_msg_type != AVS_NOTIFY_FW_READY) {
                dev_dbg(adev->dev, "FW not ready, skip notification: 0x%08x\n", msg.primary);
                return;
        }

        /* Calculate notification payload size. */
        switch (msg.notify_msg_type) {
        case AVS_NOTIFY_FW_READY:
                break;

        case AVS_NOTIFY_PHRASE_DETECTED:
                data_size = sizeof(struct avs_notify_voice_data);
                break;

        case AVS_NOTIFY_RESOURCE_EVENT:
                data_size = sizeof(struct avs_notify_res_data);
                break;

        case AVS_NOTIFY_LOG_BUFFER_STATUS:
        case AVS_NOTIFY_EXCEPTION_CAUGHT:
                break;

        case AVS_NOTIFY_MODULE_EVENT:
                /* To know the total payload size, header needs to be read first. */
                memcpy_fromio(&mod_data, avs_uplink_addr(adev), sizeof(mod_data));
                data_size = sizeof(mod_data) + mod_data.data_size;
                break;

        default:
                dev_info(adev->dev, "unknown notification: 0x%08x\n", msg.primary);
                break;
        }

        if (data_size) {
                data = kmalloc(data_size, GFP_KERNEL);
                if (!data)
                        return;

                memcpy_fromio(data, avs_uplink_addr(adev), data_size);
                trace_avs_msg_payload(data, data_size);
        }

        /* Perform notification-specific operations. */
        switch (msg.notify_msg_type) {
        case AVS_NOTIFY_FW_READY:
                dev_dbg(adev->dev, "FW READY 0x%08x\n", msg.primary);
                adev->ipc->ready = true;
                complete(&adev->fw_ready);
                break;

        case AVS_NOTIFY_LOG_BUFFER_STATUS:
                avs_log_buffer_status_locked(adev, &msg);
                break;

        case AVS_NOTIFY_EXCEPTION_CAUGHT:
                avs_dsp_exception_caught(adev, &msg);
                break;

        default:
                break;
        }

        kfree(data);
}

void avs_dsp_process_response(struct avs_dev *adev, u64 header)
{
        struct avs_ipc *ipc = adev->ipc;

        /*
         * Response may either be solicited - a reply for a request that has
         * been sent beforehand - or unsolicited (notification).
         */
        if (avs_msg_is_reply(header)) {
                /* Response processing is invoked from IRQ thread. */
                spin_lock_irq(&ipc->rx_lock);
                avs_dsp_receive_rx(adev, header);
                ipc->rx_completed = true;
                spin_unlock_irq(&ipc->rx_lock);
        } else {
                avs_dsp_process_notification(adev, header);
        }

        complete(&ipc->busy_completion);
}

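/* True when the DSP has posted a message (BUSY set in the response register) that the host has not serviced yet. */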
static bool avs_ipc_is_busy(struct avs_ipc *ipc)
{
        struct avs_dev *adev = to_avs_dev(ipc->dev);
        const struct avs_spec *const spec = adev->spec;
        u32 hipc_rsp;

        hipc_rsp = snd_hdac_adsp_readl(adev, spec->hipc->rsp_offset);
        return hipc_rsp & spec->hipc->rsp_busy_mask;
}

static int avs_ipc_wait_busy_completion(struct avs_ipc *ipc, int timeout)
{
        u32 repeats_left = 128; /* to avoid infinite looping */
        int ret;

again:
        ret = wait_for_completion_timeout(&ipc->busy_completion, msecs_to_jiffies(timeout));

        /* DSP could be unresponsive at this point. */
        if (!ipc->ready)
                return -EPERM;

        if (!ret) {
                if (!avs_ipc_is_busy(ipc))
                        return -ETIMEDOUT;
                /*
                 * Firmware did its job, either notification or reply
                 * has been received - now wait until it's processed.
                 */
                wait_for_completion_killable(&ipc->busy_completion);
        }

        /* Ongoing notification's bottom-half may cause early wakeup */
        spin_lock(&ipc->rx_lock);
        if (!ipc->rx_completed) {
                if (repeats_left) {
                        /* Reply delayed due to notification. */
                        repeats_left--;
                        reinit_completion(&ipc->busy_completion);
                        spin_unlock(&ipc->rx_lock);
                        goto again;
                }

                spin_unlock(&ipc->rx_lock);
                return -ETIMEDOUT;
        }

        spin_unlock(&ipc->rx_lock);
        return 0;
}

static void avs_ipc_msg_init(struct avs_ipc *ipc, struct avs_ipc_msg *reply)
{
        lockdep_assert_held(&ipc->rx_lock);

        ipc->rx.header = 0;
        ipc->rx.size = reply ? reply->size : 0;
        ipc->rx_completed = false;

        reinit_completion(&ipc->done_completion);
        reinit_completion(&ipc->busy_completion);
}

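/* Copy the request payload to the downlink mailbox and ring the doorbell by writing the header, with the BUSY bit set, to the HIPC request registers. */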
static void avs_dsp_send_tx(struct avs_dev *adev, struct avs_ipc_msg *tx, bool read_fwregs)
{
        const struct avs_spec *const spec = adev->spec;
        u32 sts = UINT_MAX;
        u32 lec = UINT_MAX;

        tx->header |= spec->hipc->req_busy_mask;
        if (read_fwregs) {
                sts = snd_hdac_adsp_readl(adev, AVS_FW_REG_STATUS(adev));
                lec = snd_hdac_adsp_readl(adev, AVS_FW_REG_ERROR(adev));
        }

        trace_avs_request(tx, sts, lec);

        if (tx->size)
                memcpy_toio(avs_downlink_addr(adev), tx->data, tx->size);
        snd_hdac_adsp_writel(adev, spec->hipc->req_ext_offset, tx->header >> 32);
        snd_hdac_adsp_writel(adev, spec->hipc->req_offset, tx->header & UINT_MAX);
}

static int avs_dsp_do_send_msg(struct avs_dev *adev, struct avs_ipc_msg *request,
                               struct avs_ipc_msg *reply, int timeout, const char *name)
{
        struct avs_ipc *ipc = adev->ipc;
        int ret;

        if (!ipc->ready)
                return -EPERM;

        mutex_lock(&ipc->msg_mutex);

        spin_lock(&ipc->rx_lock);
        avs_ipc_msg_init(ipc, reply);
        avs_dsp_send_tx(adev, request, true);
        spin_unlock(&ipc->rx_lock);

        ret = avs_ipc_wait_busy_completion(ipc, timeout);
        if (ret) {
                if (ret == -ETIMEDOUT) {
                        union avs_notify_msg msg = AVS_NOTIFICATION(EXCEPTION_CAUGHT);

                        /* Same treatment as on exception, just stack_dump=0. */
                        avs_dsp_exception_caught(adev, &msg);
                }
                goto exit;
        }

        ret = ipc->rx.rsp.status;
        /*
         * If IPC channel is blocked e.g.: due to ongoing recovery,
         * -EPERM error code is expected and thus it's not an actual error.
         *
         * Unsupported IPCs are of no harm either.
         */
        if (ret == -EPERM || ret == AVS_IPC_NOT_SUPPORTED)
                dev_dbg(adev->dev, "%s (0x%08x 0x%08x) failed: %d\n",
                        name, request->glb.primary, request->glb.ext.val, ret);
        else if (ret)
                dev_err(adev->dev, "%s (0x%08x 0x%08x) failed: %d\n",
                        name, request->glb.primary, request->glb.ext.val, ret);

        if (reply) {
                reply->header = ipc->rx.header;
                reply->size = ipc->rx.size;
                if (reply->data && ipc->rx.size)
                        memcpy(reply->data, ipc->rx.data, reply->size);
        }

exit:
        mutex_unlock(&ipc->msg_mutex);
        return ret;
}

static int avs_dsp_send_msg_sequence(struct avs_dev *adev, struct avs_ipc_msg *request,
                                     struct avs_ipc_msg *reply, int timeout, bool wake_d0i0,
                                     bool schedule_d0ix, const char *name)
{
        int ret;

        trace_avs_d0ix("wake", wake_d0i0, request->header);
        if (wake_d0i0) {
                ret = avs_dsp_wake_d0i0(adev, request);
                if (ret)
                        return ret;
        }

        ret = avs_dsp_do_send_msg(adev, request, reply, timeout, name);
        if (ret)
                return ret;

        trace_avs_d0ix("schedule", schedule_d0ix, request->header);
        if (schedule_d0ix)
                avs_dsp_schedule_d0ix(adev, request);

        return 0;
}

int avs_dsp_send_msg_timeout(struct avs_dev *adev, struct avs_ipc_msg *request,
                             struct avs_ipc_msg *reply, int timeout, const char *name)
{
        bool wake_d0i0 = avs_dsp_op(adev, d0ix_toggle, request, true);
        bool schedule_d0ix = avs_dsp_op(adev, d0ix_toggle, request, false);

        return avs_dsp_send_msg_sequence(adev, request, reply, timeout, wake_d0i0, schedule_d0ix,
                                         name);
}

int avs_dsp_send_msg(struct avs_dev *adev, struct avs_ipc_msg *request,
                     struct avs_ipc_msg *reply, const char *name)
{
        return avs_dsp_send_msg_timeout(adev, request, reply, adev->ipc->default_timeout_ms, name);
}

int avs_dsp_send_pm_msg_timeout(struct avs_dev *adev, struct avs_ipc_msg *request,
                                struct avs_ipc_msg *reply, int timeout, bool wake_d0i0,
                                const char *name)
{
        return avs_dsp_send_msg_sequence(adev, request, reply, timeout, wake_d0i0, false, name);
}

int avs_dsp_send_pm_msg(struct avs_dev *adev, struct avs_ipc_msg *request,
                        struct avs_ipc_msg *reply, bool wake_d0i0, const char *name)
{
        return avs_dsp_send_pm_msg_timeout(adev, request, reply, adev->ipc->default_timeout_ms,
                                           wake_d0i0, name);
}

static int avs_dsp_do_send_rom_msg(struct avs_dev *adev, struct avs_ipc_msg *request, int timeout,
                                   const char *name)
{
        struct avs_ipc *ipc = adev->ipc;
        int ret;

        mutex_lock(&ipc->msg_mutex);

        spin_lock(&ipc->rx_lock);
        avs_ipc_msg_init(ipc, NULL);
        /*
         * with hw still stalled, memory windows may not be
         * configured properly so avoid accessing SRAM
         */
        avs_dsp_send_tx(adev, request, false);
        spin_unlock(&ipc->rx_lock);

        /* ROM messages must be sent before main core is unstalled */
        ret = avs_dsp_op(adev, stall, AVS_MAIN_CORE_MASK, false);
        if (!ret) {
                ret = wait_for_completion_timeout(&ipc->done_completion, msecs_to_jiffies(timeout));
                ret = ret ? 0 : -ETIMEDOUT;
        }
        if (ret)
                dev_err(adev->dev, "%s (0x%08x 0x%08x) failed: %d\n",
                        name, request->glb.primary, request->glb.ext.val, ret);

        mutex_unlock(&ipc->msg_mutex);

        return ret;
}

int avs_dsp_send_rom_msg_timeout(struct avs_dev *adev, struct avs_ipc_msg *request, int timeout,
                                 const char *name)
{
        return avs_dsp_do_send_rom_msg(adev, request, timeout, name);
}

int avs_dsp_send_rom_msg(struct avs_dev *adev, struct avs_ipc_msg *request, const char *name)
{
        return avs_dsp_send_rom_msg_timeout(adev, request, adev->ipc->default_timeout_ms, name);
}

void avs_dsp_interrupt_control(struct avs_dev *adev, bool enable)
{
        const struct avs_spec *const spec = adev->spec;
        u32 value, mask;

        /*
         * No particular bit setting order. All of these are required
         * to have a functional SW <-> FW communication.
         */
        value = enable ? AVS_ADSP_ADSPIC_IPC : 0;
        snd_hdac_adsp_updatel(adev, AVS_ADSP_REG_ADSPIC, AVS_ADSP_ADSPIC_IPC, value);

        mask = AVS_ADSP_HIPCCTL_DONE | AVS_ADSP_HIPCCTL_BUSY;
        value = enable ? mask : 0;
        snd_hdac_adsp_updatel(adev, spec->hipc->ctl_offset, mask, value);
}

int avs_ipc_init(struct avs_ipc *ipc, struct device *dev)
{
        ipc->rx.data = devm_kzalloc(dev, AVS_MAILBOX_SIZE, GFP_KERNEL);
        if (!ipc->rx.data)
                return -ENOMEM;

        ipc->dev = dev;
        ipc->ready = false;
        ipc->default_timeout_ms = AVS_IPC_TIMEOUT_MS;
        INIT_WORK(&ipc->recovery_work, avs_dsp_recovery_work);
        INIT_DELAYED_WORK(&ipc->d0ix_work, avs_dsp_d0ix_work);
        init_completion(&ipc->done_completion);
        init_completion(&ipc->busy_completion);
        spin_lock_init(&ipc->rx_lock);
        mutex_init(&ipc->msg_mutex);

        return 0;
}

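/* Mark the IPC link as down and cancel any outstanding recovery or D0ix work. */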
void avs_ipc_block(struct avs_ipc *ipc)
{
        ipc->ready = false;
        cancel_work_sync(&ipc->recovery_work);
        cancel_delayed_work_sync(&ipc->d0ix_work);
        ipc->in_d0ix = false;
}