GitHub Repository: torvalds/linux
Path: blob/master/sound/xen/xen_snd_front_evtchnl.c
// SPDX-License-Identifier: GPL-2.0 OR MIT

/*
 * Xen para-virtual sound device
 *
 * Copyright (C) 2016-2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <[email protected]>
 */

#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/xen.h>
#include <xen/xenbus.h>

#include "xen_snd_front.h"
#include "xen_snd_front_alsa.h"
#include "xen_snd_front_cfg.h"
#include "xen_snd_front_evtchnl.h"

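/*
 * Interrupt handler for the request channel: consume responses posted by
 * the backend and complete the waiters blocked on them.
 */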
static irqreturn_t evtchnl_interrupt_req(int irq, void *dev_id)
{
        struct xen_snd_front_evtchnl *channel = dev_id;
        struct xen_snd_front_info *front_info = channel->front_info;
        struct xensnd_resp *resp;
        RING_IDX i, rp;

        if (unlikely(channel->state != EVTCHNL_STATE_CONNECTED))
                return IRQ_HANDLED;

        guard(mutex)(&channel->ring_io_lock);

again:
        rp = channel->u.req.ring.sring->rsp_prod;
        /* Ensure we see queued responses up to rp. */
        rmb();

        /*
         * Assume that the backend is trusted to always write sane values
         * to the ring counters, so no overflow checks on frontend side
         * are required.
         */
        for (i = channel->u.req.ring.rsp_cons; i != rp; i++) {
                resp = RING_GET_RESPONSE(&channel->u.req.ring, i);
                if (resp->id != channel->evt_id)
                        continue;
                switch (resp->operation) {
                case XENSND_OP_OPEN:
                case XENSND_OP_CLOSE:
                case XENSND_OP_READ:
                case XENSND_OP_WRITE:
                case XENSND_OP_TRIGGER:
                        channel->u.req.resp_status = resp->status;
                        complete(&channel->u.req.completion);
                        break;
                case XENSND_OP_HW_PARAM_QUERY:
                        channel->u.req.resp_status = resp->status;
                        channel->u.req.resp.hw_param =
                                        resp->resp.hw_param;
                        complete(&channel->u.req.completion);
                        break;

                default:
                        dev_err(&front_info->xb_dev->dev,
                                "Operation %d is not supported\n",
                                resp->operation);
                        break;
                }
        }

        channel->u.req.ring.rsp_cons = i;
        if (i != channel->u.req.ring.req_prod_pvt) {
                int more_to_do;

                RING_FINAL_CHECK_FOR_RESPONSES(&channel->u.req.ring,
                                               more_to_do);
                if (more_to_do)
                        goto again;
        } else {
                channel->u.req.ring.sring->rsp_event = i + 1;
        }

        return IRQ_HANDLED;
}

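/*
 * Interrupt handler for the event channel: consume events posted by the
 * backend, currently only stream position updates.
 */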
static irqreturn_t evtchnl_interrupt_evt(int irq, void *dev_id)
{
        struct xen_snd_front_evtchnl *channel = dev_id;
        struct xensnd_event_page *page = channel->u.evt.page;
        u32 cons, prod;

        if (unlikely(channel->state != EVTCHNL_STATE_CONNECTED))
                return IRQ_HANDLED;

        guard(mutex)(&channel->ring_io_lock);

        prod = page->in_prod;
        /* Ensure we see ring contents up to prod. */
        virt_rmb();
        if (prod == page->in_cons)
                return IRQ_HANDLED;

        /*
         * Assume that the backend is trusted to always write sane values
         * to the ring counters, so no overflow checks on frontend side
         * are required.
         */
        for (cons = page->in_cons; cons != prod; cons++) {
                struct xensnd_evt *event;

                event = &XENSND_IN_RING_REF(page, cons);
                if (unlikely(event->id != channel->evt_id++))
                        continue;

                switch (event->type) {
                case XENSND_EVT_CUR_POS:
                        xen_snd_front_alsa_handle_cur_pos(channel,
                                        event->op.cur_pos.position);
                        break;
                }
        }

        page->in_cons = cons;
        /* Ensure ring contents. */
        virt_wmb();

        return IRQ_HANDLED;
}

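/* Push the queued request to the shared ring and notify the backend. */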
void xen_snd_front_evtchnl_flush(struct xen_snd_front_evtchnl *channel)
{
        int notify;

        channel->u.req.ring.req_prod_pvt++;
        RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&channel->u.req.ring, notify);
        if (notify)
                notify_remote_via_irq(channel->irq);
}

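/*
 * Tear down a single channel: wake up pending waiters, release the IRQ,
 * the event channel port and the shared ring page.
 */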
static void evtchnl_free(struct xen_snd_front_info *front_info,
                         struct xen_snd_front_evtchnl *channel)
{
        void *page = NULL;

        if (channel->type == EVTCHNL_TYPE_REQ)
                page = channel->u.req.ring.sring;
        else if (channel->type == EVTCHNL_TYPE_EVT)
                page = channel->u.evt.page;

        if (!page)
                return;

        channel->state = EVTCHNL_STATE_DISCONNECTED;
        if (channel->type == EVTCHNL_TYPE_REQ) {
                /* Release everyone still waiting for a response, if any. */
                channel->u.req.resp_status = -EIO;
                complete_all(&channel->u.req.completion);
        }

        if (channel->irq)
                unbind_from_irqhandler(channel->irq, channel);

        if (channel->port)
                xenbus_free_evtchn(front_info->xb_dev, channel->port);

        /* End access and free the page. */
        xenbus_teardown_ring(&page, 1, &channel->gref);

        memset(channel, 0, sizeof(*channel));
}

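/* Free both channels of every request/event pair. */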
void xen_snd_front_evtchnl_free_all(struct xen_snd_front_info *front_info)
{
        int i;

        if (!front_info->evt_pairs)
                return;

        for (i = 0; i < front_info->num_evt_pairs; i++) {
                evtchnl_free(front_info, &front_info->evt_pairs[i].req);
                evtchnl_free(front_info, &front_info->evt_pairs[i].evt);
        }

        kfree(front_info->evt_pairs);
        front_info->evt_pairs = NULL;
}

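/* Allocate the shared ring page, event channel and IRQ for one channel. */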
static int evtchnl_alloc(struct xen_snd_front_info *front_info, int index,
                         struct xen_snd_front_evtchnl *channel,
                         enum xen_snd_front_evtchnl_type type)
{
        struct xenbus_device *xb_dev = front_info->xb_dev;
        void *page;
        irq_handler_t handler;
        char *handler_name = NULL;
        int ret;

        memset(channel, 0, sizeof(*channel));
        channel->type = type;
        channel->index = index;
        channel->front_info = front_info;
        channel->state = EVTCHNL_STATE_DISCONNECTED;
        ret = xenbus_setup_ring(xb_dev, GFP_KERNEL, &page, 1, &channel->gref);
        if (ret)
                goto fail;

        handler_name = kasprintf(GFP_KERNEL, "%s-%s", XENSND_DRIVER_NAME,
                                 type == EVTCHNL_TYPE_REQ ?
                                 XENSND_FIELD_RING_REF :
                                 XENSND_FIELD_EVT_RING_REF);
        if (!handler_name) {
                ret = -ENOMEM;
                goto fail;
        }

        mutex_init(&channel->ring_io_lock);

        if (type == EVTCHNL_TYPE_REQ) {
                struct xen_sndif_sring *sring = page;

                init_completion(&channel->u.req.completion);
                mutex_init(&channel->u.req.req_io_lock);
                XEN_FRONT_RING_INIT(&channel->u.req.ring, sring, XEN_PAGE_SIZE);

                handler = evtchnl_interrupt_req;
        } else {
                channel->u.evt.page = page;
                handler = evtchnl_interrupt_evt;
        }

        ret = xenbus_alloc_evtchn(xb_dev, &channel->port);
        if (ret < 0)
                goto fail;

        ret = bind_evtchn_to_irq(channel->port);
        if (ret < 0) {
                dev_err(&xb_dev->dev,
                        "Failed to bind IRQ for domid %d port %d: %d\n",
                        front_info->xb_dev->otherend_id, channel->port, ret);
                goto fail;
        }

        channel->irq = ret;

        ret = request_threaded_irq(channel->irq, NULL, handler,
                                   IRQF_ONESHOT, handler_name, channel);
        if (ret < 0) {
                dev_err(&xb_dev->dev, "Failed to request IRQ %d: %d\n",
                        channel->irq, ret);
                goto fail;
        }

        kfree(handler_name);
        return 0;

fail:
        kfree(handler_name);
        dev_err(&xb_dev->dev, "Failed to allocate ring: %d\n", ret);
        return ret;
}

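/* Create a request/event channel pair for each configured PCM stream. */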
int xen_snd_front_evtchnl_create_all(struct xen_snd_front_info *front_info,
                                     int num_streams)
{
        struct xen_front_cfg_card *cfg = &front_info->cfg;
        struct device *dev = &front_info->xb_dev->dev;
        int d, ret = 0;

        front_info->evt_pairs =
                        kcalloc(num_streams,
                                sizeof(struct xen_snd_front_evtchnl_pair),
                                GFP_KERNEL);
        if (!front_info->evt_pairs)
                return -ENOMEM;

        /* Iterate over devices and their streams and create event channels. */
        for (d = 0; d < cfg->num_pcm_instances; d++) {
                struct xen_front_cfg_pcm_instance *pcm_instance;
                int s, index;

                pcm_instance = &cfg->pcm_instances[d];

                for (s = 0; s < pcm_instance->num_streams_pb; s++) {
                        index = pcm_instance->streams_pb[s].index;

                        ret = evtchnl_alloc(front_info, index,
                                            &front_info->evt_pairs[index].req,
                                            EVTCHNL_TYPE_REQ);
                        if (ret < 0) {
                                dev_err(dev, "Error allocating control channel\n");
                                goto fail;
                        }

                        ret = evtchnl_alloc(front_info, index,
                                            &front_info->evt_pairs[index].evt,
                                            EVTCHNL_TYPE_EVT);
                        if (ret < 0) {
                                dev_err(dev, "Error allocating in-event channel\n");
                                goto fail;
                        }
                }

                for (s = 0; s < pcm_instance->num_streams_cap; s++) {
                        index = pcm_instance->streams_cap[s].index;

                        ret = evtchnl_alloc(front_info, index,
                                            &front_info->evt_pairs[index].req,
                                            EVTCHNL_TYPE_REQ);
                        if (ret < 0) {
                                dev_err(dev, "Error allocating control channel\n");
                                goto fail;
                        }

                        ret = evtchnl_alloc(front_info, index,
                                            &front_info->evt_pairs[index].evt,
                                            EVTCHNL_TYPE_EVT);
                        if (ret < 0) {
                                dev_err(dev, "Error allocating in-event channel\n");
                                goto fail;
                        }
                }
        }

        front_info->num_evt_pairs = num_streams;
        return 0;

fail:
        xen_snd_front_evtchnl_free_all(front_info);
        return ret;
}

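/* Publish the ring reference and event channel of one channel in XenStore. */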
static int evtchnl_publish(struct xenbus_transaction xbt,
                           struct xen_snd_front_evtchnl *channel,
                           const char *path, const char *node_ring,
                           const char *node_chnl)
{
        struct xenbus_device *xb_dev = channel->front_info->xb_dev;
        int ret;

        /* Write the ring reference. */
        ret = xenbus_printf(xbt, path, node_ring, "%u", channel->gref);
        if (ret < 0) {
                dev_err(&xb_dev->dev, "Error writing ring-ref: %d\n", ret);
                return ret;
        }

        /* Write the event channel port. */
        ret = xenbus_printf(xbt, path, node_chnl, "%u", channel->port);
        if (ret < 0) {
                dev_err(&xb_dev->dev, "Error writing event channel: %d\n", ret);
                return ret;
        }

        return 0;
}

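/*
 * Publish ring references and event channels of all streams in XenStore
 * within a single transaction, restarting it on -EAGAIN.
 */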
int xen_snd_front_evtchnl_publish_all(struct xen_snd_front_info *front_info)
{
        struct xen_front_cfg_card *cfg = &front_info->cfg;
        struct xenbus_transaction xbt;
        int ret, d;

again:
        ret = xenbus_transaction_start(&xbt);
        if (ret < 0) {
                xenbus_dev_fatal(front_info->xb_dev, ret,
                                 "starting transaction");
                return ret;
        }

        for (d = 0; d < cfg->num_pcm_instances; d++) {
                struct xen_front_cfg_pcm_instance *pcm_instance;
                int s, index;

                pcm_instance = &cfg->pcm_instances[d];

                for (s = 0; s < pcm_instance->num_streams_pb; s++) {
                        index = pcm_instance->streams_pb[s].index;

                        ret = evtchnl_publish(xbt,
                                              &front_info->evt_pairs[index].req,
                                              pcm_instance->streams_pb[s].xenstore_path,
                                              XENSND_FIELD_RING_REF,
                                              XENSND_FIELD_EVT_CHNL);
                        if (ret < 0)
                                goto fail;

                        ret = evtchnl_publish(xbt,
                                              &front_info->evt_pairs[index].evt,
                                              pcm_instance->streams_pb[s].xenstore_path,
                                              XENSND_FIELD_EVT_RING_REF,
                                              XENSND_FIELD_EVT_EVT_CHNL);
                        if (ret < 0)
                                goto fail;
                }

                for (s = 0; s < pcm_instance->num_streams_cap; s++) {
                        index = pcm_instance->streams_cap[s].index;

                        ret = evtchnl_publish(xbt,
                                              &front_info->evt_pairs[index].req,
                                              pcm_instance->streams_cap[s].xenstore_path,
                                              XENSND_FIELD_RING_REF,
                                              XENSND_FIELD_EVT_CHNL);
                        if (ret < 0)
                                goto fail;

                        ret = evtchnl_publish(xbt,
                                              &front_info->evt_pairs[index].evt,
                                              pcm_instance->streams_cap[s].xenstore_path,
                                              XENSND_FIELD_EVT_RING_REF,
                                              XENSND_FIELD_EVT_EVT_CHNL);
                        if (ret < 0)
                                goto fail;
                }
        }
        ret = xenbus_transaction_end(xbt, 0);
        if (ret < 0) {
                if (ret == -EAGAIN)
                        goto again;

                xenbus_dev_fatal(front_info->xb_dev, ret,
                                 "completing transaction");
                goto fail_to_end;
        }
        return 0;
fail:
        xenbus_transaction_end(xbt, 1);
fail_to_end:
        xenbus_dev_fatal(front_info->xb_dev, ret, "writing XenStore");
        return ret;
}

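/* Set both channels of a pair to the (dis)connected state. */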
void xen_snd_front_evtchnl_pair_set_connected(struct xen_snd_front_evtchnl_pair *evt_pair,
                                              bool is_connected)
{
        enum xen_snd_front_evtchnl_state state;

        if (is_connected)
                state = EVTCHNL_STATE_CONNECTED;
        else
                state = EVTCHNL_STATE_DISCONNECTED;

        scoped_guard(mutex, &evt_pair->req.ring_io_lock) {
                evt_pair->req.state = state;
        }

        scoped_guard(mutex, &evt_pair->evt.ring_io_lock) {
                evt_pair->evt.state = state;
        }
}

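/* Reset the next expected event id for both channels of a pair. */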
void xen_snd_front_evtchnl_pair_clear(struct xen_snd_front_evtchnl_pair *evt_pair)
{
        scoped_guard(mutex, &evt_pair->req.ring_io_lock) {
                evt_pair->req.evt_next_id = 0;
        }

        scoped_guard(mutex, &evt_pair->evt.ring_io_lock) {
                evt_pair->evt.evt_next_id = 0;
        }
}