Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/sound/core/seq/seq_queue.c
29267 views
1
// SPDX-License-Identifier: GPL-2.0-or-later
2
/*
3
* ALSA sequencer Timing queue handling
4
* Copyright (c) 1998-1999 by Frank van de Pol <[email protected]>
5
*
6
* MAJOR CHANGES
7
* Nov. 13, 1999 Takashi Iwai <[email protected]>
8
* - Queues are allocated dynamically via ioctl.
9
* - When owner client is deleted, all owned queues are deleted, too.
10
* - Owner of unlocked queue is kept unmodified even if it is
11
* manipulated by other clients.
12
* - Owner field in SET_QUEUE_OWNER ioctl must be identical with the
13
* caller client. i.e. Changing owner to a third client is not
14
* allowed.
15
*
16
* Aug. 30, 2000 Takashi Iwai
17
* - Queues are managed in static array again, but with better way.
18
* The API itself is identical.
19
* - The queue is locked when struct snd_seq_queue pointer is returned via
20
* queueptr(). This pointer *MUST* be released afterward by
21
* queuefree(ptr).
22
* - Addition of experimental sync support.
23
*/
24
25
#include <linux/init.h>
26
#include <linux/slab.h>
27
#include <sound/core.h>
28
29
#include "seq_memory.h"
30
#include "seq_queue.h"
31
#include "seq_clientmgr.h"
32
#include "seq_fifo.h"
33
#include "seq_timer.h"
34
#include "seq_info.h"
35
36
/* list of allocated queues */
static struct snd_seq_queue *queue_list[SNDRV_SEQ_MAX_QUEUES];
/* protects queue_list[] and num_queues */
static DEFINE_SPINLOCK(queue_list_lock);
/* number of queues allocated */
static int num_queues;

/* return the current number of allocated queues */
int snd_seq_queue_get_cur_queues(void)
{
	/* NOTE(review): read without taking queue_list_lock; presumably an
	 * informational snapshot where a stale value is acceptable — confirm
	 */
	return num_queues;
}
46
47
/*----------------------------------------------------------------*/
48
49
/* assign queue id and insert to list */
50
static int queue_list_add(struct snd_seq_queue *q)
51
{
52
int i;
53
54
guard(spinlock_irqsave)(&queue_list_lock);
55
for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
56
if (! queue_list[i]) {
57
queue_list[i] = q;
58
q->queue = i;
59
num_queues++;
60
return i;
61
}
62
}
63
return -1;
64
}
65
66
/* detach the queue with the given id from the table, but only when it is
 * owned by @client; the queue is marked kernel-locked (klocked) under its
 * owner_lock so no other client can grab it while it is being torn down.
 * Returns the detached queue (caller is responsible for deleting it),
 * or NULL when the id is empty or owned by someone else.
 */
static struct snd_seq_queue *queue_list_remove(int id, int client)
{
	struct snd_seq_queue *q;

	guard(spinlock_irqsave)(&queue_list_lock);
	q = queue_list[id];
	if (q) {
		/* owner check must be done under the queue's own owner_lock */
		guard(spinlock)(&q->owner_lock);
		if (q->owner == client) {
			/* found */
			q->klocked = 1;
			queue_list[id] = NULL;
			num_queues--;
			return q;
		}
	}
	return NULL;
}
84
85
/*----------------------------------------------------------------*/
86
87
/* create new queue (constructor) */
88
static struct snd_seq_queue *queue_new(int owner, int locked)
89
{
90
struct snd_seq_queue *q;
91
92
q = kzalloc(sizeof(*q), GFP_KERNEL);
93
if (!q)
94
return NULL;
95
96
spin_lock_init(&q->owner_lock);
97
spin_lock_init(&q->check_lock);
98
mutex_init(&q->timer_mutex);
99
snd_use_lock_init(&q->use_lock);
100
q->queue = -1;
101
102
q->tickq = snd_seq_prioq_new();
103
q->timeq = snd_seq_prioq_new();
104
q->timer = snd_seq_timer_new();
105
if (q->tickq == NULL || q->timeq == NULL || q->timer == NULL) {
106
snd_seq_prioq_delete(&q->tickq);
107
snd_seq_prioq_delete(&q->timeq);
108
snd_seq_timer_delete(&q->timer);
109
kfree(q);
110
return NULL;
111
}
112
113
q->owner = owner;
114
q->locked = locked;
115
q->klocked = 0;
116
117
return q;
118
}
119
120
/* delete queue (destructor) -
 * stops/closes the timer, waits until every concurrent user has dropped
 * its use_lock reference, then frees the priority queues, the timer and
 * the queue object itself
 */
static void queue_delete(struct snd_seq_queue *q)
{
	/* stop and release the timer */
	mutex_lock(&q->timer_mutex);
	snd_seq_timer_stop(q->timer);
	snd_seq_timer_close(q);
	mutex_unlock(&q->timer_mutex);
	/* wait until access free */
	snd_use_lock_sync(&q->use_lock);
	/* release resources... */
	snd_seq_prioq_delete(&q->tickq);
	snd_seq_prioq_delete(&q->timeq);
	snd_seq_timer_delete(&q->timer);

	kfree(q);
}
137
138
139
/*----------------------------------------------------------------*/
140
141
/* delete all existing queues */
142
void snd_seq_queues_delete(void)
143
{
144
int i;
145
146
/* clear list */
147
for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
148
if (queue_list[i])
149
queue_delete(queue_list[i]);
150
}
151
}
152
153
static void queue_use(struct snd_seq_queue *queue, int client, int use);
154
155
/* allocate a new queue -
156
* return pointer to new queue or ERR_PTR(-errno) for error
157
* The new queue's use_lock is set to 1. It is the caller's responsibility to
158
* call snd_use_lock_free(&q->use_lock).
159
*/
160
struct snd_seq_queue *snd_seq_queue_alloc(int client, int locked, unsigned int info_flags)
161
{
162
struct snd_seq_queue *q;
163
164
q = queue_new(client, locked);
165
if (q == NULL)
166
return ERR_PTR(-ENOMEM);
167
q->info_flags = info_flags;
168
queue_use(q, client, 1);
169
snd_use_lock_use(&q->use_lock);
170
if (queue_list_add(q) < 0) {
171
snd_use_lock_free(&q->use_lock);
172
queue_delete(q);
173
return ERR_PTR(-ENOMEM);
174
}
175
return q;
176
}
177
178
/* delete a queue - queue must be owned by the client */
179
int snd_seq_queue_delete(int client, int queueid)
180
{
181
struct snd_seq_queue *q;
182
183
if (queueid < 0 || queueid >= SNDRV_SEQ_MAX_QUEUES)
184
return -EINVAL;
185
q = queue_list_remove(queueid, client);
186
if (q == NULL)
187
return -EINVAL;
188
queue_delete(q);
189
190
return 0;
191
}
192
193
194
/* return pointer to queue structure for specified id */
195
struct snd_seq_queue *queueptr(int queueid)
196
{
197
struct snd_seq_queue *q;
198
199
if (queueid < 0 || queueid >= SNDRV_SEQ_MAX_QUEUES)
200
return NULL;
201
guard(spinlock_irqsave)(&queue_list_lock);
202
q = queue_list[queueid];
203
if (q)
204
snd_use_lock_use(&q->use_lock);
205
return q;
206
}
207
208
/* return the (first) queue matching with the specified name;
 * on success the returned queue carries a use_lock reference which the
 * caller must release with queuefree()
 */
struct snd_seq_queue *snd_seq_queue_find_name(char *name)
{
	int i;

	for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
		/* reference from queueptr() is auto-dropped by the cleanup
		 * handler when q goes out of scope without a match
		 */
		struct snd_seq_queue *q __free(snd_seq_queue) = NULL;
		q = queueptr(i);
		if (q) {
			if (strncmp(q->name, name, sizeof(q->name)) == 0)
				/* hand the reference over to the caller;
				 * no_free_ptr() disarms the auto-cleanup
				 */
				return no_free_ptr(q);
		}
	}
	return NULL;
}
223
224
225
/* -------------------------------------------------------- */
226
227
#define MAX_CELL_PROCESSES_IN_QUEUE 1000
228
229
/* dispatch all pending events whose timestamp has been reached, from both
 * the tick and the real-time priority queues; at most
 * MAX_CELL_PROCESSES_IN_QUEUE cells are handled per invocation, the rest
 * is deferred to the next call
 */
void snd_seq_check_queue(struct snd_seq_queue *q, int atomic, int hop)
{
	struct snd_seq_event_cell *cell;
	snd_seq_tick_time_t cur_tick;
	snd_seq_real_time_t cur_time;
	int processed = 0;

	if (q == NULL)
		return;

	/* make this function non-reentrant */
	scoped_guard(spinlock_irqsave, &q->check_lock) {
		if (q->check_blocked) {
			/* ask the running checker to rescan before finishing */
			q->check_again = 1;
			return; /* other thread is already checking queues */
		}
		q->check_blocked = 1;
	}

__again:
	/* Process tick queue... */
	cur_tick = snd_seq_timer_get_cur_tick(q->timer);
	for (;;) {
		cell = snd_seq_prioq_cell_out(q->tickq, &cur_tick);
		if (!cell)
			break;
		snd_seq_dispatch_event(cell, atomic, hop);
		if (++processed >= MAX_CELL_PROCESSES_IN_QUEUE)
			goto out; /* the rest processed at the next batch */
	}

	/* Process time queue... */
	cur_time = snd_seq_timer_get_cur_time(q->timer, false);
	for (;;) {
		cell = snd_seq_prioq_cell_out(q->timeq, &cur_time);
		if (!cell)
			break;
		snd_seq_dispatch_event(cell, atomic, hop);
		if (++processed >= MAX_CELL_PROCESSES_IN_QUEUE)
			goto out; /* the rest processed at the next batch */
	}

out:
	/* free lock */
	scoped_guard(spinlock_irqsave, &q->check_lock) {
		if (q->check_again) {
			q->check_again = 0;
			/* rescan only while we still have budget left;
			 * the goto leaves the scoped guard, releasing the lock
			 */
			if (processed < MAX_CELL_PROCESSES_IN_QUEUE)
				goto __again;
		}
		q->check_blocked = 0;
	}
}
282
283
284
/* enqueue an event to a single queue */
int snd_seq_enqueue_event(struct snd_seq_event_cell *cell, int atomic, int hop)
{
	int dest, err;
	struct snd_seq_queue *q __free(snd_seq_queue) = NULL;

	if (snd_BUG_ON(!cell))
		return -EINVAL;
	dest = cell->event.queue; /* destination queue */
	q = queueptr(dest);
	if (q == NULL)
		return -EINVAL;
	/* handle relative time stamps, convert them into absolute;
	 * note that this rewrites the event in place
	 */
	if ((cell->event.flags & SNDRV_SEQ_TIME_MODE_MASK) == SNDRV_SEQ_TIME_MODE_REL) {
		switch (cell->event.flags & SNDRV_SEQ_TIME_STAMP_MASK) {
		case SNDRV_SEQ_TIME_STAMP_TICK:
			cell->event.time.tick += q->timer->tick.cur_tick;
			break;

		case SNDRV_SEQ_TIME_STAMP_REAL:
			snd_seq_inc_real_time(&cell->event.time.time,
					      &q->timer->cur_time);
			break;
		}
		cell->event.flags &= ~SNDRV_SEQ_TIME_MODE_MASK;
		cell->event.flags |= SNDRV_SEQ_TIME_MODE_ABS;
	}
	/* enqueue event in the real-time or midi queue */
	switch (cell->event.flags & SNDRV_SEQ_TIME_STAMP_MASK) {
	case SNDRV_SEQ_TIME_STAMP_TICK:
		err = snd_seq_prioq_cell_in(q->tickq, cell);
		break;

	case SNDRV_SEQ_TIME_STAMP_REAL:
	default:
		err = snd_seq_prioq_cell_in(q->timeq, cell);
		break;
	}

	if (err < 0)
		return err;

	/* trigger dispatching */
	snd_seq_check_queue(q, atomic, hop);

	return 0;
}
331
332
333
/*----------------------------------------------------------------*/
334
335
/* return non-zero if @client may modify the queue: either it is the owner,
 * or the queue is neither owner-locked nor kernel-locked.
 * Must be called with q->owner_lock held.
 */
static inline int check_access(struct snd_seq_queue *q, int client)
{
	return (q->owner == client) || (!q->locked && !q->klocked);
}
339
340
/* check if the client has permission to modify queue parameters.
341
* if it does, lock the queue
342
*/
343
static int queue_access_lock(struct snd_seq_queue *q, int client)
344
{
345
int access_ok;
346
347
guard(spinlock_irqsave)(&q->owner_lock);
348
access_ok = check_access(q, client);
349
if (access_ok)
350
q->klocked = 1;
351
return access_ok;
352
}
353
354
/* unlock the queue - drops the kernel lock taken by queue_access_lock() */
static inline void queue_access_unlock(struct snd_seq_queue *q)
{
	guard(spinlock_irqsave)(&q->owner_lock);
	q->klocked = 0;
}
360
361
/* exported - only checking permission, does not lock the queue.
 * Returns 1 if @client may modify the queue, 0 otherwise (including
 * when the queue id is invalid).
 */
int snd_seq_queue_check_access(int queueid, int client)
{
	struct snd_seq_queue *q __free(snd_seq_queue) = queueptr(queueid);

	if (! q)
		return 0;
	guard(spinlock_irqsave)(&q->owner_lock);
	return check_access(q, client);
}
371
372
/*----------------------------------------------------------------*/
373
374
/*
 * change queue's owner and permission;
 * returns 0 on success, -EINVAL for an invalid queue id, -EPERM when the
 * calling client has no access to the queue
 */
int snd_seq_queue_set_owner(int queueid, int client, int locked)
{
	struct snd_seq_queue *q __free(snd_seq_queue) = queueptr(queueid);

	if (q == NULL)
		return -EINVAL;

	/* kernel-lock the queue while the owner fields are changed */
	if (!queue_access_lock(q, client))
		return -EPERM;

	scoped_guard(spinlock_irqsave, &q->owner_lock) {
		q->locked = locked ? 1 : 0;
		q->owner = client;
	}
	queue_access_unlock(q);

	return 0;
}
395
396
397
/*----------------------------------------------------------------*/
398
399
/* open timer -
400
* q->use mutex should be down before calling this function to avoid
401
* confliction with snd_seq_queue_use()
402
*/
403
int snd_seq_queue_timer_open(int queueid)
404
{
405
int result = 0;
406
struct snd_seq_queue *queue __free(snd_seq_queue) = NULL;
407
struct snd_seq_timer *tmr;
408
409
queue = queueptr(queueid);
410
if (queue == NULL)
411
return -EINVAL;
412
tmr = queue->timer;
413
result = snd_seq_timer_open(queue);
414
if (result < 0) {
415
snd_seq_timer_defaults(tmr);
416
result = snd_seq_timer_open(queue);
417
}
418
return result;
419
}
420
421
/* close timer -
422
* q->use mutex should be down before calling this function
423
*/
424
int snd_seq_queue_timer_close(int queueid)
425
{
426
struct snd_seq_queue *queue __free(snd_seq_queue) = NULL;
427
int result = 0;
428
429
queue = queueptr(queueid);
430
if (queue == NULL)
431
return -EINVAL;
432
snd_seq_timer_close(queue);
433
return result;
434
}
435
436
/* change queue tempo and ppq;
 * returns 0 (or a timer-layer result) on success, -EINVAL for an invalid
 * queue id, -EPERM when the client has no access to the queue
 */
int snd_seq_queue_timer_set_tempo(int queueid, int client,
				  struct snd_seq_queue_tempo *info)
{
	struct snd_seq_queue *q __free(snd_seq_queue) = queueptr(queueid);
	int result;

	if (q == NULL)
		return -EINVAL;
	if (!queue_access_lock(q, client))
		return -EPERM;

	result = snd_seq_timer_set_tempo_ppq(q->timer, info->tempo, info->ppq,
					     info->tempo_base);
	/* skew is updated only when a valid skew_base was supplied */
	if (result >= 0 && info->skew_base > 0)
		result = snd_seq_timer_set_skew(q->timer, info->skew_value,
					       info->skew_base);
	queue_access_unlock(q);
	return result;
}
456
457
/* use or unuse this queue -
 * maintains the per-client bitmap and the user count, and opens/closes
 * the timer accordingly.
 * Called either under queue->timer_mutex (snd_seq_queue_use()) or before
 * the queue is published (snd_seq_queue_alloc()).
 */
static void queue_use(struct snd_seq_queue *queue, int client, int use)
{
	if (use) {
		/* count the client only on a 0->1 bit transition */
		if (!test_and_set_bit(client, queue->clients_bitmap))
			queue->clients++;
	} else {
		if (test_and_clear_bit(client, queue->clients_bitmap))
			queue->clients--;
	}
	if (queue->clients) {
		/* first user: reset the timer to its defaults before opening */
		if (use && queue->clients == 1)
			snd_seq_timer_defaults(queue->timer);
		snd_seq_timer_open(queue);
	} else {
		/* last user gone: release the timer */
		snd_seq_timer_close(queue);
	}
}
475
476
/* use or unuse this queue -
477
* if it is the first client, starts the timer.
478
* if it is not longer used by any clients, stop the timer.
479
*/
480
int snd_seq_queue_use(int queueid, int client, int use)
481
{
482
struct snd_seq_queue *queue __free(snd_seq_queue) = NULL;
483
484
queue = queueptr(queueid);
485
if (queue == NULL)
486
return -EINVAL;
487
guard(mutex)(&queue->timer_mutex);
488
queue_use(queue, client, use);
489
return 0;
490
}
491
492
/*
493
* check if queue is used by the client
494
* return negative value if the queue is invalid.
495
* return 0 if not used, 1 if used.
496
*/
497
int snd_seq_queue_is_used(int queueid, int client)
498
{
499
struct snd_seq_queue *q __free(snd_seq_queue) = NULL;
500
501
q = queueptr(queueid);
502
if (q == NULL)
503
return -EINVAL; /* invalid queue */
504
return test_bit(client, q->clients_bitmap) ? 1 : 0;
505
}
506
507
508
/*----------------------------------------------------------------*/
509
510
/* final stage notification -
 * remove cells for no longer exist client (for non-owned queue)
 * or delete this queue (for owned queue)
 */
void snd_seq_queue_client_leave(int client)
{
	int i;

	/* delete own queues from queue list */
	for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
		/* queue_list_remove() only succeeds for queues owned by us */
		struct snd_seq_queue *q = queue_list_remove(i, client);
		if (q)
			queue_delete(q);
	}

	/* remove cells from existing queues -
	 * they are not owned by this client
	 */
	for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
		struct snd_seq_queue *q __free(snd_seq_queue) = queueptr(i);
		if (!q)
			continue;
		if (test_bit(client, q->clients_bitmap)) {
			/* flush this client's pending events, then drop its
			 * use count (may stop the queue's timer)
			 */
			snd_seq_prioq_leave(q->tickq, client, 0);
			snd_seq_prioq_leave(q->timeq, client, 0);
			snd_seq_queue_use(q->queue, client, 0);
		}
	}
}
539
540
541
542
/*----------------------------------------------------------------*/
543
544
/* remove cells based on flush criteria -
 * walks all queues used by @client and drops matching events from both
 * priority queues; when SNDRV_SEQ_REMOVE_DEST is set, only the queue
 * named in info->queue is flushed
 */
void snd_seq_queue_remove_cells(int client, struct snd_seq_remove_events *info)
{
	int i;

	for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
		struct snd_seq_queue *q __free(snd_seq_queue) = queueptr(i);
		if (!q)
			continue;
		if (test_bit(client, q->clients_bitmap) &&
		    (! (info->remove_mode & SNDRV_SEQ_REMOVE_DEST) ||
		     q->queue == info->queue)) {
			snd_seq_prioq_remove_events(q->tickq, client, info);
			snd_seq_prioq_remove_events(q->timeq, client, info);
		}
	}
}
561
562
/*----------------------------------------------------------------*/
563
564
/*
 * send events to all subscribed ports -
 * re-stamps a copy of @ev with the queue's current tick and re-addresses
 * it as coming from the system timer port, then dispatches it to all
 * subscribers
 */
static void queue_broadcast_event(struct snd_seq_queue *q, struct snd_seq_event *ev,
				  int atomic, int hop)
{
	struct snd_seq_event sev;

	/* work on a local copy so the caller's event stays untouched */
	sev = *ev;

	sev.flags = SNDRV_SEQ_TIME_STAMP_TICK|SNDRV_SEQ_TIME_MODE_ABS;
	sev.time.tick = q->timer->tick.cur_tick;
	sev.queue = q->queue;
	sev.data.queue.queue = q->queue;

	/* broadcast events from Timer port */
	sev.source.client = SNDRV_SEQ_CLIENT_SYSTEM;
	sev.source.port = SNDRV_SEQ_PORT_SYSTEM_TIMER;
	sev.dest.client = SNDRV_SEQ_ADDRESS_SUBSCRIBERS;
	snd_seq_kernel_client_dispatch(SNDRV_SEQ_CLIENT_SYSTEM, &sev, atomic, hop);
}
585
586
/*
 * process a received queue-control event.
 * this function is exported for seq_sync.c.
 * On success each control operation is broadcast to the queue's
 * subscribers; unknown event types are silently ignored.
 */
static void snd_seq_queue_process_event(struct snd_seq_queue *q,
					struct snd_seq_event *ev,
					int atomic, int hop)
{
	switch (ev->type) {
	case SNDRV_SEQ_EVENT_START:
		/* flush the sender's pending events before (re)starting */
		snd_seq_prioq_leave(q->tickq, ev->source.client, 1);
		snd_seq_prioq_leave(q->timeq, ev->source.client, 1);
		if (! snd_seq_timer_start(q->timer))
			queue_broadcast_event(q, ev, atomic, hop);
		break;

	case SNDRV_SEQ_EVENT_CONTINUE:
		if (! snd_seq_timer_continue(q->timer))
			queue_broadcast_event(q, ev, atomic, hop);
		break;

	case SNDRV_SEQ_EVENT_STOP:
		snd_seq_timer_stop(q->timer);
		queue_broadcast_event(q, ev, atomic, hop);
		break;

	case SNDRV_SEQ_EVENT_TEMPO:
		snd_seq_timer_set_tempo(q->timer, ev->data.queue.param.value);
		queue_broadcast_event(q, ev, atomic, hop);
		break;

	case SNDRV_SEQ_EVENT_SETPOS_TICK:
		/* broadcast only if the position change was accepted */
		if (snd_seq_timer_set_position_tick(q->timer, ev->data.queue.param.time.tick) == 0) {
			queue_broadcast_event(q, ev, atomic, hop);
		}
		break;

	case SNDRV_SEQ_EVENT_SETPOS_TIME:
		if (snd_seq_timer_set_position_time(q->timer, ev->data.queue.param.time.time) == 0) {
			queue_broadcast_event(q, ev, atomic, hop);
		}
		break;
	case SNDRV_SEQ_EVENT_QUEUE_SKEW:
		if (snd_seq_timer_set_skew(q->timer,
					   ev->data.queue.param.skew.value,
					   ev->data.queue.param.skew.base) == 0) {
			queue_broadcast_event(q, ev, atomic, hop);
		}
		break;
	}
}
637
638
639
/*
640
* Queue control via timer control port:
641
* this function is exported as a callback of timer port.
642
*/
643
int snd_seq_control_queue(struct snd_seq_event *ev, int atomic, int hop)
644
{
645
struct snd_seq_queue *q __free(snd_seq_queue) = NULL;
646
647
if (snd_BUG_ON(!ev))
648
return -EINVAL;
649
q = queueptr(ev->data.queue.queue);
650
651
if (q == NULL)
652
return -EINVAL;
653
654
if (!queue_access_lock(q, ev->source.client))
655
return -EPERM;
656
657
snd_seq_queue_process_event(q, ev, atomic, hop);
658
659
queue_access_unlock(q);
660
return 0;
661
}
662
663
664
/*----------------------------------------------------------------*/
665
666
#ifdef CONFIG_SND_PROC_FS
667
/* exported to seq_info.c - dump the state of all allocated queues into
 * the given proc buffer
 */
void snd_seq_info_queues_read(struct snd_info_entry *entry,
			      struct snd_info_buffer *buffer)
{
	int i, bpm;
	struct snd_seq_timer *tmr;
	bool locked;
	int owner;

	for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
		struct snd_seq_queue *q __free(snd_seq_queue) = queueptr(i);
		if (!q)
			continue;

		tmr = q->timer;
		/* derive beats-per-minute from the tempo; 0 when unset */
		if (tmr->tempo)
			bpm = (60000 * tmr->tempo_base) / tmr->tempo;
		else
			bpm = 0;

		/* snapshot owner/locked under the lock for a consistent view */
		scoped_guard(spinlock_irq, &q->owner_lock) {
			locked = q->locked;
			owner = q->owner;
		}

		snd_iprintf(buffer, "queue %d: [%s]\n", q->queue, q->name);
		snd_iprintf(buffer, "owned by client : %d\n", owner);
		snd_iprintf(buffer, "lock status : %s\n", locked ? "Locked" : "Free");
		snd_iprintf(buffer, "queued time events : %d\n", snd_seq_prioq_avail(q->timeq));
		snd_iprintf(buffer, "queued tick events : %d\n", snd_seq_prioq_avail(q->tickq));
		snd_iprintf(buffer, "timer state : %s\n", tmr->running ? "Running" : "Stopped");
		snd_iprintf(buffer, "timer PPQ : %d\n", tmr->ppq);
		snd_iprintf(buffer, "current tempo : %d\n", tmr->tempo);
		snd_iprintf(buffer, "tempo base : %d ns\n", tmr->tempo_base);
		snd_iprintf(buffer, "current BPM : %d\n", bpm);
		snd_iprintf(buffer, "current time : %d.%09d s\n", tmr->cur_time.tv_sec, tmr->cur_time.tv_nsec);
		snd_iprintf(buffer, "current tick : %d\n", tmr->tick.cur_tick);
		snd_iprintf(buffer, "\n");
	}
}
707
#endif /* CONFIG_SND_PROC_FS */
708
709
710