Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/sound/core/compress_offload.c
29266 views
1
// SPDX-License-Identifier: GPL-2.0-only
2
/*
3
* compress_core.c - compress offload core
4
*
5
* Copyright (C) 2011 Intel Corporation
6
* Authors: Vinod Koul <[email protected]>
7
* Pierre-Louis Bossart <[email protected]>
8
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
9
*
10
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
11
*/
12
#define FORMAT(fmt) "%s: %d: " fmt, __func__, __LINE__
13
#define pr_fmt(fmt) KBUILD_MODNAME ": " FORMAT(fmt)
14
15
#include <linux/file.h>
16
#include <linux/fs.h>
17
#include <linux/list.h>
18
#include <linux/math64.h>
19
#include <linux/mm.h>
20
#include <linux/mutex.h>
21
#include <linux/poll.h>
22
#include <linux/slab.h>
23
#include <linux/sched.h>
24
#include <linux/types.h>
25
#include <linux/uio.h>
26
#include <linux/uaccess.h>
27
#include <linux/dma-buf.h>
28
#include <linux/module.h>
29
#include <linux/compat.h>
30
#include <sound/core.h>
31
#include <sound/initval.h>
32
#include <sound/info.h>
33
#include <sound/compress_params.h>
34
#include <sound/compress_offload.h>
35
#include <sound/compress_driver.h>
36
37
/* struct snd_compr_codec_caps overflows the ioctl bit size for some
38
* architectures, so we need to disable the relevant ioctls.
39
*/
40
#if _IOC_SIZEBITS < 14
41
#define COMPR_CODEC_CAPS_OVERFLOW
42
#endif
43
44
/* TODO:
45
* - add substream support for multiple devices in case of
46
* SND_DYNAMIC_MINORS is not used
47
* - Multiple node representation
48
* driver should be able to register multiple nodes
49
*/
50
51
/* Per-open-fd context: capability flags plus the embedded stream instance. */
struct snd_compr_file {
	unsigned long caps;
	struct snd_compr_stream stream;
};
55
56
static void error_delayed_work(struct work_struct *work);
57
58
#if IS_ENABLED(CONFIG_SND_COMPRESS_ACCEL)
static void snd_compr_task_free_all(struct snd_compr_stream *stream);
#else
/* No-op stub when task-based (accel) offload support is compiled out. */
static inline void snd_compr_task_free_all(struct snd_compr_stream *stream) { }
#endif
63
64
/*
65
* a note on stream states used:
66
* we use following states in the compressed core
67
* SNDRV_PCM_STATE_OPEN: When stream has been opened.
68
* SNDRV_PCM_STATE_SETUP: When stream has been initialized. This is done by
69
* calling SNDRV_COMPRESS_SET_PARAMS. Running streams will come to this
70
* state at stop by calling SNDRV_COMPRESS_STOP, or at end of drain.
71
* SNDRV_PCM_STATE_PREPARED: When a stream has been written to (for
72
* playback only). User after setting up stream writes the data buffer
73
* before starting the stream.
74
* SNDRV_PCM_STATE_RUNNING: When stream has been started and is
75
* decoding/encoding and rendering/capturing data.
76
* SNDRV_PCM_STATE_DRAINING: When stream is draining current data. This is done
77
* by calling SNDRV_COMPRESS_DRAIN.
78
* SNDRV_PCM_STATE_PAUSED: When stream is paused. This is done by calling
79
* SNDRV_COMPRESS_PAUSE. It can be stopped or resumed by calling
80
* SNDRV_COMPRESS_STOP or SNDRV_COMPRESS_RESUME respectively.
81
*/
82
/*
 * Open handler for the compress device node: derive the stream direction
 * from the open mode, look up the device, allocate the per-fd stream and
 * runtime, and call the driver's open op.
 */
static int snd_compr_open(struct inode *inode, struct file *f)
{
	struct snd_compr *compr;
	struct snd_compr_file *data;
	struct snd_compr_runtime *runtime;
	enum snd_compr_direction dirn;
	int maj = imajor(inode);
	int ret;

	/* open mode encodes direction: write-only = playback, read-only =
	 * capture, read-write = accel (task-based) operation */
	if ((f->f_flags & O_ACCMODE) == O_WRONLY)
		dirn = SND_COMPRESS_PLAYBACK;
	else if ((f->f_flags & O_ACCMODE) == O_RDONLY)
		dirn = SND_COMPRESS_CAPTURE;
	else if ((f->f_flags & O_ACCMODE) == O_RDWR)
		dirn = SND_COMPRESS_ACCEL;
	else
		return -EINVAL;

	if (maj == snd_major)
		compr = snd_lookup_minor_data(iminor(inode),
					SNDRV_DEVICE_TYPE_COMPRESS);
	else
		return -EBADFD;

	if (compr == NULL) {
		pr_err("no device data!!!\n");
		return -ENODEV;
	}

	if (dirn != compr->direction) {
		pr_err("this device doesn't support this direction\n");
		snd_card_unref(compr->card);
		return -EINVAL;
	}

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data) {
		snd_card_unref(compr->card);
		return -ENOMEM;
	}

	INIT_DELAYED_WORK(&data->stream.error_work, error_delayed_work);

	data->stream.ops = compr->ops;
	data->stream.direction = dirn;
	data->stream.private_data = compr->private_data;
	data->stream.device = compr;
	runtime = kzalloc(sizeof(*runtime), GFP_KERNEL);
	if (!runtime) {
		kfree(data);
		snd_card_unref(compr->card);
		return -ENOMEM;
	}
	runtime->state = SNDRV_PCM_STATE_OPEN;
	init_waitqueue_head(&runtime->sleep);
#if IS_ENABLED(CONFIG_SND_COMPRESS_ACCEL)
	INIT_LIST_HEAD(&runtime->tasks);
#endif
	data->stream.runtime = runtime;
	f->private_data = (void *)data;
	/* driver open is serialized against other device operations */
	scoped_guard(mutex, &compr->lock)
		ret = compr->ops->open(&data->stream);
	if (ret) {
		kfree(runtime);
		kfree(data);
	}
	/* drop the reference taken by snd_lookup_minor_data() */
	snd_card_unref(compr->card);
	return ret;
}
151
152
/*
 * Release handler: stop an active stream, free any pending accel tasks,
 * then tear down the driver state, buffer, runtime and per-fd context.
 */
static int snd_compr_free(struct inode *inode, struct file *f)
{
	struct snd_compr_file *data = f->private_data;
	struct snd_compr_runtime *runtime = data->stream.runtime;

	/* make sure the error worker can't run against freed state */
	cancel_delayed_work_sync(&data->stream.error_work);

	switch (runtime->state) {
	case SNDRV_PCM_STATE_RUNNING:
	case SNDRV_PCM_STATE_DRAINING:
	case SNDRV_PCM_STATE_PAUSED:
		data->stream.ops->trigger(&data->stream, SNDRV_PCM_TRIGGER_STOP);
		break;
	default:
		break;
	}

	snd_compr_task_free_all(&data->stream);

	data->stream.ops->free(&data->stream);
	/* buffer is only owned by us when no DMA buffer was attached */
	if (!data->stream.runtime->dma_buffer_p)
		kfree(data->stream.runtime->buffer);
	kfree(data->stream.runtime);
	kfree(data);
	return 0;
}
178
179
static void
180
snd_compr_tstamp32_from_64(struct snd_compr_tstamp *tstamp32,
181
const struct snd_compr_tstamp64 *tstamp64)
182
{
183
tstamp32->byte_offset = tstamp64->byte_offset;
184
tstamp32->copied_total = (u32)tstamp64->copied_total;
185
tstamp32->pcm_frames = (u32)tstamp64->pcm_frames;
186
tstamp32->pcm_io_frames = (u32)tstamp64->pcm_io_frames;
187
tstamp32->sampling_rate = tstamp64->sampling_rate;
188
}
189
190
/*
 * Ask the driver for the current position and update the runtime's
 * transferred/available byte counters from it.
 * Returns -ENOTSUPP when the driver has no pointer op.
 */
static int snd_compr_update_tstamp(struct snd_compr_stream *stream,
		struct snd_compr_tstamp64 *tstamp)
{
	if (!stream->ops->pointer)
		return -ENOTSUPP;
	stream->ops->pointer(stream, tstamp);
	pr_debug("dsp consumed till %u total %llu bytes\n", tstamp->byte_offset,
		tstamp->copied_total);
	/* for playback the DSP consumes; for capture it produces */
	if (stream->direction == SND_COMPRESS_PLAYBACK)
		stream->runtime->total_bytes_transferred = tstamp->copied_total;
	else
		stream->runtime->total_bytes_available = tstamp->copied_total;
	return 0;
}
204
205
/*
 * Compute the number of bytes the application may write (playback) or
 * read (capture) right now, filling @avail with the latest timestamp.
 */
static size_t snd_compr_calc_avail(struct snd_compr_stream *stream,
		struct snd_compr_avail64 *avail)
{
	memset(avail, 0, sizeof(*avail));
	snd_compr_update_tstamp(stream, &avail->tstamp);
	/* Still need to return avail even if tstamp can't be filled in */

	/* setup + nothing written yet: whole buffer is writable */
	if (stream->runtime->total_bytes_available == 0 &&
			stream->runtime->state == SNDRV_PCM_STATE_SETUP &&
			stream->direction == SND_COMPRESS_PLAYBACK) {
		pr_debug("detected init and someone forgot to do a write\n");
		return stream->runtime->buffer_size;
	}
	pr_debug("app wrote %llu, DSP consumed %llu\n",
			stream->runtime->total_bytes_available,
			stream->runtime->total_bytes_transferred);
	if (stream->runtime->total_bytes_available ==
				stream->runtime->total_bytes_transferred) {
		if (stream->direction == SND_COMPRESS_PLAYBACK) {
			pr_debug("both pointers are same, returning full avail\n");
			return stream->runtime->buffer_size;
		} else {
			pr_debug("both pointers are same, returning no avail\n");
			return 0;
		}
	}

	/* bytes queued but not yet consumed... */
	avail->avail = stream->runtime->total_bytes_available -
			stream->runtime->total_bytes_transferred;
	/* ...for playback, free space is the complement of that */
	if (stream->direction == SND_COMPRESS_PLAYBACK)
		avail->avail = stream->runtime->buffer_size - avail->avail;

	pr_debug("ret avail as %zu\n", (size_t)avail->avail);
	return avail->avail;
}
240
241
/* Convenience wrapper when only the byte count is wanted. */
static inline size_t snd_compr_get_avail(struct snd_compr_stream *stream)
{
	struct snd_compr_avail64 avail;

	return snd_compr_calc_avail(stream, &avail);
}
247
248
/* Narrow the 64-bit avail structure into the legacy 32-bit ioctl layout. */
static void snd_compr_avail32_from_64(struct snd_compr_avail *avail32,
				      const struct snd_compr_avail64 *avail64)
{
	avail32->avail = avail64->avail;
	snd_compr_tstamp32_from_64(&avail32->tstamp, &avail64->tstamp);
}
254
255
/*
 * SNDRV_COMPRESS_AVAIL(64) ioctl backend: compute availability and copy
 * either the 32-bit or the 64-bit layout to user space.
 * Not valid on accel streams (no ring buffer).
 */
static int snd_compr_ioctl_avail(struct snd_compr_stream *stream,
		unsigned long arg, bool is_32bit)
{
	struct snd_compr_avail64 ioctl_avail64;
	struct snd_compr_avail ioctl_avail32;
	size_t avail;
	const void *copy_from = &ioctl_avail64;
	size_t copy_size = sizeof(ioctl_avail64);

	if (stream->direction == SND_COMPRESS_ACCEL)
		return -EBADFD;

	avail = snd_compr_calc_avail(stream, &ioctl_avail64);
	ioctl_avail64.avail = avail;
	if (is_32bit) {
		snd_compr_avail32_from_64(&ioctl_avail32, &ioctl_avail64);
		copy_from = &ioctl_avail32;
		copy_size = sizeof(ioctl_avail32);
	}

	/* state is validated after the computation on purpose: avail is
	 * still filled in, but OPEN/XRUN streams report an error instead */
	switch (stream->runtime->state) {
	case SNDRV_PCM_STATE_OPEN:
		return -EBADFD;
	case SNDRV_PCM_STATE_XRUN:
		return -EPIPE;
	default:
		break;
	}

	if (copy_to_user((__u64 __user *)arg, copy_from, copy_size))
		return -EFAULT;
	return 0;
}
288
289
/*
 * Copy @count bytes from user space into the runtime ring buffer at the
 * application pointer, wrapping at the buffer end, then ack the driver.
 * Returns @count or -EFAULT.
 */
static int snd_compr_write_data(struct snd_compr_stream *stream,
	       const char __user *buf, size_t count)
{
	void *dstn;
	size_t copy;
	struct snd_compr_runtime *runtime = stream->runtime;
	/* 64-bit Modulus */
	u64 app_pointer = div64_u64(runtime->total_bytes_available,
				    runtime->buffer_size);
	app_pointer = runtime->total_bytes_available -
		      (app_pointer * runtime->buffer_size);

	dstn = runtime->buffer + app_pointer;
	pr_debug("copying %lu at %llu\n",
			(unsigned long)count, app_pointer);
	if (count < runtime->buffer_size - app_pointer) {
		/* fits without wrapping */
		if (copy_from_user(dstn, buf, count))
			return -EFAULT;
	} else {
		/* split copy: tail of the buffer, then wrap to the start */
		copy = runtime->buffer_size - app_pointer;
		if (copy_from_user(dstn, buf, copy))
			return -EFAULT;
		if (copy_from_user(runtime->buffer, buf + copy, count - copy))
			return -EFAULT;
	}
	/* if DSP cares, let it know data has been written */
	if (stream->ops->ack)
		stream->ops->ack(stream, count);
	return count;
}
318
319
/*
 * write(2) handler for playback streams: copy as much of the user data
 * as currently fits (via the driver's copy op or the core ring buffer)
 * and advance the stream to PREPARED on its first write.
 */
static ssize_t snd_compr_write(struct file *f, const char __user *buf,
		size_t count, loff_t *offset)
{
	struct snd_compr_file *data = f->private_data;
	struct snd_compr_stream *stream;
	size_t avail;
	int retval;

	if (snd_BUG_ON(!data))
		return -EFAULT;

	stream = &data->stream;
	/* accel streams exchange data through dma-buf tasks, not write() */
	if (stream->direction == SND_COMPRESS_ACCEL)
		return -EBADFD;
	guard(mutex)(&stream->device->lock);
	/* write is allowed when stream is running or has been setup */
	switch (stream->runtime->state) {
	case SNDRV_PCM_STATE_SETUP:
	case SNDRV_PCM_STATE_PREPARED:
	case SNDRV_PCM_STATE_RUNNING:
		break;
	default:
		return -EBADFD;
	}

	avail = snd_compr_get_avail(stream);
	pr_debug("avail returned %lu\n", (unsigned long)avail);
	/* calculate how much we can write to buffer */
	if (avail > count)
		avail = count;

	if (stream->ops->copy) {
		char __user* cbuf = (char __user*)buf;
		retval = stream->ops->copy(stream, cbuf, avail);
	} else {
		retval = snd_compr_write_data(stream, buf, avail);
	}
	if (retval > 0)
		stream->runtime->total_bytes_available += retval;

	/* while initiating the stream, write should be called before START
	 * call, so in setup move state */
	if (stream->runtime->state == SNDRV_PCM_STATE_SETUP) {
		stream->runtime->state = SNDRV_PCM_STATE_PREPARED;
		pr_debug("stream prepared, Houston we are good to go\n");
	}

	return retval;
}
368
369
370
/*
 * read(2) handler for capture streams: hand available bytes to user
 * space through the driver's copy op (mandatory for reads).
 */
static ssize_t snd_compr_read(struct file *f, char __user *buf,
		size_t count, loff_t *offset)
{
	struct snd_compr_file *data = f->private_data;
	struct snd_compr_stream *stream;
	size_t avail;
	int retval;

	if (snd_BUG_ON(!data))
		return -EFAULT;

	stream = &data->stream;
	if (stream->direction == SND_COMPRESS_ACCEL)
		return -EBADFD;
	guard(mutex)(&stream->device->lock);

	/* read is allowed when stream is running, paused, draining and setup
	 * (yes setup is state which we transition to after stop, so if user
	 * wants to read data after stop we allow that)
	 */
	switch (stream->runtime->state) {
	case SNDRV_PCM_STATE_OPEN:
	case SNDRV_PCM_STATE_PREPARED:
	case SNDRV_PCM_STATE_SUSPENDED:
	case SNDRV_PCM_STATE_DISCONNECTED:
		return -EBADFD;
	case SNDRV_PCM_STATE_XRUN:
		return -EPIPE;
	}

	avail = snd_compr_get_avail(stream);
	pr_debug("avail returned %lu\n", (unsigned long)avail);
	/* calculate how much we can read from buffer */
	if (avail > count)
		avail = count;

	/* capture has no core-managed buffer; the driver must copy */
	if (stream->ops->copy)
		retval = stream->ops->copy(stream, buf, avail);
	else
		return -ENXIO;
	if (retval > 0)
		stream->runtime->total_bytes_transferred += retval;

	return retval;
}
415
416
/* mmap is not supported on compress devices. */
static int snd_compr_mmap(struct file *f, struct vm_area_struct *vma)
{
	return -ENXIO;
}
420
421
static __poll_t snd_compr_get_poll(struct snd_compr_stream *stream)
422
{
423
if (stream->direction == SND_COMPRESS_PLAYBACK)
424
return EPOLLOUT | EPOLLWRNORM;
425
else
426
return EPOLLIN | EPOLLRDNORM;
427
}
428
429
/*
 * poll(2) handler: report readiness based on the stream state and the
 * amount of data available (at least one fragment), with a dedicated
 * path for accel streams based on task slots and finished tasks.
 */
static __poll_t snd_compr_poll(struct file *f, poll_table *wait)
{
	struct snd_compr_file *data = f->private_data;
	struct snd_compr_stream *stream;
	struct snd_compr_runtime *runtime;
	size_t avail;
	__poll_t retval = 0;

	if (snd_BUG_ON(!data))
		return EPOLLERR;

	stream = &data->stream;
	runtime = stream->runtime;

	guard(mutex)(&stream->device->lock);

	switch (runtime->state) {
	case SNDRV_PCM_STATE_OPEN:
	case SNDRV_PCM_STATE_XRUN:
		return snd_compr_get_poll(stream) | EPOLLERR;
	default:
		break;
	}

	poll_wait(f, &runtime->sleep, wait);

#if IS_ENABLED(CONFIG_SND_COMPRESS_ACCEL)
	if (stream->direction == SND_COMPRESS_ACCEL) {
		struct snd_compr_task_runtime *task;
		/* writable while free task slots remain */
		if (runtime->fragments > runtime->active_tasks)
			retval |= EPOLLOUT | EPOLLWRNORM;
		task = list_first_entry_or_null(&runtime->tasks,
						struct snd_compr_task_runtime,
						list);
		/* readable once the oldest task has finished */
		if (task && task->state == SND_COMPRESS_TASK_STATE_FINISHED)
			retval |= EPOLLIN | EPOLLRDNORM;
		return retval;
	}
#endif

	avail = snd_compr_get_avail(stream);
	pr_debug("avail is %lu\n", (unsigned long)avail);
	/* check if we have at least one fragment to fill */
	switch (runtime->state) {
	case SNDRV_PCM_STATE_DRAINING:
		/* stream has been woken up after drain is complete
		 * draining done so set stream state to stopped
		 */
		retval = snd_compr_get_poll(stream);
		runtime->state = SNDRV_PCM_STATE_SETUP;
		break;
	case SNDRV_PCM_STATE_RUNNING:
	case SNDRV_PCM_STATE_PREPARED:
	case SNDRV_PCM_STATE_PAUSED:
		if (avail >= runtime->fragment_size)
			retval = snd_compr_get_poll(stream);
		break;
	default:
		return snd_compr_get_poll(stream) | EPOLLERR;
	}

	return retval;
}
492
493
/*
 * SNDRV_COMPRESS_GET_CAPS backend: query driver capabilities and copy
 * them to user space. -ENXIO when the driver has no get_caps op.
 */
static int
snd_compr_get_caps(struct snd_compr_stream *stream, unsigned long arg)
{
	int retval;
	struct snd_compr_caps caps;

	if (!stream->ops->get_caps)
		return -ENXIO;

	memset(&caps, 0, sizeof(caps));
	retval = stream->ops->get_caps(stream, &caps);
	if (retval)
		goto out;
	if (copy_to_user((void __user *)arg, &caps, sizeof(caps)))
		retval = -EFAULT;
out:
	return retval;
}
511
512
#ifndef COMPR_CODEC_CAPS_OVERFLOW
/*
 * SNDRV_COMPRESS_GET_CODEC_CAPS backend. The caps struct is large, so it
 * is heap-allocated (auto-freed via __free(kfree) on every return path).
 */
static int
snd_compr_get_codec_caps(struct snd_compr_stream *stream, unsigned long arg)
{
	int retval;
	struct snd_compr_codec_caps *caps __free(kfree) = NULL;

	if (!stream->ops->get_codec_caps)
		return -ENXIO;

	caps = kzalloc(sizeof(*caps), GFP_KERNEL);
	if (!caps)
		return -ENOMEM;

	retval = stream->ops->get_codec_caps(stream, caps);
	if (retval)
		return retval;
	if (copy_to_user((void __user *)arg, caps, sizeof(*caps)))
		return -EFAULT;
	return retval;
}
#endif /* !COMPR_CODEC_CAPS_OVERFLOW */
534
535
/*
 * Allocate a DMA buffer of @size bytes for the stream, using the
 * device/type preconfigured in stream->dma_buffer, and attach it to the
 * runtime. Returns 1 on success (buffer newly allocated) or a negative
 * error code.
 */
int snd_compr_malloc_pages(struct snd_compr_stream *stream, size_t size)
{
	struct snd_dma_buffer *dmab;
	int ret;

	if (snd_BUG_ON(!(stream) || !(stream)->runtime))
		return -EINVAL;
	dmab = kzalloc(sizeof(*dmab), GFP_KERNEL);
	if (!dmab)
		return -ENOMEM;
	dmab->dev = stream->dma_buffer.dev;
	ret = snd_dma_alloc_pages(dmab->dev.type, dmab->dev.dev, size, dmab);
	if (ret < 0) {
		kfree(dmab);
		return ret;
	}

	snd_compr_set_runtime_buffer(stream, dmab);
	stream->runtime->dma_bytes = size;
	return 1;
}
EXPORT_SYMBOL(snd_compr_malloc_pages);
557
558
/*
 * Release the runtime DMA buffer previously set up by
 * snd_compr_malloc_pages(); a buffer owned by the device itself
 * (stream->dma_buffer) is only detached, not freed.
 */
int snd_compr_free_pages(struct snd_compr_stream *stream)
{
	struct snd_compr_runtime *runtime;

	if (snd_BUG_ON(!(stream) || !(stream)->runtime))
		return -EINVAL;
	runtime = stream->runtime;
	if (runtime->dma_area == NULL)
		return 0;
	if (runtime->dma_buffer_p != &stream->dma_buffer) {
		/* It's a newly allocated buffer. Release it now. */
		snd_dma_free_pages(runtime->dma_buffer_p);
		kfree(runtime->dma_buffer_p);
	}

	snd_compr_set_runtime_buffer(stream, NULL);
	return 0;
}
EXPORT_SYMBOL(snd_compr_free_pages);
577
578
/* revisit this with snd_pcm_preallocate_xxx */
/*
 * Set up the runtime buffer for the fragment geometry in @params: reuse
 * a preattached DMA buffer, allocate a kernel buffer, or skip entirely
 * when the driver copies data itself or the stream is accel (task based).
 */
static int snd_compr_allocate_buffer(struct snd_compr_stream *stream,
		struct snd_compr_params *params)
{
	unsigned int buffer_size;
	void *buffer = NULL;

	/* accel streams carry data in dma-buf tasks: no ring buffer */
	if (stream->direction == SND_COMPRESS_ACCEL)
		goto params;

	buffer_size = params->buffer.fragment_size * params->buffer.fragments;
	if (stream->ops->copy) {
		buffer = NULL;
		/* if copy is defined the driver will be required to copy
		 * the data from core
		 */
	} else {
		if (stream->runtime->dma_buffer_p) {

			if (buffer_size > stream->runtime->dma_buffer_p->bytes)
				dev_err(stream->device->dev,
						"Not enough DMA buffer");
			else
				buffer = stream->runtime->dma_buffer_p->area;

		} else {
			buffer = kmalloc(buffer_size, GFP_KERNEL);
		}

		if (!buffer)
			return -ENOMEM;
	}

	stream->runtime->buffer = buffer;
	stream->runtime->buffer_size = buffer_size;
params:
	stream->runtime->fragment_size = params->buffer.fragment_size;
	stream->runtime->fragments = params->buffer.fragments;
	return 0;
}
618
619
/*
 * Validate user-supplied buffer geometry and codec parameters before
 * they reach the driver: non-zero fragment size, fragment count in
 * range (overflow-safe for non-accel), valid codec id and channels.
 */
static int
snd_compress_check_input(struct snd_compr_stream *stream, struct snd_compr_params *params)
{
	u32 max_fragments;

	/* first let's check the buffer parameter's */
	if (params->buffer.fragment_size == 0)
		return -EINVAL;

	if (stream->direction == SND_COMPRESS_ACCEL)
		max_fragments = 64;			/* safe value */
	else
		/* cap so fragment_size * fragments cannot overflow u32 */
		max_fragments = U32_MAX / params->buffer.fragment_size;

	if (params->buffer.fragments > max_fragments ||
	    params->buffer.fragments == 0)
		return -EINVAL;

	/* now codec parameters */
	if (params->codec.id == 0 || params->codec.id > SND_AUDIOCODEC_MAX)
		return -EINVAL;

	if (params->codec.ch_in == 0 || params->codec.ch_out == 0)
		return -EINVAL;

	return 0;
}
646
647
/*
 * SNDRV_COMPRESS_SET_PARAMS backend: validate and apply stream
 * parameters. Allowed only on a freshly opened stream, or while
 * next_track is pending (gapless parameter change).
 */
static int
snd_compr_set_params(struct snd_compr_stream *stream, unsigned long arg)
{
	struct snd_compr_params *params __free(kfree) = NULL;
	int retval;

	if (stream->runtime->state == SNDRV_PCM_STATE_OPEN || stream->next_track) {
		/*
		 * we should allow parameter change only when stream has been
		 * opened not in other cases
		 */
		params = memdup_user((void __user *)arg, sizeof(*params));
		if (IS_ERR(params))
			return PTR_ERR(params);

		retval = snd_compress_check_input(stream, params);
		if (retval)
			return retval;

		retval = snd_compr_allocate_buffer(stream, params);
		if (retval)
			return -ENOMEM;

		retval = stream->ops->set_params(stream, params);
		if (retval)
			return retval;

		/* gapless next-track param change keeps the current state */
		if (stream->next_track)
			return retval;

		stream->metadata_set = false;
		stream->next_track = false;

		stream->runtime->state = SNDRV_PCM_STATE_SETUP;
	} else {
		return -EPERM;
	}
	return retval;
}
686
687
/*
 * SNDRV_COMPRESS_GET_PARAMS backend: return the current codec
 * parameters from the driver. -EBADFD when unsupported.
 */
static int
snd_compr_get_params(struct snd_compr_stream *stream, unsigned long arg)
{
	struct snd_codec *params __free(kfree) = NULL;
	int retval;

	if (!stream->ops->get_params)
		return -EBADFD;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params)
		return -ENOMEM;
	retval = stream->ops->get_params(stream, params);
	if (retval)
		return retval;
	if (copy_to_user((char __user *)arg, params, sizeof(*params)))
		return -EFAULT;
	return retval;
}
706
707
/*
 * SNDRV_COMPRESS_GET_METADATA backend: read the request (key) from user
 * space, let the driver fill in the value, and copy the result back.
 */
static int
snd_compr_get_metadata(struct snd_compr_stream *stream, unsigned long arg)
{
	struct snd_compr_metadata metadata;
	int retval;

	if (!stream->ops->get_metadata)
		return -ENXIO;

	if (copy_from_user(&metadata, (void __user *)arg, sizeof(metadata)))
		return -EFAULT;

	retval = stream->ops->get_metadata(stream, &metadata);
	if (retval != 0)
		return retval;

	if (copy_to_user((void __user *)arg, &metadata, sizeof(metadata)))
		return -EFAULT;

	return 0;
}
728
729
/*
 * SNDRV_COMPRESS_SET_METADATA backend: pass metadata to the driver and
 * mark the stream metadata_set (a prerequisite for next-track).
 */
static int
snd_compr_set_metadata(struct snd_compr_stream *stream, unsigned long arg)
{
	struct snd_compr_metadata metadata;
	int retval;

	if (!stream->ops->set_metadata)
		return -ENXIO;
	/*
	 * we should allow parameter change only when stream has been
	 * opened not in other cases
	 */
	if (copy_from_user(&metadata, (void __user *)arg, sizeof(metadata)))
		return -EFAULT;

	retval = stream->ops->set_metadata(stream, &metadata);
	stream->metadata_set = true;

	return retval;
}
749
750
/*
 * SNDRV_COMPRESS_TSTAMP(64) backend: query the current position and
 * copy either the 32-bit or the 64-bit timestamp layout to user space.
 */
static inline int snd_compr_tstamp(struct snd_compr_stream *stream,
				   unsigned long arg, bool is_32bit)
{
	struct snd_compr_tstamp64 tstamp64 = { 0 };
	struct snd_compr_tstamp tstamp32 = { 0 };
	const void *copy_from = &tstamp64;
	size_t copy_size = sizeof(tstamp64);
	int ret;

	ret = snd_compr_update_tstamp(stream, &tstamp64);
	if (ret == 0) {
		if (is_32bit) {
			snd_compr_tstamp32_from_64(&tstamp32, &tstamp64);
			copy_from = &tstamp32;
			copy_size = sizeof(tstamp32);
		}
		ret = copy_to_user((void __user *)arg, copy_from, copy_size) ?
			-EFAULT :
			0;
	}
	return ret;
}
772
773
/*
 * SNDRV_COMPRESS_PAUSE backend: pause a RUNNING stream, or — if the
 * device opts in — a DRAINING one (tracked via pause_in_draining so the
 * state machine is restored correctly on resume).
 */
static int snd_compr_pause(struct snd_compr_stream *stream)
{
	int retval;

	switch (stream->runtime->state) {
	case SNDRV_PCM_STATE_RUNNING:
		retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_PUSH);
		if (!retval)
			stream->runtime->state = SNDRV_PCM_STATE_PAUSED;
		break;
	case SNDRV_PCM_STATE_DRAINING:
		if (!stream->device->use_pause_in_draining)
			return -EPERM;
		retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_PUSH);
		if (!retval)
			stream->pause_in_draining = true;
		break;
	default:
		return -EPERM;
	}
	return retval;
}
795
796
/*
 * SNDRV_COMPRESS_RESUME backend: mirror of snd_compr_pause() — resume a
 * PAUSED stream, or clear a pause taken while DRAINING.
 */
static int snd_compr_resume(struct snd_compr_stream *stream)
{
	int retval;

	switch (stream->runtime->state) {
	case SNDRV_PCM_STATE_PAUSED:
		retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_RELEASE);
		if (!retval)
			stream->runtime->state = SNDRV_PCM_STATE_RUNNING;
		break;
	case SNDRV_PCM_STATE_DRAINING:
		if (!stream->pause_in_draining)
			return -EPERM;
		retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_RELEASE);
		if (!retval)
			stream->pause_in_draining = false;
		break;
	default:
		return -EPERM;
	}
	return retval;
}
818
819
/*
 * SNDRV_COMPRESS_START backend: start the stream. Capture may start
 * straight from SETUP; playback must be PREPARED (i.e. written to).
 */
static int snd_compr_start(struct snd_compr_stream *stream)
{
	int retval;

	switch (stream->runtime->state) {
	case SNDRV_PCM_STATE_SETUP:
		if (stream->direction != SND_COMPRESS_CAPTURE)
			return -EPERM;
		break;
	case SNDRV_PCM_STATE_PREPARED:
		break;
	default:
		return -EPERM;
	}

	retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_START);
	if (!retval)
		stream->runtime->state = SNDRV_PCM_STATE_RUNNING;
	return retval;
}
839
840
/*
 * SNDRV_COMPRESS_STOP backend: stop an active stream, reset the gapless
 * and pause flags, wake any drain waiter, and zero the byte counters.
 */
static int snd_compr_stop(struct snd_compr_stream *stream)
{
	int retval;

	switch (stream->runtime->state) {
	case SNDRV_PCM_STATE_OPEN:
	case SNDRV_PCM_STATE_SETUP:
	case SNDRV_PCM_STATE_PREPARED:
		return -EPERM;
	default:
		break;
	}

	retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_STOP);
	if (!retval) {
		/* clear flags and stop any drain wait */
		stream->partial_drain = false;
		stream->metadata_set = false;
		stream->pause_in_draining = false;
		snd_compr_drain_notify(stream);
		stream->runtime->total_bytes_available = 0;
		stream->runtime->total_bytes_transferred = 0;
	}
	return retval;
}
865
866
/*
 * Deferred error handler queued by snd_compr_stop_error(): stop the
 * stream under the device lock and wake any sleeper. Runs from a
 * workqueue because the error may be reported from atomic context.
 */
static void error_delayed_work(struct work_struct *work)
{
	struct snd_compr_stream *stream;

	stream = container_of(work, struct snd_compr_stream, error_work.work);

	guard(mutex)(&stream->device->lock);

	stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_STOP);
	wake_up(&stream->runtime->sleep);
}
877
878
/**
879
* snd_compr_stop_error: Report a fatal error on a stream
880
* @stream: pointer to stream
881
* @state: state to transition the stream to
882
*
883
* Stop the stream and set its state.
884
*
885
* Should be called with compressed device lock held.
886
*
887
* Return: zero if successful, or a negative error code
888
*/
889
int snd_compr_stop_error(struct snd_compr_stream *stream,
			 snd_pcm_state_t state)
{
	/* already in the target state: nothing to do */
	if (stream->runtime->state == state)
		return 0;

	stream->runtime->state = state;

	pr_debug("Changing state to: %d\n", state);

	/* actual stop is deferred to a workqueue; see error_delayed_work() */
	queue_delayed_work(system_power_efficient_wq, &stream->error_work, 0);

	return 0;
}
EXPORT_SYMBOL_GPL(snd_compr_stop_error);
904
905
/*
 * Sleep until the driver signals drain completion (state leaves
 * DRAINING). Called with the device lock held; the lock is dropped for
 * the duration of the wait and reacquired before returning.
 */
static int snd_compress_wait_for_drain(struct snd_compr_stream *stream)
{
	int ret;

	/*
	 * We are called with lock held. So drop the lock while we wait for
	 * drain complete notification from the driver
	 *
	 * It is expected that driver will notify the drain completion and then
	 * stream will be moved to SETUP state, even if draining resulted in an
	 * error. We can trigger next track after this.
	 */
	stream->runtime->state = SNDRV_PCM_STATE_DRAINING;
	mutex_unlock(&stream->device->lock);

	/* we wait for drain to complete here, drain can return when
	 * interruption occurred, wait returned error or success.
	 * For the first two cases we don't do anything different here and
	 * return after waking up
	 */

	ret = wait_event_interruptible(stream->runtime->sleep,
			(stream->runtime->state != SNDRV_PCM_STATE_DRAINING));
	if (ret == -ERESTARTSYS)
		pr_debug("wait aborted by a signal\n");
	else if (ret)
		pr_debug("wait for drain failed with %d\n", ret);


	wake_up(&stream->runtime->sleep);
	mutex_lock(&stream->device->lock);

	return ret;
}
939
940
/*
 * SNDRV_COMPRESS_DRAIN backend: trigger a driver drain on an active
 * stream and block until it completes (or the wait is interrupted).
 */
static int snd_compr_drain(struct snd_compr_stream *stream)
{
	int retval;

	switch (stream->runtime->state) {
	case SNDRV_PCM_STATE_OPEN:
	case SNDRV_PCM_STATE_SETUP:
	case SNDRV_PCM_STATE_PREPARED:
	case SNDRV_PCM_STATE_PAUSED:
		return -EPERM;
	case SNDRV_PCM_STATE_XRUN:
		return -EPIPE;
	default:
		break;
	}

	retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_DRAIN);
	if (retval) {
		pr_debug("SND_COMPR_TRIGGER_DRAIN failed %d\n", retval);
		/* make sure nobody stays blocked on the sleep queue */
		wake_up(&stream->runtime->sleep);
		return retval;
	}

	return snd_compress_wait_for_drain(stream);
}
965
966
/*
 * SNDRV_COMPRESS_NEXT_TRACK backend for gapless playback: signal the
 * driver that the next track follows. Requires a running playback
 * stream whose metadata has been set.
 */
static int snd_compr_next_track(struct snd_compr_stream *stream)
{
	int retval;

	/* only a running stream can transition to next track */
	if (stream->runtime->state != SNDRV_PCM_STATE_RUNNING)
		return -EPERM;

	/* next track doesn't have any meaning for capture streams */
	if (stream->direction == SND_COMPRESS_CAPTURE)
		return -EPERM;

	/* you can signal next track if this is intended to be a gapless stream
	 * and current track metadata is set
	 */
	if (stream->metadata_set == false)
		return -EPERM;

	retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_NEXT_TRACK);
	if (retval != 0)
		return retval;
	/* metadata must be set again for the following track */
	stream->metadata_set = false;
	stream->next_track = true;
	return 0;
}
991
992
/*
 * SNDRV_COMPRESS_PARTIAL_DRAIN backend: drain up to the track boundary
 * previously announced with next-track, then wait for completion.
 */
static int snd_compr_partial_drain(struct snd_compr_stream *stream)
{
	int retval;

	switch (stream->runtime->state) {
	case SNDRV_PCM_STATE_OPEN:
	case SNDRV_PCM_STATE_SETUP:
	case SNDRV_PCM_STATE_PREPARED:
	case SNDRV_PCM_STATE_PAUSED:
		return -EPERM;
	case SNDRV_PCM_STATE_XRUN:
		return -EPIPE;
	default:
		break;
	}

	/* partial drain doesn't have any meaning for capture streams */
	if (stream->direction == SND_COMPRESS_CAPTURE)
		return -EPERM;

	/* stream can be drained only when next track has been signalled */
	if (stream->next_track == false)
		return -EPERM;

	stream->partial_drain = true;
	retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_PARTIAL_DRAIN);
	if (retval) {
		pr_debug("Partial drain returned failure\n");
		wake_up(&stream->runtime->sleep);
		return retval;
	}

	stream->next_track = false;
	return snd_compress_wait_for_drain(stream);
}
1027
1028
#if IS_ENABLED(CONFIG_SND_COMPRESS_ACCEL)
1029
1030
/* Look up a queued accel task by sequence number; NULL if not found. */
static struct snd_compr_task_runtime *
snd_compr_find_task(struct snd_compr_stream *stream, __u64 seqno)
{
	struct snd_compr_task_runtime *task;

	list_for_each_entry(task, &stream->runtime->tasks, list) {
		if (task->seqno == seqno)
			return task;
	}
	return NULL;
}
1041
1042
/* Drop the task's dma-buf references (if attached) and free it. */
static void snd_compr_task_free(struct snd_compr_task_runtime *task)
{
	if (task->output)
		dma_buf_put(task->output);
	if (task->input)
		dma_buf_put(task->input);
	kfree(task);
}
1050
1051
static u64 snd_compr_seqno_next(struct snd_compr_stream *stream)
1052
{
1053
u64 seqno = ++stream->runtime->task_seqno;
1054
if (seqno == 0)
1055
seqno = ++stream->runtime->task_seqno;
1056
return seqno;
1057
}
1058
1059
static int snd_compr_task_new(struct snd_compr_stream *stream, struct snd_compr_task *utask)
1060
{
1061
struct snd_compr_task_runtime *task;
1062
int retval, fd_i, fd_o;
1063
1064
if (stream->runtime->total_tasks >= stream->runtime->fragments)
1065
return -EBUSY;
1066
if (utask->origin_seqno != 0 || utask->input_size != 0)
1067
return -EINVAL;
1068
task = kzalloc(sizeof(*task), GFP_KERNEL);
1069
if (task == NULL)
1070
return -ENOMEM;
1071
task->seqno = utask->seqno = snd_compr_seqno_next(stream);
1072
task->input_size = utask->input_size;
1073
retval = stream->ops->task_create(stream, task);
1074
if (retval < 0)
1075
goto cleanup;
1076
/* similar functionality as in dma_buf_fd(), but ensure that both
1077
file descriptors are allocated before fd_install() */
1078
if (!task->input || !task->input->file || !task->output || !task->output->file) {
1079
retval = -EINVAL;
1080
goto cleanup;
1081
}
1082
fd_i = get_unused_fd_flags(O_WRONLY|O_CLOEXEC);
1083
if (fd_i < 0)
1084
goto cleanup;
1085
fd_o = get_unused_fd_flags(O_RDONLY|O_CLOEXEC);
1086
if (fd_o < 0) {
1087
put_unused_fd(fd_i);
1088
goto cleanup;
1089
}
1090
/* keep dmabuf reference until freed with task free ioctl */
1091
get_dma_buf(task->input);
1092
get_dma_buf(task->output);
1093
fd_install(fd_i, task->input->file);
1094
fd_install(fd_o, task->output->file);
1095
utask->input_fd = fd_i;
1096
utask->output_fd = fd_o;
1097
list_add_tail(&task->list, &stream->runtime->tasks);
1098
stream->runtime->total_tasks++;
1099
return 0;
1100
cleanup:
1101
snd_compr_task_free(task);
1102
return retval;
1103
}
1104
1105
/*
 * SNDRV_COMPRESS_TASK_CREATE ioctl handler: copy the task description
 * from user space, create the task and copy the updated description
 * (seqno and the two new dma-buf file descriptors) back.
 */
static int snd_compr_task_create(struct snd_compr_stream *stream, unsigned long arg)
{
	struct snd_compr_task *task __free(kfree) = NULL;
	int retval;

	/* tasks may only be created while the stream sits in SETUP */
	if (stream->runtime->state != SNDRV_PCM_STATE_SETUP)
		return -EPERM;
	task = memdup_user((void __user *)arg, sizeof(*task));
	if (IS_ERR(task))
		return PTR_ERR(task);
	retval = snd_compr_task_new(stream, task);
	if (retval >= 0)
		/* hand the new seqno + fds back to user space */
		if (copy_to_user((void __user *)arg, task, sizeof(*task)))
			retval = -EFAULT;
	return retval;
}
1121
1122
static int snd_compr_task_start_prepare(struct snd_compr_task_runtime *task,
1123
struct snd_compr_task *utask)
1124
{
1125
if (task == NULL)
1126
return -EINVAL;
1127
if (task->state >= SND_COMPRESS_TASK_STATE_FINISHED)
1128
return -EBUSY;
1129
if (utask->input_size > task->input->size)
1130
return -EINVAL;
1131
task->flags = utask->flags;
1132
task->input_size = utask->input_size;
1133
task->state = SND_COMPRESS_TASK_STATE_IDLE;
1134
return 0;
1135
}
1136
1137
/*
 * Start one accel task.  When @utask->origin_seqno is set, an existing
 * (typically finished) task is reused under a fresh sequence number;
 * otherwise the task identified by @utask->seqno is started as-is.
 */
static int snd_compr_task_start(struct snd_compr_stream *stream, struct snd_compr_task *utask)
{
	struct snd_compr_task_runtime *task;
	int retval;

	if (utask->origin_seqno > 0) {
		/* reuse path: find the origin task and renumber it */
		task = snd_compr_find_task(stream, utask->origin_seqno);
		retval = snd_compr_task_start_prepare(task, utask);
		if (retval < 0)
			return retval;
		task->seqno = utask->seqno = snd_compr_seqno_next(stream);
		utask->origin_seqno = 0;
		/* keep the task list ordered by submission */
		list_move_tail(&task->list, &stream->runtime->tasks);
	} else {
		/* plain start: the task must exist and must be idle */
		task = snd_compr_find_task(stream, utask->seqno);
		if (task && task->state != SND_COMPRESS_TASK_STATE_IDLE)
			return -EBUSY;
		retval = snd_compr_task_start_prepare(task, utask);
		if (retval < 0)
			return retval;
	}
	retval = stream->ops->task_start(stream, task);
	if (retval >= 0) {
		task->state = SND_COMPRESS_TASK_STATE_ACTIVE;
		stream->runtime->active_tasks++;
	}
	return retval;
}
1165
1166
/*
 * SNDRV_COMPRESS_TASK_START ioctl handler: copy the request from user
 * space, start the task and return the (possibly renumbered) description.
 */
static int snd_compr_task_start_ioctl(struct snd_compr_stream *stream, unsigned long arg)
{
	struct snd_compr_task *task __free(kfree) = NULL;
	int retval;

	/* accel task ioctls are only valid in the SETUP state */
	if (stream->runtime->state != SNDRV_PCM_STATE_SETUP)
		return -EPERM;
	task = memdup_user((void __user *)arg, sizeof(*task));
	if (IS_ERR(task))
		return PTR_ERR(task);
	retval = snd_compr_task_start(stream, task);
	if (retval >= 0)
		if (copy_to_user((void __user *)arg, task, sizeof(*task)))
			retval = -EFAULT;
	return retval;
}
1182
1183
/* stop one active task and park it, idle, at the tail of the task list */
static void snd_compr_task_stop_one(struct snd_compr_stream *stream,
				    struct snd_compr_task_runtime *task)
{
	if (task->state != SND_COMPRESS_TASK_STATE_ACTIVE)
		return;
	stream->ops->task_stop(stream, task);
	/* the active counter must never underflow; warn if it would */
	if (!snd_BUG_ON(stream->runtime->active_tasks == 0))
		stream->runtime->active_tasks--;
	list_move_tail(&task->list, &stream->runtime->tasks);
	task->state = SND_COMPRESS_TASK_STATE_IDLE;
}
1194
1195
/* stop the task if still running, detach it from the driver and free it */
static void snd_compr_task_free_one(struct snd_compr_stream *stream,
				    struct snd_compr_task_runtime *task)
{
	snd_compr_task_stop_one(stream, task);
	stream->ops->task_free(stream, task);
	list_del(&task->list);
	snd_compr_task_free(task);
	stream->runtime->total_tasks--;
}
1204
1205
/* release every task of the stream, iterating newest-first */
static void snd_compr_task_free_all(struct snd_compr_stream *stream)
{
	struct snd_compr_task_runtime *task, *temp;

	list_for_each_entry_safe_reverse(task, temp, &stream->runtime->tasks, list)
		snd_compr_task_free_one(stream, task);
}
1212
1213
typedef void (*snd_compr_seq_func_t)(struct snd_compr_stream *stream,
				     struct snd_compr_task_runtime *task);

/*
 * Read a sequence number from user space and apply @fcn either to every
 * task (seqno == 0, newest first) or to the single matching task.
 * Shared by the TASK_FREE and TASK_STOP ioctls.
 */
static int snd_compr_task_seq(struct snd_compr_stream *stream, unsigned long arg,
			      snd_compr_seq_func_t fcn)
{
	struct snd_compr_task_runtime *task, *temp;
	__u64 seqno;
	int retval;

	if (stream->runtime->state != SNDRV_PCM_STATE_SETUP)
		return -EPERM;
	retval = copy_from_user(&seqno, (__u64 __user *)arg, sizeof(seqno));
	if (retval)
		return -EFAULT;
	retval = 0;
	if (seqno == 0) {
		/* seqno 0 is the wildcard: operate on all tasks */
		list_for_each_entry_safe_reverse(task, temp, &stream->runtime->tasks, list)
			fcn(stream, task);
	} else {
		task = snd_compr_find_task(stream, seqno);
		if (task == NULL) {
			retval = -EINVAL;
		} else {
			fcn(stream, task);
		}
	}
	return retval;
}
1242
1243
static int snd_compr_task_status(struct snd_compr_stream *stream,
1244
struct snd_compr_task_status *status)
1245
{
1246
struct snd_compr_task_runtime *task;
1247
1248
task = snd_compr_find_task(stream, status->seqno);
1249
if (task == NULL)
1250
return -EINVAL;
1251
status->input_size = task->input_size;
1252
status->output_size = task->output_size;
1253
status->state = task->state;
1254
return 0;
1255
}
1256
1257
/*
 * SNDRV_COMPRESS_TASK_STATUS ioctl handler: copy the query from user
 * space, fill in the task status and copy the result back.
 */
static int snd_compr_task_status_ioctl(struct snd_compr_stream *stream, unsigned long arg)
{
	struct snd_compr_task_status *status __free(kfree) = NULL;
	int retval;

	if (stream->runtime->state != SNDRV_PCM_STATE_SETUP)
		return -EPERM;
	status = memdup_user((void __user *)arg, sizeof(*status));
	if (IS_ERR(status))
		return PTR_ERR(status);
	retval = snd_compr_task_status(stream, status);
	if (retval >= 0)
		if (copy_to_user((void __user *)arg, status, sizeof(*status)))
			retval = -EFAULT;
	return retval;
}
1273
1274
/**
 * snd_compr_task_finished: Notify that the task was finished
 * @stream: pointer to stream
 * @task: runtime task structure
 *
 * Set the finished task state and notify waiters.  Called by drivers
 * when the hardware completed the task.
 */
void snd_compr_task_finished(struct snd_compr_stream *stream,
			     struct snd_compr_task_runtime *task)
{
	/* serialize against the ioctl handlers */
	guard(mutex)(&stream->device->lock);
	if (!snd_BUG_ON(stream->runtime->active_tasks == 0))
		stream->runtime->active_tasks--;
	task->state = SND_COMPRESS_TASK_STATE_FINISHED;
	/* wake up any waiters sleeping on the stream */
	wake_up(&stream->runtime->sleep);
}
EXPORT_SYMBOL_GPL(snd_compr_task_finished);
1291
1292
MODULE_IMPORT_NS("DMA_BUF");
1293
#endif /* CONFIG_SND_COMPRESS_ACCEL */
1294
1295
/*
 * Main ioctl dispatcher for the compress device.  Direction-agnostic
 * commands are handled first; accel streams then get their dedicated
 * task command set, while playback/capture streams use the classic
 * transport commands.
 */
static long snd_compr_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
	struct snd_compr_file *data = f->private_data;
	struct snd_compr_stream *stream;

	if (snd_BUG_ON(!data))
		return -EFAULT;

	stream = &data->stream;

	/* a single per-device mutex serializes all ioctls */
	guard(mutex)(&stream->device->lock);
	switch (cmd) {
	case SNDRV_COMPRESS_IOCTL_VERSION:
		return put_user(SNDRV_COMPRESS_VERSION,
				(int __user *)arg) ? -EFAULT : 0;
	case SNDRV_COMPRESS_GET_CAPS:
		return snd_compr_get_caps(stream, arg);
#ifndef COMPR_CODEC_CAPS_OVERFLOW
	case SNDRV_COMPRESS_GET_CODEC_CAPS:
		return snd_compr_get_codec_caps(stream, arg);
#endif
	case SNDRV_COMPRESS_SET_PARAMS:
		return snd_compr_set_params(stream, arg);
	case SNDRV_COMPRESS_GET_PARAMS:
		return snd_compr_get_params(stream, arg);
	case SNDRV_COMPRESS_SET_METADATA:
		return snd_compr_set_metadata(stream, arg);
	case SNDRV_COMPRESS_GET_METADATA:
		return snd_compr_get_metadata(stream, arg);
	}

	/* accel streams understand only the task commands below */
	if (stream->direction == SND_COMPRESS_ACCEL) {
#if IS_ENABLED(CONFIG_SND_COMPRESS_ACCEL)
		switch (cmd) {
		case SNDRV_COMPRESS_TASK_CREATE:
			return snd_compr_task_create(stream, arg);
		case SNDRV_COMPRESS_TASK_FREE:
			return snd_compr_task_seq(stream, arg, snd_compr_task_free_one);
		case SNDRV_COMPRESS_TASK_START:
			return snd_compr_task_start_ioctl(stream, arg);
		case SNDRV_COMPRESS_TASK_STOP:
			return snd_compr_task_seq(stream, arg, snd_compr_task_stop_one);
		case SNDRV_COMPRESS_TASK_STATUS:
			return snd_compr_task_status_ioctl(stream, arg);
		}
#endif
		return -ENOTTY;
	}

	/* classic playback/capture transport commands */
	switch (cmd) {
	case SNDRV_COMPRESS_TSTAMP:
		return snd_compr_tstamp(stream, arg, true);
	case SNDRV_COMPRESS_TSTAMP64:
		return snd_compr_tstamp(stream, arg, false);
	case SNDRV_COMPRESS_AVAIL:
		return snd_compr_ioctl_avail(stream, arg, true);
	case SNDRV_COMPRESS_AVAIL64:
		return snd_compr_ioctl_avail(stream, arg, false);
	case SNDRV_COMPRESS_PAUSE:
		return snd_compr_pause(stream);
	case SNDRV_COMPRESS_RESUME:
		return snd_compr_resume(stream);
	case SNDRV_COMPRESS_START:
		return snd_compr_start(stream);
	case SNDRV_COMPRESS_STOP:
		return snd_compr_stop(stream);
	case SNDRV_COMPRESS_DRAIN:
		return snd_compr_drain(stream);
	case SNDRV_COMPRESS_PARTIAL_DRAIN:
		return snd_compr_partial_drain(stream);
	case SNDRV_COMPRESS_NEXT_TRACK:
		return snd_compr_next_track(stream);
	}

	return -ENOTTY;
}
1371
1372
/* support of 32bit userspace on 64bit platforms */
1373
#ifdef CONFIG_COMPAT
1374
/* all compress ioctl layouts are compat-clean; only convert the pointer */
static long snd_compr_ioctl_compat(struct file *file, unsigned int cmd,
				   unsigned long arg)
{
	return snd_compr_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
1379
#endif
1380
1381
/* file operations of the compress device node */
static const struct file_operations snd_compr_file_ops = {
		.owner =	THIS_MODULE,
		.open =		snd_compr_open,
		.release =	snd_compr_free,
		.write =	snd_compr_write,
		.read =		snd_compr_read,
		.unlocked_ioctl = snd_compr_ioctl,
#ifdef CONFIG_COMPAT
		.compat_ioctl = snd_compr_ioctl_compat,
#endif
		.mmap =		snd_compr_mmap,
		.poll =		snd_compr_poll,
};
1394
1395
static int snd_compress_dev_register(struct snd_device *device)
1396
{
1397
int ret;
1398
struct snd_compr *compr;
1399
1400
if (snd_BUG_ON(!device || !device->device_data))
1401
return -EBADFD;
1402
compr = device->device_data;
1403
1404
pr_debug("reg device %s, direction %d\n", compr->name,
1405
compr->direction);
1406
/* register compressed device */
1407
ret = snd_register_device(SNDRV_DEVICE_TYPE_COMPRESS,
1408
compr->card, compr->device,
1409
&snd_compr_file_ops, compr, compr->dev);
1410
if (ret < 0) {
1411
pr_err("snd_register_device failed %d\n", ret);
1412
return ret;
1413
}
1414
return ret;
1415
1416
}
1417
1418
static int snd_compress_dev_disconnect(struct snd_device *device)
1419
{
1420
struct snd_compr *compr;
1421
1422
compr = device->device_data;
1423
snd_unregister_device(compr->dev);
1424
return 0;
1425
}
1426
1427
#ifdef CONFIG_SND_VERBOSE_PROCFS
1428
static void snd_compress_proc_info_read(struct snd_info_entry *entry,
1429
struct snd_info_buffer *buffer)
1430
{
1431
struct snd_compr *compr = (struct snd_compr *)entry->private_data;
1432
1433
snd_iprintf(buffer, "card: %d\n", compr->card->number);
1434
snd_iprintf(buffer, "device: %d\n", compr->device);
1435
snd_iprintf(buffer, "stream: %s\n",
1436
compr->direction == SND_COMPRESS_PLAYBACK
1437
? "PLAYBACK" : "CAPTURE");
1438
snd_iprintf(buffer, "id: %s\n", compr->id);
1439
}
1440
1441
static int snd_compress_proc_init(struct snd_compr *compr)
1442
{
1443
struct snd_info_entry *entry;
1444
char name[16];
1445
1446
sprintf(name, "compr%i", compr->device);
1447
entry = snd_info_create_card_entry(compr->card, name,
1448
compr->card->proc_root);
1449
if (!entry)
1450
return -ENOMEM;
1451
entry->mode = S_IFDIR | 0555;
1452
compr->proc_root = entry;
1453
1454
entry = snd_info_create_card_entry(compr->card, "info",
1455
compr->proc_root);
1456
if (entry)
1457
snd_info_set_text_ops(entry, compr,
1458
snd_compress_proc_info_read);
1459
compr->proc_info_entry = entry;
1460
1461
return 0;
1462
}
1463
1464
/* tear down the procfs entries created by snd_compress_proc_init() */
static void snd_compress_proc_done(struct snd_compr *compr)
{
	snd_info_free_entry(compr->proc_info_entry);
	compr->proc_info_entry = NULL;
	snd_info_free_entry(compr->proc_root);
	compr->proc_root = NULL;
}
1471
1472
/* store the ID string reported via the procfs "info" entry */
static inline void snd_compress_set_id(struct snd_compr *compr, const char *id)
{
	strscpy(compr->id, id, sizeof(compr->id));
}
1476
#else
1477
/* no-op stubs used when CONFIG_SND_VERBOSE_PROCFS is disabled */
static inline int snd_compress_proc_init(struct snd_compr *compr)
{
	return 0;
}

static inline void snd_compress_proc_done(struct snd_compr *compr)
{
}

static inline void snd_compress_set_id(struct snd_compr *compr, const char *id)
{
}
1489
#endif
1490
1491
static int snd_compress_dev_free(struct snd_device *device)
1492
{
1493
struct snd_compr *compr;
1494
1495
compr = device->device_data;
1496
snd_compress_proc_done(compr);
1497
put_device(compr->dev);
1498
return 0;
1499
}
1500
1501
/**
 * snd_compress_new: create new compress device
 * @card: sound card pointer
 * @device: device number
 * @dirn: device direction, should be of type enum snd_compr_direction
 * @id: ID string
 * @compr: compress device pointer
 *
 * Return: zero if successful, or a negative error code
 */
int snd_compress_new(struct snd_card *card, int device,
			int dirn, const char *id, struct snd_compr *compr)
{
	static const struct snd_device_ops ops = {
		.dev_free = snd_compress_dev_free,
		.dev_register = snd_compress_dev_register,
		.dev_disconnect = snd_compress_dev_disconnect,
	};
	int ret;

	/* accel direction requires the accel core to be built in */
#if !IS_ENABLED(CONFIG_SND_COMPRESS_ACCEL)
	if (snd_BUG_ON(dirn == SND_COMPRESS_ACCEL))
		return -EINVAL;
#endif

	compr->card = card;
	compr->device = device;
	compr->direction = dirn;
	mutex_init(&compr->lock);

	snd_compress_set_id(compr, id);

	ret = snd_device_alloc(&compr->dev, card);
	if (ret)
		return ret;
	dev_set_name(compr->dev, "comprC%iD%i", card->number, device);

	ret = snd_device_new(card, SNDRV_DEV_COMPRESS, compr, &ops);
	if (ret == 0)
		snd_compress_proc_init(compr);
	else
		/* drop the reference taken by snd_device_alloc() */
		put_device(compr->dev);

	return ret;
}
EXPORT_SYMBOL_GPL(snd_compress_new);
1547
1548
MODULE_DESCRIPTION("ALSA Compressed offload framework");
1549
MODULE_AUTHOR("Vinod Koul <[email protected]>");
1550
MODULE_LICENSE("GPL v2");
1551
1552