Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/mobile
Path: blob/master/src/java.desktop/macosx/native/libjsound/PLATFORM_API_MacOSX_PCM.cpp
41149 views
1
/*
2
* Copyright (c) 2002, 2020, Oracle and/or its affiliates. All rights reserved.
3
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4
*
5
* This code is free software; you can redistribute it and/or modify it
6
* under the terms of the GNU General Public License version 2 only, as
7
* published by the Free Software Foundation. Oracle designates this
8
* particular file as subject to the "Classpath" exception as provided
9
* by Oracle in the LICENSE file that accompanied this code.
10
*
11
* This code is distributed in the hope that it will be useful, but WITHOUT
12
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14
* version 2 for more details (a copy is included in the LICENSE file that
15
* accompanied this code).
16
*
17
* You should have received a copy of the GNU General Public License version
18
* 2 along with this work; if not, write to the Free Software Foundation,
19
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
20
*
21
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
22
* or visit www.oracle.com if you need additional information or have any
23
* questions.
24
*/
25
26
//#define USE_ERROR
27
//#define USE_TRACE
28
//#define USE_VERBOSE_TRACE
29
30
#include <AudioUnit/AudioUnit.h>
31
#include <AudioToolbox/AudioConverter.h>
32
#include <pthread.h>
33
#include <math.h>
34
/*
35
#if !defined(__COREAUDIO_USE_FLAT_INCLUDES__)
36
#include <CoreAudio/CoreAudioTypes.h>
37
#else
38
#include <CoreAudioTypes.h>
39
#endif
40
*/
41
42
#include "PLATFORM_API_MacOSX_Utils.h"
43
44
extern "C" {
45
#include "Utilities.h"
46
#include "DirectAudio.h"
47
}
48
49
#if USE_DAUDIO == TRUE
50
51
52
#ifdef USE_TRACE
// Dumps the key fields of an AudioStreamBasicDescription to the trace log:
// format ID (as a four-char code), sample rate, format flags, and the
// channel/bit/frame layout.
static void PrintStreamDesc(const AudioStreamBasicDescription *inDesc) {
    TRACE4("ID='%c%c%c%c'", (char)(inDesc->mFormatID >> 24), (char)(inDesc->mFormatID >> 16), (char)(inDesc->mFormatID >> 8), (char)(inDesc->mFormatID));
    TRACE2(", %f Hz, flags=0x%lX", (float)inDesc->mSampleRate, (long unsigned)inDesc->mFormatFlags);
    TRACE2(", %ld channels, %ld bits", (long)inDesc->mChannelsPerFrame, (long)inDesc->mBitsPerChannel);
    TRACE1(", %ld bytes per frame\n", (long)inDesc->mBytesPerFrame);
}
#else
// No-op stub so call sites do not need #ifdef guards when tracing is disabled.
static inline void PrintStreamDesc(const AudioStreamBasicDescription *inDesc) { }
#endif
62
63
64
// Classic min/max macros. Note each argument may be evaluated twice, so
// avoid passing expressions with side effects (e.g. MAX(i++, j)).
#define MAX(x, y) ((x) >= (y) ? (x) : (y))
#define MIN(x, y) ((x) <= (y) ? (x) : (y))
66
67
68
// =======================================
69
// MixerProvider functions implementation
70
71
// Process-wide cache of CoreAudio device info shared by the MixerProvider
// entry points below; refreshed in DAUDIO_GetDirectAudioDeviceCount().
static DeviceList deviceCache;
72
73
// Refreshes the device cache and reports how many direct audio devices are
// available. When at least one physical device exists, one extra slot is
// reported for the synthetic "default" device (mixer index 0).
INT32 DAUDIO_GetDirectAudioDeviceCount() {
    deviceCache.Refresh();

    int deviceCount = deviceCache.GetCount();
    if (deviceCount <= 0) {
        TRACE0("DAUDIO_GetDirectAudioDeviceCount: no devices found\n");
        return deviceCount;
    }

    // account for the synthetic "default" device entry
    deviceCount++;
    TRACE1("DAUDIO_GetDirectAudioDeviceCount: returns %d devices\n", deviceCount);
    return deviceCount;
}
85
86
// Fills *desc for the mixer at mixerIndex. Index 0 is the synthetic
// "default" device (deviceID stays 0); indices >= 1 map to device cache
// entry (mixerIndex - 1). Returns TRUE on success, FALSE otherwise.
INT32 DAUDIO_GetDirectAudioDeviceDescription(INT32 mixerIndex, DirectAudioDeviceDescription *desc) {
    desc->deviceID = 0;

    if (mixerIndex == 0) {
        // synthetic default device: fixed name, no concrete AudioDeviceID
        strncpy(desc->name, "Default Audio Device", DAUDIO_STRING_LENGTH);
        strncpy(desc->description, "Default Audio Device", DAUDIO_STRING_LENGTH);
        desc->maxSimulLines = -1;
        return TRUE;
    }

    AudioDeviceID deviceID;
    bool ok = deviceCache.GetDeviceInfo(mixerIndex - 1, &deviceID, DAUDIO_STRING_LENGTH,
                                        desc->name, desc->vendor, desc->description, desc->version);
    if (ok) {
        desc->deviceID = (INT32)deviceID;
        desc->maxSimulLines = -1;
    }
    return ok ? TRUE : FALSE;
}
105
106
107
// Enumerates the PCM formats supported by the given mixer/device and
// registers each with the Java layer via DAUDIO_AddAudioFormat.
// deviceID == 0 denotes the "default" device. Enumerated formats use
// sampleRate == -1 ("any rate"); the last registered format is the default
// one used by DataLine.open() when no format is specified.
void DAUDIO_GetFormats(INT32 mixerIndex, INT32 deviceID, int isSource, void* creator) {
    TRACE3(">>DAUDIO_GetFormats mixerIndex=%d deviceID=0x%x isSource=%d\n", (int)mixerIndex, (int)deviceID, isSource);

    AudioDeviceID audioDeviceID = deviceID == 0 ? GetDefaultDevice(isSource) : (AudioDeviceID)deviceID;

    if (audioDeviceID == 0) {
        return;
    }

    int totalChannels = GetChannelCount(audioDeviceID, isSource);

    if (totalChannels == 0) {
        TRACE0("<<DAUDIO_GetFormats, no streams!\n");
        return;
    }

    if (isSource && totalChannels < 2) {
        // report 2 channels even if only mono is supported
        totalChannels = 2;
    }

    // candidate channel counts: mono, stereo, and the full device channel count
    int channels[] = {1, 2, totalChannels};
    int channelsCount = MIN(totalChannels, 3);

    float hardwareSampleRate = GetSampleRate(audioDeviceID, isSource);
    TRACE2(" DAUDIO_GetFormats: got %d channels, sampleRate == %f\n", totalChannels, hardwareSampleRate);

    // any sample rates are supported
    float sampleRate = -1;

    static int sampleBits[] = {8, 16, 24};
    static int sampleBitsCount = sizeof(sampleBits)/sizeof(sampleBits[0]);

    // the last audio format is the default one (used by DataLine.open() if format is not specified)
    // consider as default 16bit PCM stereo (mono if stereo is not supported) with the current sample rate
    int defBits = 16;
    int defChannels = MIN(2, channelsCount);
    float defSampleRate = hardwareSampleRate;
    // don't add default format if sample rate is not specified
    bool addDefault = defSampleRate > 0;

    // TODO: CoreAudio can handle signed/unsigned, little-endian/big-endian
    // TODO: register the formats (to prevent DirectAudio software conversion) - need to fix DirectAudioDevice.createDataLineInfo
    // to avoid software conversions if both signed/unsigned or big-/little-endian are supported
    for (int channelIndex = 0; channelIndex < channelsCount; channelIndex++) {
        for (int bitIndex = 0; bitIndex < sampleBitsCount; bitIndex++) {
            int bits = sampleBits[bitIndex];
            // NOTE(review): this skip condition can never be true — sampleRate
            // is fixed at -1 ("any rate") while defSampleRate is the positive
            // hardware rate, so `sampleRate == defSampleRate` always fails.
            // Also `channels[channelIndex] != defChannels` looks inverted for a
            // "this is the default format" test — confirm against upstream.
            if (addDefault && bits == defBits && channels[channelIndex] != defChannels && sampleRate == defSampleRate) {
                // the format is the default one, don't add it now
                continue;
            }
            DAUDIO_AddAudioFormat(creator,
                bits,                       // sample size in bits
                -1,                         // frame size (auto)
                channels[channelIndex],     // channels
                sampleRate,                 // sample rate
                DAUDIO_PCM,                 // only accept PCM
                bits == 8 ? FALSE : TRUE,   // signed
                bits == 8 ? FALSE           // little-endian for 8bit
                    : UTIL_IsBigEndianPlatform());
        }
    }
    // add default format
    if (addDefault) {
        DAUDIO_AddAudioFormat(creator,
            defBits,                        // 16 bits
            -1,                             // automatically calculate frame size
            defChannels,                    // channels
            defSampleRate,                  // sample rate
            DAUDIO_PCM,                     // PCM
            TRUE,                           // signed
            UTIL_IsBigEndianPlatform());    // native endianess
    }

    TRACE0("<<DAUDIO_GetFormats\n");
}
183
184
185
// =======================================
186
// Source/Target DataLine functions implementation
187
188
// ====
189
/* 1writer-1reader ring buffer class with flush() support.
 *
 * Positions (nWritePos / nReadPos / nFlushPos) grow monotonically as INT64
 * and are mapped into the allocated array by Pos2Offset() using a
 * power-of-two mask. lockMutex guards only the position bookkeeping; the
 * actual memcpy in write()/read() runs unlocked, which relies on the
 * single-writer/single-reader usage stated above.
 */
class RingBuffer {
public:
    RingBuffer() : pBuffer(NULL), nBufferSize(0) {
        pthread_mutex_init(&lockMutex, NULL);
    }
    ~RingBuffer() {
        Deallocate();
        pthread_mutex_destroy(&lockMutex);
    }

    // extraBytes: number of additionally allocated bytes to prevent data
    // overlapping when almost whole buffer is filled
    // (required only if Write() can override the buffer)
    // The total is rounded up to a power of two so wrap-around can use a
    // mask. Returns false (and logs) on size overflow or out-of-memory.
    bool Allocate(int requestedBufferSize, int extraBytes) {
        int fullBufferSize = requestedBufferSize + extraBytes;
        long powerOfTwo = 1;
        while (powerOfTwo < fullBufferSize) {
            powerOfTwo <<= 1;
        }
        // NOTE(review): the overflow of requestedBufferSize + extraBytes is
        // detected only after the fact (fullBufferSize < 0); signed overflow
        // is technically UB — confirm callers keep sizes well below INT_MAX.
        if (powerOfTwo > INT_MAX || fullBufferSize < 0) {
            ERROR0("RingBuffer::Allocate: REQUESTED MEMORY SIZE IS TOO BIG\n");
            return false;
        }
        pBuffer = (Byte*)malloc(powerOfTwo);
        if (pBuffer == NULL) {
            ERROR0("RingBuffer::Allocate: OUT OF MEMORY\n");
            return false;
        }

        nBufferSize = requestedBufferSize;
        nAllocatedBytes = powerOfTwo;
        nPosMask = powerOfTwo - 1;
        nWritePos = 0;
        nReadPos = 0;
        nFlushPos = -1;     // -1 == no pending flush

        TRACE2("RingBuffer::Allocate: OK, bufferSize=%d, allocated:%d\n", nBufferSize, nAllocatedBytes);
        return true;
    }

    void Deallocate() {
        if (pBuffer) {
            free(pBuffer);
            pBuffer = NULL;
            nBufferSize = 0;
        }
    }

    // logical (requested) capacity, not the power-of-two allocation size
    inline int GetBufferSize() {
        return nBufferSize;
    }

    inline int GetAllocatedSize() {
        return nAllocatedBytes;
    }

    // gets number of bytes available for reading
    int GetValidByteCount() {
        lock();
        INT64 result = nWritePos - (nFlushPos >= 0 ? nFlushPos : nReadPos);
        unlock();
        // clamp: the writer may have lapped the reader (overflow)
        return result > (INT64)nBufferSize ? nBufferSize : (int)result;
    }

    // Copies up to len bytes from srcBuffer into the ring.
    // preventOverflow == true: never overwrite unread data (len is trimmed
    // to the free space); preventOverflow == false: write unconditionally
    // (callers rely on the extraBytes headroom passed to Allocate()).
    // Returns the number of bytes actually written.
    int Write(void *srcBuffer, int len, bool preventOverflow) {
        lock();
        TRACE2("RingBuffer::Write (%d bytes, preventOverflow=%d)\n", len, preventOverflow ? 1 : 0);
        TRACE2(" writePos = %lld (%d)", (long long)nWritePos, Pos2Offset(nWritePos));
        TRACE2(" readPos=%lld (%d)", (long long)nReadPos, Pos2Offset(nReadPos));
        TRACE2(" flushPos=%lld (%d)\n", (long long)nFlushPos, Pos2Offset(nFlushPos));

        INT64 writePos = nWritePos;
        if (preventOverflow) {
            INT64 avail_read = writePos - (nFlushPos >= 0 ? nFlushPos : nReadPos);
            if (avail_read >= (INT64)nBufferSize) {
                // no space
                TRACE0(" preventOverlow: OVERFLOW => len = 0;\n");
                len = 0;
            } else {
                int avail_write = nBufferSize - (int)avail_read;
                if (len > avail_write) {
                    TRACE2(" preventOverlow: desrease len: %d => %d\n", len, avail_write);
                    len = avail_write;
                }
            }
        }
        unlock();

        if (len > 0) {

            // copy outside the lock: only this (single) writer moves nWritePos
            write((Byte *)srcBuffer, Pos2Offset(writePos), len);

            lock();
            TRACE4("--RingBuffer::Write writePos: %lld (%d) => %lld, (%d)\n",
                (long long)nWritePos, Pos2Offset(nWritePos), (long long)nWritePos + len, Pos2Offset(nWritePos + len));
            nWritePos += len;
            unlock();
        }
        return len;
    }

    // Copies up to len bytes out of the ring into dstBuffer.
    // Applies any pending flush first; if a Flush() arrives while copying,
    // the copied data is considered obsolete and 0 is returned.
    // Returns the number of bytes delivered.
    int Read(void *dstBuffer, int len) {
        lock();
        TRACE1("RingBuffer::Read (%d bytes)\n", len);
        TRACE2(" writePos = %lld (%d)", (long long)nWritePos, Pos2Offset(nWritePos));
        TRACE2(" readPos=%lld (%d)", (long long)nReadPos, Pos2Offset(nReadPos));
        TRACE2(" flushPos=%lld (%d)\n", (long long)nFlushPos, Pos2Offset(nFlushPos));

        applyFlush();
        INT64 avail_read = nWritePos - nReadPos;
        // check for overflow
        if (avail_read > (INT64)nBufferSize) {
            // writer lapped us: drop the oldest data and resynchronize
            nReadPos = nWritePos - nBufferSize;
            avail_read = nBufferSize;
            TRACE0(" OVERFLOW\n");
        }
        INT64 readPos = nReadPos;
        unlock();

        if (len > (int)avail_read) {
            TRACE2(" RingBuffer::Read - don't have enough data, len: %d => %d\n", len, (int)avail_read);
            len = (int)avail_read;
        }

        if (len > 0) {

            // copy outside the lock: only this (single) reader moves nReadPos
            read((Byte *)dstBuffer, Pos2Offset(readPos), len);

            lock();
            if (applyFlush()) {
                // just got flush(), results became obsolete
                TRACE0("--RingBuffer::Read, got Flush, return 0\n");
                len = 0;
            } else {
                TRACE4("--RingBuffer::Read readPos: %lld (%d) => %lld (%d)\n",
                    (long long)nReadPos, Pos2Offset(nReadPos), (long long)nReadPos + len, Pos2Offset(nReadPos + len));
                nReadPos += len;
            }
            unlock();
        } else {
            // underrun!
        }
        return len;
    }

    // returns number of the flushed bytes
    int Flush() {
        lock();
        INT64 flushedBytes = nWritePos - (nFlushPos >= 0 ? nFlushPos : nReadPos);
        nFlushPos = nWritePos;
        unlock();
        return flushedBytes > (INT64)nBufferSize ? nBufferSize : (int)flushedBytes;
    }

private:
    Byte *pBuffer;          // backing store (nAllocatedBytes, power of two)
    int nBufferSize;        // logical capacity requested by the caller
    int nAllocatedBytes;    // actual allocation (>= nBufferSize, power of two)
    INT64 nPosMask;         // nAllocatedBytes - 1, used by Pos2Offset()

    pthread_mutex_t lockMutex;

    volatile INT64 nWritePos;
    volatile INT64 nReadPos;
    // Flush() sets nFlushPos value to nWritePos;
    // next Read() sets nReadPos to nFlushPos and resets nFlushPos to -1
    volatile INT64 nFlushPos;

    inline void lock() {
        pthread_mutex_lock(&lockMutex);
    }
    inline void unlock() {
        pthread_mutex_unlock(&lockMutex);
    }

    // Consumes a pending flush (if any); must be called under lock().
    // Returns true if a flush was applied.
    inline bool applyFlush() {
        if (nFlushPos >= 0) {
            nReadPos = nFlushPos;
            nFlushPos = -1;
            return true;
        }
        return false;
    }

    // maps a monotonically growing position to an offset in pBuffer
    inline int Pos2Offset(INT64 pos) {
        return (int)(pos & nPosMask);
    }

    // raw copy into the ring, splitting the memcpy when the destination
    // range wraps past the end of the allocation
    void write(Byte *srcBuffer, int dstOffset, int len) {
        int dstEndOffset = dstOffset + len;

        int lenAfterWrap = dstEndOffset - nAllocatedBytes;
        if (lenAfterWrap > 0) {
            // dest.buffer does wrap
            len = nAllocatedBytes - dstOffset;
            memcpy(pBuffer+dstOffset, srcBuffer, len);
            memcpy(pBuffer, srcBuffer+len, lenAfterWrap);
        } else {
            // dest.buffer does not wrap
            memcpy(pBuffer+dstOffset, srcBuffer, len);
        }
    }

    // raw copy out of the ring, splitting the memcpy when the source
    // range wraps past the end of the allocation
    void read(Byte *dstBuffer, int srcOffset, int len) {
        int srcEndOffset = srcOffset + len;

        int lenAfterWrap = srcEndOffset - nAllocatedBytes;
        if (lenAfterWrap > 0) {
            // need to unwrap data
            len = nAllocatedBytes - srcOffset;
            memcpy(dstBuffer, pBuffer+srcOffset, len);
            memcpy(dstBuffer+len, pBuffer, lenAfterWrap);
        } else {
            // source buffer is not wrapped
            memcpy(dstBuffer, pBuffer+srcOffset, len);
        }
    }
};
408
409
410
// Wraps an AudioConverterRef to convert captured audio from the hardware
// format (asbdIn) to the user-requested format (asbdOut), writing results
// into a RingBuffer. Both formats must be interleaved.
class Resampler {
private:
    enum {
        kResamplerEndOfInputData = 1 // error to interrupt conversion (end of input data)
    };
public:
    Resampler() : converter(NULL), outBuffer(NULL) { }
    ~Resampler() {
        if (converter != NULL) {
            AudioConverterDispose(converter);
        }
        if (outBuffer != NULL) {
            free(outBuffer);
        }
    }

    // inFormat & outFormat must be interleaved!
    // inputBufferSizeInBytes: size of the chunks later passed to Process();
    // used (together with the converter's trailing-frame count) to size the
    // internal output buffer. Returns false on any failure; the object is
    // then safe to destroy but must not be used.
    bool Init(const AudioStreamBasicDescription *inFormat, const AudioStreamBasicDescription *outFormat,
            int inputBufferSizeInBytes)
    {
        TRACE0(">>Resampler::Init\n");
        TRACE0(" inFormat: ");
        PrintStreamDesc(inFormat);
        TRACE0(" outFormat: ");
        PrintStreamDesc(outFormat);
        TRACE1(" inputBufferSize: %d bytes\n", inputBufferSizeInBytes);
        OSStatus err;

        // mono formats are accepted even if flagged non-interleaved,
        // since interleaving is meaningless for a single channel
        if ((outFormat->mFormatFlags & kAudioFormatFlagIsNonInterleaved) != 0 && outFormat->mChannelsPerFrame != 1) {
            ERROR0("Resampler::Init ERROR: outFormat is non-interleaved\n");
            return false;
        }
        if ((inFormat->mFormatFlags & kAudioFormatFlagIsNonInterleaved) != 0 && inFormat->mChannelsPerFrame != 1) {
            ERROR0("Resampler::Init ERROR: inFormat is non-interleaved\n");
            return false;
        }

        memcpy(&asbdIn, inFormat, sizeof(AudioStreamBasicDescription));
        memcpy(&asbdOut, outFormat, sizeof(AudioStreamBasicDescription));

        err = AudioConverterNew(inFormat, outFormat, &converter);

        if (err || converter == NULL) {
            OS_ERROR1(err, "Resampler::Init (AudioConverterNew), converter=%p", converter);
            return false;
        }

        // allocate buffer for output data
        int maximumInFrames = inputBufferSizeInBytes / inFormat->mBytesPerFrame;
        // take into account trailingFrames
        AudioConverterPrimeInfo primeInfo = {0, 0};
        UInt32 sizePrime = sizeof(primeInfo);
        err = AudioConverterGetProperty(converter, kAudioConverterPrimeInfo, &sizePrime, &primeInfo);
        if (err) {
            OS_ERROR0(err, "Resampler::Init (get kAudioConverterPrimeInfo)");
            // ignore the error
        } else {
            // the default primeMethod is kConverterPrimeMethod_Normal, so we need only trailingFrames
            maximumInFrames += primeInfo.trailingFrames;
        }
        float outBufferSizeInFrames = (outFormat->mSampleRate / inFormat->mSampleRate) * ((float)maximumInFrames);
        // to avoid complex calculation just set outBufferSize as double of the calculated value
        outBufferSize = (int)outBufferSizeInFrames * outFormat->mBytesPerFrame * 2;
        // safety check - consider 256 frame as the minimum input buffer
        int minOutSize = 256 * outFormat->mBytesPerFrame;
        if (outBufferSize < minOutSize) {
            outBufferSize = minOutSize;
        }

        outBuffer = malloc(outBufferSize);

        if (outBuffer == NULL) {
            ERROR1("Resampler::Init ERROR: malloc failed (%d bytes)\n", outBufferSize);
            AudioConverterDispose(converter);
            converter = NULL;
            return false;
        }

        TRACE1(" allocated: %d bytes for output buffer\n", outBufferSize);

        TRACE0("<<Resampler::Init: OK\n");
        return true;
    }

    // returns size of the internal output buffer
    int GetOutBufferSize() {
        return outBufferSize;
    }

    // process next part of data (writes resampled data to the ringBuffer without overflow check)
    // Drives AudioConverterFillComplexBuffer in a loop until the converter
    // signals kResamplerEndOfInputData (raised by ConverterInputProc once
    // all of srcBuffer has been handed over). Returns bytes written.
    int Process(void *srcBuffer, int len, RingBuffer *ringBuffer) {
        int bytesWritten = 0;
        TRACE2(">>Resampler::Process: %d bytes, converter = %p\n", len, converter);
        if (converter == NULL) { // sanity check
            bytesWritten = ringBuffer->Write(srcBuffer, len, false);
        } else {
            InputProcData data;
            data.pThis = this;
            data.data = (Byte *)srcBuffer;
            data.dataSize = len;

            OSStatus err;
            do {
                AudioBufferList abl; // by default it contains 1 AudioBuffer
                abl.mNumberBuffers = 1;
                abl.mBuffers[0].mNumberChannels = asbdOut.mChannelsPerFrame;
                abl.mBuffers[0].mDataByteSize = outBufferSize;
                abl.mBuffers[0].mData = outBuffer;

                UInt32 packets = (UInt32)outBufferSize / asbdOut.mBytesPerPacket;

                TRACE2(">>AudioConverterFillComplexBuffer: request %d packets, provide %d bytes buffer\n",
                    (int)packets, (int)abl.mBuffers[0].mDataByteSize);

                err = AudioConverterFillComplexBuffer(converter, ConverterInputProc, &data, &packets, &abl, NULL);

                TRACE2("<<AudioConverterFillComplexBuffer: got %d packets (%d bytes)\n",
                    (int)packets, (int)abl.mBuffers[0].mDataByteSize);
                if (packets > 0) {
                    int bytesToWrite = (int)(packets * asbdOut.mBytesPerPacket);
                    bytesWritten += ringBuffer->Write(abl.mBuffers[0].mData, bytesToWrite, false);
                }

                // if outputBuffer is small to store all available frames,
                // we get noErr here. In the case just continue the conversion
            } while (err == noErr);

            if (err != kResamplerEndOfInputData) {
                // unexpected error
                OS_ERROR0(err, "Resampler::Process (AudioConverterFillComplexBuffer)");
            }
        }
        TRACE2("<<Resampler::Process: written %d bytes (converted from %d bytes)\n", bytesWritten, len);

        return bytesWritten;
    }

    // resets internal buffers (called on capture timestamp discontinuity)
    void Discontinue() {
        TRACE0(">>Resampler::Discontinue\n");
        if (converter != NULL) {
            AudioConverterReset(converter);
        }
        TRACE0("<<Resampler::Discontinue\n");
    }

private:
    AudioConverterRef converter;

    // buffer for output data
    // note that there is no problem if the buffer is not big enough to store
    // all converted data - it's only performance issue
    void *outBuffer;
    int outBufferSize;

    AudioStreamBasicDescription asbdIn;     // hardware (source) format
    AudioStreamBasicDescription asbdOut;    // user-requested (ring buffer) format

    // per-Process() state handed to ConverterInputProc via inUserData
    struct InputProcData {
        Resampler *pThis;
        Byte *data; // data == NULL means we handle Discontinue(false)
        int dataSize; // == 0 if all data was already provided to the converter or we handle Discontinue(false)
    };

    // AudioConverter pull callback: hands the entire remaining input chunk
    // to the converter on the first call, then terminates the conversion by
    // returning kResamplerEndOfInputData on the next call.
    static OSStatus ConverterInputProc(AudioConverterRef inAudioConverter, UInt32 *ioNumberDataPackets,
            AudioBufferList *ioData, AudioStreamPacketDescription **outDataPacketDescription, void *inUserData)
    {
        InputProcData *data = (InputProcData *)inUserData;

        TRACE3(" >>ConverterInputProc: requested %d packets, data contains %d bytes (%d packets)\n",
            (int)*ioNumberDataPackets, (int)data->dataSize, (int)(data->dataSize / data->pThis->asbdIn.mBytesPerPacket));
        if (data->dataSize == 0) {
            // already called & provided all input data
            // interrupt conversion by returning error
            *ioNumberDataPackets = 0;
            TRACE0(" <<ConverterInputProc: returns kResamplerEndOfInputData\n");
            return kResamplerEndOfInputData;
        }

        ioData->mNumberBuffers = 1;
        ioData->mBuffers[0].mNumberChannels = data->pThis->asbdIn.mChannelsPerFrame;
        ioData->mBuffers[0].mDataByteSize = data->dataSize;
        ioData->mBuffers[0].mData = data->data;

        *ioNumberDataPackets = data->dataSize / data->pThis->asbdIn.mBytesPerPacket;

        // all data has been provided to the converter
        data->dataSize = 0;

        TRACE1(" <<ConverterInputProc: returns %d packets\n", (int)(*ioNumberDataPackets));
        return noErr;
    }

};
604
605
606
struct OSX_DirectAudioDevice {
607
AudioUnit audioUnit;
608
RingBuffer ringBuffer;
609
AudioStreamBasicDescription asbd;
610
611
// only for target lines
612
UInt32 inputBufferSizeInBytes;
613
Resampler *resampler;
614
// to detect discontinuity (to reset resampler)
615
SInt64 lastWrittenSampleTime;
616
617
618
OSX_DirectAudioDevice() : audioUnit(NULL), asbd(), resampler(NULL), lastWrittenSampleTime(0) {
619
}
620
621
~OSX_DirectAudioDevice() {
622
if (audioUnit) {
623
AudioComponentInstanceDispose(audioUnit);
624
}
625
if (resampler) {
626
delete resampler;
627
}
628
}
629
};
630
631
// Creates and configures an output AudioUnit for the given device.
// isSource != 0 -> playback; otherwise the unit is set up for capture
// (output bus disabled, input bus enabled). deviceID == 0 selects the
// default device. Returns NULL on any failure (errors are logged).
static AudioUnit CreateOutputUnit(AudioDeviceID deviceID, int isSource)
{
    OSStatus err;
    AudioUnit unit;

    AudioComponentDescription desc;
    desc.componentType = kAudioUnitType_Output;
    desc.componentSubType = (deviceID == 0 && isSource) ? kAudioUnitSubType_DefaultOutput : kAudioUnitSubType_HALOutput;
    desc.componentManufacturer = kAudioUnitManufacturer_Apple;
    desc.componentFlags = 0;
    desc.componentFlagsMask = 0;

    AudioComponent comp = AudioComponentFindNext(NULL, &desc);
    // FIX(review): guard against a missing component before instantiating;
    // previously a NULL comp was passed straight to AudioComponentInstanceNew.
    if (comp == NULL) {
        ERROR0("CreateOutputUnit: AudioComponentFindNext returned NULL\n");
        return NULL;
    }
    err = AudioComponentInstanceNew(comp, &unit);

    if (err) {
        OS_ERROR0(err, "CreateOutputUnit:OpenAComponent");
        return NULL;
    }

    if (!isSource) {
        // capture: disable the output (render) bus 0 ...
        int enableIO = 0;
        err = AudioUnitSetProperty(unit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output,
                                   0, &enableIO, sizeof(enableIO));
        if (err) {
            OS_ERROR0(err, "SetProperty (output EnableIO)");
        }
        // ... and enable the input (capture) bus 1
        enableIO = 1;
        err = AudioUnitSetProperty(unit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input,
                                   1, &enableIO, sizeof(enableIO));
        if (err) {
            OS_ERROR0(err, "SetProperty (input EnableIO)");
        }

        if (!deviceID) {
            // get real AudioDeviceID for default input device (macosx current input device)
            deviceID = GetDefaultDevice(isSource);
            if (!deviceID) {
                AudioComponentInstanceDispose(unit);
                return NULL;
            }
        }
    }

    if (deviceID) {
        err = AudioUnitSetProperty(unit, kAudioOutputUnitProperty_CurrentDevice, kAudioUnitScope_Global,
                                   0, &deviceID, sizeof(deviceID));
        if (err) {
            OS_ERROR0(err, "SetProperty (CurrentDevice)");
            AudioComponentInstanceDispose(unit);
            return NULL;
        }
    }

    return unit;
}
687
688
// AURenderCallback for playback (source) lines: the AudioUnit pulls
// inNumberFrames of audio and we fill ioData from the ring buffer,
// zero-padding on underrun. Runs on the CoreAudio render thread, so it
// must not block for long (RingBuffer::Read only takes a short mutex).
static OSStatus OutputCallback(void *inRefCon,
                               AudioUnitRenderActionFlags *ioActionFlags,
                               const AudioTimeStamp *inTimeStamp,
                               UInt32 inBusNumber,
                               UInt32 inNumberFrames,
                               AudioBufferList *ioData)
{
    OSX_DirectAudioDevice *device = (OSX_DirectAudioDevice*)inRefCon;

    int nchannels = ioData->mNumberBuffers; // should be always == 1 (interleaved channels)
    AudioBuffer *audioBuffer = ioData->mBuffers;

    TRACE3(">>OutputCallback: busNum=%d, requested %d frames (%d bytes)\n",
        (int)inBusNumber, (int)inNumberFrames, (int)(inNumberFrames * device->asbd.mBytesPerFrame));
    TRACE3(" abl: %d buffers, buffer[0].channels=%d, buffer.size=%d\n",
        nchannels, (int)audioBuffer->mNumberChannels, (int)audioBuffer->mDataByteSize);

    int bytesToRead = inNumberFrames * device->asbd.mBytesPerFrame;
    if (bytesToRead > (int)audioBuffer->mDataByteSize) {
        TRACE0("--OutputCallback: !!! audioBuffer IS TOO SMALL!!!\n");
        // round down to a whole number of frames that fits the buffer
        bytesToRead = audioBuffer->mDataByteSize / device->asbd.mBytesPerFrame * device->asbd.mBytesPerFrame;
    }
    int bytesRead = device->ringBuffer.Read(audioBuffer->mData, bytesToRead);
    if (bytesRead < bytesToRead) {
        // no enough data (underrun)
        TRACE2("--OutputCallback: !!! UNDERRUN (read %d bytes of %d)!!!\n", bytesRead, bytesToRead);
        // silence the rest
        memset((Byte*)audioBuffer->mData + bytesRead, 0, bytesToRead-bytesRead);
        bytesRead = bytesToRead;
    }

    audioBuffer->mDataByteSize = (UInt32)bytesRead;
    // SAFETY: set mDataByteSize for all other AudioBuffer in the AudioBufferList to zero
    while (--nchannels > 0) {
        audioBuffer++;
        audioBuffer->mDataByteSize = 0;
    }
    TRACE1("<<OutputCallback (returns %d)\n", bytesRead);

    return noErr;
}
729
730
// Input-ready callback for capture (target) lines: pulls the freshly
// captured audio out of the AudioUnit via AudioUnitRender, then pushes it
// into the ring buffer — through the resampler when hardware and requested
// sample rates differ. Runs on the CoreAudio capture thread.
static OSStatus InputCallback(void *inRefCon,
                              AudioUnitRenderActionFlags *ioActionFlags,
                              const AudioTimeStamp *inTimeStamp,
                              UInt32 inBusNumber,
                              UInt32 inNumberFrames,
                              AudioBufferList *ioData)
{
    OSX_DirectAudioDevice *device = (OSX_DirectAudioDevice*)inRefCon;

    TRACE4(">>InputCallback: busNum=%d, timeStamp=%lld, %d frames (%d bytes)\n",
        (int)inBusNumber, (long long)inTimeStamp->mSampleTime, (int)inNumberFrames, (int)(inNumberFrames * device->asbd.mBytesPerFrame));

    AudioBufferList abl; // by default it contains 1 AudioBuffer
    abl.mNumberBuffers = 1;
    abl.mBuffers[0].mNumberChannels = device->asbd.mChannelsPerFrame;
    abl.mBuffers[0].mDataByteSize = device->inputBufferSizeInBytes; // assume this is == (inNumberFrames * device->asbd.mBytesPerFrame)
    abl.mBuffers[0].mData = NULL; // request for the audioUnit's buffer

    OSStatus err = AudioUnitRender(device->audioUnit, ioActionFlags, inTimeStamp, inBusNumber, inNumberFrames, &abl);
    if (err) {
        OS_ERROR0(err, "<<InputCallback: AudioUnitRender");
    } else {
        if (device->resampler != NULL) {
            // test for discontinuity
            // AUHAL starts timestamps at zero, so test if the current timestamp less then the last written
            SInt64 sampleTime = inTimeStamp->mSampleTime;
            if (sampleTime < device->lastWrittenSampleTime) {
                // discontinuity, reset the resampler
                TRACE2(" InputCallback (RESAMPLED), DISCONTINUITY (%f -> %f)\n",
                    (float)device->lastWrittenSampleTime, (float)sampleTime);

                device->resampler->Discontinue();
            } else {
                TRACE2(" InputCallback (RESAMPLED), continuous: lastWrittenSampleTime = %f, sampleTime=%f\n",
                    (float)device->lastWrittenSampleTime, (float)sampleTime);
            }
            device->lastWrittenSampleTime = sampleTime + inNumberFrames;

            int bytesWritten = device->resampler->Process(abl.mBuffers[0].mData, (int)abl.mBuffers[0].mDataByteSize, &device->ringBuffer);
            TRACE2("<<InputCallback (RESAMPLED, saved %d bytes of %d)\n", bytesWritten, (int)abl.mBuffers[0].mDataByteSize);
        } else {
            // no resampling needed: copy straight into the ring buffer
            // (overflow allowed — Allocate() reserved extra headroom)
            int bytesWritten = device->ringBuffer.Write(abl.mBuffers[0].mData, (int)abl.mBuffers[0].mDataByteSize, false);
            TRACE2("<<InputCallback (saved %d bytes of %d)\n", bytesWritten, (int)abl.mBuffers[0].mDataByteSize);
        }
    }

    return noErr;
}
778
779
780
static void FillASBDForNonInterleavedPCM(AudioStreamBasicDescription& asbd,
781
float sampleRate, int channels, int sampleSizeInBits, bool isFloat, int isSigned, bool isBigEndian)
782
{
783
// FillOutASBDForLPCM cannot produce unsigned integer format
784
asbd.mSampleRate = sampleRate;
785
asbd.mFormatID = kAudioFormatLinearPCM;
786
asbd.mFormatFlags = (isFloat ? kAudioFormatFlagIsFloat : (isSigned ? kAudioFormatFlagIsSignedInteger : 0))
787
| (isBigEndian ? (kAudioFormatFlagIsBigEndian) : 0)
788
| kAudioFormatFlagIsPacked;
789
asbd.mBytesPerPacket = channels * ((sampleSizeInBits + 7) / 8);
790
asbd.mFramesPerPacket = 1;
791
asbd.mBytesPerFrame = asbd.mBytesPerPacket;
792
asbd.mChannelsPerFrame = channels;
793
asbd.mBitsPerChannel = sampleSizeInBits;
794
}
795
796
// Opens a playback (isSource != 0) or capture line on the given device
// with the requested PCM format and buffer size.
// deviceID == 0 selects the system default device. Only DAUDIO_PCM is
// supported. Returns an opaque OSX_DirectAudioDevice* handle (passed to
// the other DAUDIO_* entry points) or NULL on failure.
void* DAUDIO_Open(INT32 mixerIndex, INT32 deviceID, int isSource,
                  int encoding, float sampleRate, int sampleSizeInBits,
                  int frameSize, int channels,
                  int isSigned, int isBigEndian, int bufferSizeInBytes)
{
    TRACE3(">>DAUDIO_Open: mixerIndex=%d deviceID=0x%x isSource=%d\n", (int)mixerIndex, (unsigned int)deviceID, isSource);
    TRACE3(" sampleRate=%d sampleSizeInBits=%d channels=%d\n", (int)sampleRate, sampleSizeInBits, channels);
#ifdef USE_TRACE
    // trace-only: resolve and log the device name
    {
        AudioDeviceID audioDeviceID = deviceID;
        if (audioDeviceID == 0) {
            // default device
            audioDeviceID = GetDefaultDevice(isSource);
        }
        char name[256];
        OSStatus err = GetAudioObjectProperty(audioDeviceID, kAudioUnitScope_Global, kAudioDevicePropertyDeviceName, 256, &name, 0);
        if (err != noErr) {
            OS_ERROR1(err, " audioDeviceID=0x%x, name is N/A:", (int)audioDeviceID);
        } else {
            TRACE2(" audioDeviceID=0x%x, name=%s\n", (int)audioDeviceID, name);
        }
    }
#endif

    if (encoding != DAUDIO_PCM) {
        ERROR1("<<DAUDIO_Open: ERROR: unsupported encoding (%d)\n", encoding);
        return NULL;
    }
    if (channels <= 0) {
        ERROR1("<<DAUDIO_Open: ERROR: Invalid number of channels=%d!\n", channels);
        return NULL;
    }

    OSX_DirectAudioDevice *device = new OSX_DirectAudioDevice();

    // playback renders on bus 0 (input scope of the output unit);
    // capture delivers on bus 1 (output scope)
    AudioUnitScope scope = isSource ? kAudioUnitScope_Input : kAudioUnitScope_Output;
    int element = isSource ? 0 : 1;
    OSStatus err = noErr;
    int extraBufferBytes = 0;

    device->audioUnit = CreateOutputUnit(deviceID, isSource);

    if (!device->audioUnit) {
        delete device;
        return NULL;
    }

    if (!isSource) {
        // capture: if the requested rate differs noticeably from the
        // hardware rate, interpose a Resampler; otherwise capture directly
        AudioDeviceID actualDeviceID = deviceID != 0 ? deviceID : GetDefaultDevice(isSource);
        float hardwareSampleRate = GetSampleRate(actualDeviceID, isSource);
        TRACE2("--DAUDIO_Open: sampleRate = %f, hardwareSampleRate=%f\n", sampleRate, hardwareSampleRate);

        if (fabs(sampleRate - hardwareSampleRate) > 1) {
            device->resampler = new Resampler();

            // request HAL for Float32 with native endianess
            FillASBDForNonInterleavedPCM(device->asbd, hardwareSampleRate, channels, 32, true, false, kAudioFormatFlagsNativeEndian != 0);
        } else {
            sampleRate = hardwareSampleRate;    // in case sample rates are not exactly equal
        }
    }

    if (device->resampler == NULL) {
        // no resampling, request HAL for the requested format
        FillASBDForNonInterleavedPCM(device->asbd, sampleRate, channels, sampleSizeInBits, false, isSigned, isBigEndian);
    }

    err = AudioUnitSetProperty(device->audioUnit, kAudioUnitProperty_StreamFormat, scope, element, &device->asbd, sizeof(device->asbd));
    if (err) {
        OS_ERROR0(err, "<<DAUDIO_Open set StreamFormat");
        delete device;
        return NULL;
    }

    // playback uses a render callback (we are asked for data);
    // capture uses an input callback (we are told data is ready)
    AURenderCallbackStruct output;
    output.inputProc = isSource ? OutputCallback : InputCallback;
    output.inputProcRefCon = device;

    err = AudioUnitSetProperty(device->audioUnit,
                               isSource
                                   ? (AudioUnitPropertyID)kAudioUnitProperty_SetRenderCallback
                                   : (AudioUnitPropertyID)kAudioOutputUnitProperty_SetInputCallback,
                               kAudioUnitScope_Global, 0, &output, sizeof(output));
    if (err) {
        OS_ERROR0(err, "<<DAUDIO_Open set RenderCallback");
        delete device;
        return NULL;
    }

    err = AudioUnitInitialize(device->audioUnit);
    if (err) {
        OS_ERROR0(err, "<<DAUDIO_Open UnitInitialize");
        delete device;
        return NULL;
    }

    if (!isSource) {
        // for target lines we need extra bytes in the ringBuffer
        // to prevent collisions when InputCallback overrides data on overflow
        UInt32 size;
        OSStatus err;

        size = sizeof(device->inputBufferSizeInBytes);
        err = AudioUnitGetProperty(device->audioUnit, kAudioDevicePropertyBufferFrameSize, kAudioUnitScope_Global,
                                   0, &device->inputBufferSizeInBytes, &size);
        if (err) {
            OS_ERROR0(err, "<<DAUDIO_Open (TargetDataLine)GetBufferSize\n");
            delete device;
            return NULL;
        }
        device->inputBufferSizeInBytes *= device->asbd.mBytesPerFrame;  // convert frames to bytes
        extraBufferBytes = (int)device->inputBufferSizeInBytes;
    }

    if (device->resampler != NULL) {
        // resampler output format is a user requested format (== ringBuffer format)
        AudioStreamBasicDescription asbdOut; // ringBuffer format
        FillASBDForNonInterleavedPCM(asbdOut, sampleRate, channels, sampleSizeInBits, false, isSigned, isBigEndian);

        // set resampler input buffer size to the HAL buffer size
        if (!device->resampler->Init(&device->asbd, &asbdOut, (int)device->inputBufferSizeInBytes)) {
            ERROR0("<<DAUDIO_Open: resampler.Init() FAILED.\n");
            delete device;
            return NULL;
        }
        // extra bytes in the ringBuffer (extraBufferBytes) should be equal resampler output buffer size
        extraBufferBytes = device->resampler->GetOutBufferSize();
    }

    if (!device->ringBuffer.Allocate(bufferSizeInBytes, extraBufferBytes)) {
        ERROR0("<<DAUDIO_Open: Ring buffer allocation error\n");
        delete device;
        return NULL;
    }

    TRACE0("<<DAUDIO_Open: OK\n");
    return device;
}
934
935
int DAUDIO_Start(void* id, int isSource) {
936
OSX_DirectAudioDevice *device = (OSX_DirectAudioDevice*)id;
937
TRACE0("DAUDIO_Start\n");
938
939
OSStatus err = AudioOutputUnitStart(device->audioUnit);
940
941
if (err != noErr) {
942
OS_ERROR0(err, "DAUDIO_Start");
943
}
944
945
return err == noErr ? TRUE : FALSE;
946
}
947
948
int DAUDIO_Stop(void* id, int isSource) {
949
OSX_DirectAudioDevice *device = (OSX_DirectAudioDevice*)id;
950
TRACE0("DAUDIO_Stop\n");
951
952
OSStatus err = AudioOutputUnitStop(device->audioUnit);
953
954
return err == noErr ? TRUE : FALSE;
955
}
956
957
void DAUDIO_Close(void* id, int isSource) {
958
OSX_DirectAudioDevice *device = (OSX_DirectAudioDevice*)id;
959
TRACE0("DAUDIO_Close\n");
960
961
delete device;
962
}
963
964
int DAUDIO_Write(void* id, char* data, int byteSize) {
965
OSX_DirectAudioDevice *device = (OSX_DirectAudioDevice*)id;
966
TRACE1(">>DAUDIO_Write: %d bytes to write\n", byteSize);
967
968
int result = device->ringBuffer.Write(data, byteSize, true);
969
970
TRACE1("<<DAUDIO_Write: %d bytes written\n", result);
971
return result;
972
}
973
974
int DAUDIO_Read(void* id, char* data, int byteSize) {
975
OSX_DirectAudioDevice *device = (OSX_DirectAudioDevice*)id;
976
TRACE1(">>DAUDIO_Read: %d bytes to read\n", byteSize);
977
978
int result = device->ringBuffer.Read(data, byteSize);
979
980
TRACE1("<<DAUDIO_Read: %d bytes has been read\n", result);
981
return result;
982
}
983
984
int DAUDIO_GetBufferSize(void* id, int isSource) {
985
OSX_DirectAudioDevice *device = (OSX_DirectAudioDevice*)id;
986
987
int bufferSizeInBytes = device->ringBuffer.GetBufferSize();
988
989
TRACE1("DAUDIO_GetBufferSize returns %d\n", bufferSizeInBytes);
990
return bufferSizeInBytes;
991
}
992
993
int DAUDIO_StillDraining(void* id, int isSource) {
994
OSX_DirectAudioDevice *device = (OSX_DirectAudioDevice*)id;
995
996
int draining = device->ringBuffer.GetValidByteCount() > 0 ? TRUE : FALSE;
997
998
TRACE1("DAUDIO_StillDraining returns %d\n", draining);
999
return draining;
1000
}
1001
1002
int DAUDIO_Flush(void* id, int isSource) {
1003
OSX_DirectAudioDevice *device = (OSX_DirectAudioDevice*)id;
1004
TRACE0("DAUDIO_Flush\n");
1005
1006
device->ringBuffer.Flush();
1007
1008
return TRUE;
1009
}
1010
1011
int DAUDIO_GetAvailable(void* id, int isSource) {
1012
OSX_DirectAudioDevice *device = (OSX_DirectAudioDevice*)id;
1013
1014
int bytesInBuffer = device->ringBuffer.GetValidByteCount();
1015
if (isSource) {
1016
return device->ringBuffer.GetBufferSize() - bytesInBuffer;
1017
} else {
1018
return bytesInBuffer;
1019
}
1020
}
1021
1022
// Adjusts the Java-side byte position by the data still pending in the
// ring buffer: playback lags behind what Java wrote (subtract), capture
// is ahead of what Java read (add).
INT64 DAUDIO_GetBytePosition(void* id, int isSource, INT64 javaBytePos) {
    OSX_DirectAudioDevice *dev = (OSX_DirectAudioDevice *)id;

    INT64 pending = dev->ringBuffer.GetValidByteCount();
    INT64 position = isSource ? (javaBytePos - pending) : (javaBytePos + pending);

    TRACE2("DAUDIO_GetBytePosition returns %lld (javaBytePos = %lld)\n", (long long)position, (long long)javaBytePos);
    return position;
}
1035
1036
// Intentionally empty: the position delta is derived on demand in
// DAUDIO_GetBytePosition, so there is nothing to store here.
void DAUDIO_SetBytePosition(void* id, int isSource, INT64 javaBytePos) {
}
1039
1040
int DAUDIO_RequiresServicing(void* id, int isSource) {
1041
return FALSE;
1042
}
1043
1044
// Never invoked: DAUDIO_RequiresServicing always reports FALSE.
void DAUDIO_Service(void* id, int isSource) {
}
1047
1048
#endif // USE_DAUDIO == TRUE
1049
1050