Mirror of https://github.com/gnif/LookingGlass.git
Revert "[client] audio: allow the audiodev to return the periodFrames"
This reverts commit 41884bfcc5.
PipeWire can change its period size on the fly, making this approach invalid.
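For context, the reverted approach cached the device period size once at setup and sized the target latency from it. PipeWire streams can renegotiate their quantum while running, so the pull callback may be asked for a different number of frames on any call. Below is a minimal sketch of the pattern the revert returns to, re-deriving the period whenever the requested frame count changes; the struct and function names here are illustrative stand-ins, not the project's actual types:

#include <stdint.h>
#include <math.h>

/* Illustrative stand-in for the real playback device state. */
typedef struct
{
  int     periodFrames; /* last period size seen from the device         */
  double  periodSec;    /* that period expressed in seconds              */
  int64_t nextTime;     /* predicted time of the next device wakeup (ns) */
}
ExamplePeriodTracker;

/* Called from the device's pull callback with however many frames the
 * device wants on this particular call. Because the period may change on
 * the fly, it is re-measured here rather than trusted from a one-off
 * value returned by setup(). */
static void examplePeriodUpdate(ExamplePeriodTracker * t, int frames,
    int sampleRate, int64_t now)
{
  if (frames != t->periodFrames)
  {
    if (t->periodFrames == 0) /* first call: anchor the clock to now */
      t->nextTime = now;

    t->periodFrames = frames;
    t->periodSec    = (double)frames / sampleRate;
  }

  /* advance the predicted device clock by one period */
  t->nextTime += llrint(t->periodSec * 1.0e9);
}

The point is simply that anything derived from the period (timing predictions, latency targets) has to be recomputed when the period moves, which is why a fixed periodFrames returned at setup is not a safe basis for the target latency.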
@@ -109,7 +109,6 @@ typedef struct
    // avoid false sharing
    alignas(64) PlaybackDeviceData deviceData;
    alignas(64) PlaybackSpiceData spiceData;
    int targetLatencyFrames;
  }
  playback;
@@ -221,16 +220,15 @@ static int playbackPullFrames(uint8_t * dst, int frames)
  if (audio.playback.buffer)
  {
    static bool first = true;
    // Measure the device clock and post to the Spice thread
    if (frames != data->periodFrames || first)
    if (frames != data->periodFrames)
    {
      if (first)
      {
        bool init = data->periodFrames == 0;
        if (init)
          data->nextTime = now;
        first = false;
      }
      data->periodFrames = frames;
      data->periodSec = (double) frames / audio.playback.sampleRate;
    data->nextTime += llrint(data->periodSec * 1.0e9);
    data->nextPosition += frames;
@@ -319,6 +317,7 @@ void audio_playbackStart(int channels, int sampleRate, PSAudioFormat format,
  audio.playback.stride = channels * sizeof(float);
  audio.playback.state = STREAM_STATE_SETUP;
  audio.playback.deviceData.periodFrames = 0;
  audio.playback.deviceData.nextPosition = 0;
  audio.playback.spiceData.periodFrames = 0;
@@ -329,14 +328,7 @@ void audio_playbackStart(int channels, int sampleRate, PSAudioFormat format,
  audio.playback.spiceData.offsetErrorIntegral = 0.0;
  audio.playback.spiceData.ratioIntegral = 0.0;
  int frames;
  audio.audioDev->playback.setup(channels, sampleRate, playbackPullFrames,
      &frames);
  audio.playback.deviceData.periodFrames = frames;
  audio.playback.targetLatencyFrames = frames;
  audio.playback.deviceData.periodSec =
      (double)frames / audio.playback.sampleRate;
  audio.audioDev->playback.setup(channels, sampleRate, playbackPullFrames);
  // if a volume level was stored, set it before we return
  if (audio.playback.volumeChannels)
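The change of calling convention above is the core of the revert: the reverted code had playback.setup report a fixed period size through an out parameter, while the restored code passes only the pull callback and observes the period at pull time. A rough sketch of the two shapes, using hypothetical typedef names rather than the project's real audiodev interface:

#include <stdint.h>

/* Reverted approach: the device reports its period size once, at setup. */
typedef void (*exampleSetupWithPeriodFn)(int channels, int sampleRate,
    int (*pullFrames)(uint8_t * dst, int frames), int * periodFrames);

/* Restored approach: no period is reported; the pull callback sees the
 * actual (possibly changing) period size on every invocation. */
typedef void (*exampleSetupFn)(int channels, int sampleRate,
    int (*pullFrames)(uint8_t * dst, int frames));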
@@ -405,8 +397,7 @@ void audio_playbackData(uint8_t * data, size_t size)
  if (!STREAM_ACTIVE(audio.playback.state))
    return;
  PlaybackSpiceData * spiceData = &audio.playback.spiceData;
  PlaybackDeviceData * devData = &audio.playback.deviceData;
  PlaybackSpiceData * spiceData = &audio.playback.spiceData;
  int64_t now = nanotime();
  // Convert from s16 to f32 samples
@@ -455,15 +446,6 @@ void audio_playbackData(uint8_t * data, size_t size)
    spiceData->devNextPosition = deviceTick.nextPosition;
  }
  // If the buffer is getting too empty increase the target latency
  static bool checkFill = false;
  if (checkFill && audio.playback.state == STREAM_STATE_RUN &&
      ringbuffer_getCount(audio.playback.buffer) < devData->periodFrames)
  {
    audio.playback.targetLatencyFrames += devData->periodFrames;
    checkFill = false;
  }
  // Measure the Spice audio clock
  int64_t curTime;
  int64_t curPosition;
@@ -525,8 +507,17 @@ void audio_playbackData(uint8_t * data, size_t size)
      ((double) (curTime - spiceData->devLastTime) /
       (spiceData->devNextTime - spiceData->devLastTime));
  // Target latency derived experimentally to avoid underruns. This could be
  // reduced with more tuning. We could adjust on the fly based upon the
  // device period size, but that would result in underruns if the period size
  // suddenly increases. It may be better instead to just reduce the maximum
  // latency on the audio devices, which currently is set quite high
  int targetLatencyMs = 70;
  int targetLatencyFrames =
      targetLatencyMs * audio.playback.sampleRate / 1000;
  double actualOffset = curPosition - devPosition;
  double actualOffsetError = -(actualOffset - audio.playback.targetLatencyFrames);
  double actualOffsetError = -(actualOffset - targetLatencyFrames);
  double error = actualOffsetError - offsetError;
  spiceData->offsetError += spiceData->b * error +
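As a worked example of the restored calculation: at a 48000 Hz sample rate, targetLatencyFrames = 70 * 48000 / 1000 = 3360 frames, so the controller aims to keep the Spice write position roughly 3360 frames ahead of the device position, independent of the device's period size.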
@@ -577,18 +568,9 @@ void audio_playbackData(uint8_t * data, size_t size)
  if (audio.playback.state == STREAM_STATE_SETUP)
  {
    frames = ringbuffer_getCount(audio.playback.buffer);
    if (frames >= max(devData->periodFrames,
          ringbuffer_getLength(audio.playback.buffer) / 20))
    {
      if (audio.audioDev->playback.start(frames))
        audio.playback.state = STREAM_STATE_RUN;
      audio.audioDev->playback.start();
    }
  }
  // re-arm the buffer fill check if we have buffered enough
  if (!checkFill && ringbuffer_getCount(audio.playback.buffer) >=
      audio.playback.targetLatencyFrames)
    checkFill = true;
}

bool audio_supportsRecord(void)