Mirror of https://github.com/gnif/LookingGlass.git
[client] audio: increase startup latency
Underruns can still happen quite easily at the beginning of playback, particularly at very low latency settings. Further increase the startup latency to avoid this.
parent 5e1b8f2abe
commit 0d97a51802
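To give a rough sense of what "startup latency" means here, the following is an illustrative sketch of the gating idea only, not code from this commit; the function name and the example period sizes are assumptions:

/* Illustrative only: playback does not begin until the ring buffer holds
 * enough frames to cover the device's worst-case initial demand plus
 * Spice's uneven packet pacing. */
#include <stdbool.h>

static bool shouldStartPlayback(int framesBuffered,
                                int spicePeriodFrames,     /* e.g. 480 at 48 kHz */
                                int deviceMaxPeriodFrames) /* e.g. 256 */
{
  /* Same shape as the gate added to audio_playbackData further below:
   * two Spice periods plus two device periods of queued audio. */
  int startFrames = spicePeriodFrames * 2 + deviceMaxPeriodFrames * 2;
  return framesBuffered >= startFrames;
}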
@@ -287,12 +287,10 @@ static void pipewire_playbackSetup(int channels, int sampleRate,
   pw_thread_loop_unlock(pw.thread);
 }
 
-static bool pipewire_playbackStart(int framesBuffered)
+static void pipewire_playbackStart(void)
 {
   if (!pw.playback.stream)
-    return false;
-
-  bool start = false;
+    return;
 
   if (pw.playback.state != STREAM_STATE_ACTIVE)
   {
@@ -301,17 +299,8 @@ static bool pipewire_playbackStart(int framesBuffered)
     switch (pw.playback.state)
     {
       case STREAM_STATE_INACTIVE:
-        // PipeWire startup latency varies wildly depending on what else is, or
-        // was last using the audio device. In the worst case, PipeWire can
-        // request two full buffers within a very short period of time
-        // immediately at the start of playback, so make sure we've got enough
-        // data in the buffer to support this
-        if (framesBuffered >= pw.playback.maxPeriodFrames * 2)
-        {
-          pw_stream_set_active(pw.playback.stream, true);
-          pw.playback.state = STREAM_STATE_ACTIVE;
-          start = true;
-        }
+        pw_stream_set_active(pw.playback.stream, true);
+        pw.playback.state = STREAM_STATE_ACTIVE;
         break;
 
       case STREAM_STATE_DRAINING:
@@ -325,8 +314,6 @@ static bool pipewire_playbackStart(int framesBuffered)
 
   pw_thread_loop_unlock(pw.thread);
-
-  return start;
 }
 
 static void pipewire_playbackStop(void)
@@ -37,7 +37,7 @@ struct PulseAudio
   int  sinkIndex;
   bool sinkCorked;
   bool sinkMuted;
-  int  sinkStart;
+  int  sinkMaxPeriodFrames;
   int  sinkSampleRate;
   int  sinkChannels;
   int  sinkStride;
@@ -250,7 +250,7 @@ static void pulseaudio_setup(int channels, int sampleRate,
 {
   if (pa.sink && pa.sinkChannels == channels && pa.sinkSampleRate == sampleRate)
   {
-    *maxPeriodFrames = pa.sinkStart;
+    *maxPeriodFrames = pa.sinkMaxPeriodFrames;
     return;
   }
 
@@ -289,28 +289,23 @@ static void pulseaudio_setup(int channels, int sampleRate,
 
   pa.sinkStride = channels * sizeof(float);
   pa.sinkPullFn = pullFn;
-  pa.sinkStart  = attribs.tlength / pa.sinkStride;
+  pa.sinkMaxPeriodFrames = attribs.tlength / pa.sinkStride;
   pa.sinkCorked = true;
 
-  *maxPeriodFrames = pa.sinkStart;
+  *maxPeriodFrames = pa.sinkMaxPeriodFrames;
 
   pa_threaded_mainloop_unlock(pa.loop);
 }
 
-static bool pulseaudio_start(int framesBuffered)
+static void pulseaudio_start(void)
 {
   if (!pa.sink)
-    return false;
-
-  if (framesBuffered < pa.sinkStart)
-    return false;
+    return;
 
   pa_threaded_mainloop_lock(pa.loop);
   pa_stream_cork(pa.sink, 0, NULL, NULL);
   pa.sinkCorked = false;
   pa_threaded_mainloop_unlock(pa.loop);
-
-  return true;
 }
 
 static void pulseaudio_stop(void)
@@ -50,9 +50,8 @@ struct LG_AudioDevOps
   void (*setup)(int channels, int sampleRate, int * maxPeriodFrames,
       LG_AudioPullFn pullFn);
 
-  /* called when there is data available to start playback
-   * return true if playback should start */
-  bool (*start)(int framesBuffered);
+  /* called when there is data available to start playback */
+  void (*start)(void);
 
   /* called when SPICE reports the audio stream has stopped */
   void (*stop)(void);
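For illustration of the updated callback contract only: a do-nothing backend written against the new playback callbacks could look roughly like the sketch below. The "null" backend name, the fixed period size, and everything else here are hypothetical; only the setup/start/stop signatures and the LG_AudioPullFn type come from the hunk above, and the sketch assumes the project's audiodev header is included.

/* Hypothetical minimal backend -- a sketch of the new contract, not code
 * from the repository. */
static LG_AudioPullFn null_pullFn = NULL;

static void null_playbackSetup(int channels, int sampleRate,
    int * maxPeriodFrames, LG_AudioPullFn pullFn)
{
  (void)channels;
  (void)sampleRate;
  /* Report a fixed period size; the caller uses this value when sizing
   * its startup threshold. 256 frames is an arbitrary example. */
  *maxPeriodFrames = 256;
  null_pullFn      = pullFn;
}

static void null_playbackStart(void)
{
  /* Under the new signature the backend no longer decides whether enough
   * audio is buffered; it simply starts its stream when asked. */
}

static void null_playbackStop(void)
{
  /* Stop/flush the stream; nothing to do for this sketch. */
}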
@@ -612,10 +612,18 @@ void audio_playbackData(uint8_t * data, size_t size)
 
   if (audio.playback.state == STREAM_STATE_SETUP)
   {
-    frames = ringbuffer_getCount(audio.playback.buffer);
-    if (audio.audioDev->playback.start(frames))
+    // In the worst case, the audio device can immediately request two full
+    // buffers at the beginning of playback. Latency corrections at startup can
+    // also be quite significant due to poor packet pacing from Spice, so
+    // additionally require at least two full Spice periods' worth of data
+    // before starting playback to minimise the chances of underrunning
+    int startFrames =
+      spiceData->periodFrames * 2 + audio.playback.deviceMaxPeriodFrames * 2;
+    if (spiceData->nextPosition >= startFrames) {
+      audio.audioDev->playback.start();
       audio.playback.state = STREAM_STATE_RUN;
+    }
  }
 
   double latencyFrames = actualOffset;
   if (audio.audioDev->playback.latency)
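As a worked example with assumed values (none of which come from the commit): for a 48 kHz stream with a Spice period of 480 frames and a device maximum period of 256 frames, the new gate requires 480 * 2 + 256 * 2 = 1472 frames, roughly 30.7 ms of buffered audio, before playback starts, whereas the previous per-backend gate of two device periods alone would have been 512 frames, about 10.7 ms.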