diff --git a/client/audiodevs/PipeWire/pipewire.c b/client/audiodevs/PipeWire/pipewire.c
index 9f2e18ec..85e2bfee 100644
--- a/client/audiodevs/PipeWire/pipewire.c
+++ b/client/audiodevs/PipeWire/pipewire.c
@@ -53,6 +53,7 @@ struct PipeWire
     int            stride;
     LG_AudioPullFn pullFn;
     int            maxPeriodFrames;
+    int            startFrames;
 
     StreamState state;
   }
@@ -185,7 +186,8 @@ static void pipewire_playbackStopStream(void)
 }
 
 static void pipewire_playbackSetup(int channels, int sampleRate,
-    int * maxPeriodFrames, LG_AudioPullFn pullFn)
+    int requestedPeriodFrames, int * maxPeriodFrames, int * startFrames,
+    LG_AudioPullFn pullFn)
 {
   const struct spa_pod * params[1];
   uint8_t buffer[1024];
@@ -203,15 +205,15 @@ static void pipewire_playbackSetup(int channels, int sampleRate,
       pw.playback.sampleRate == sampleRate)
   {
     *maxPeriodFrames = pw.playback.maxPeriodFrames;
+    *startFrames     = pw.playback.startFrames;
     return;
   }
 
   pipewire_playbackStopStream();
 
-  int defaultLatencyFrames = 2048;
-  char defaultNodeLatency[32];
-  snprintf(defaultNodeLatency, sizeof(defaultNodeLatency), "%d/%d",
-      defaultLatencyFrames, sampleRate);
+  char requestedNodeLatency[32];
+  snprintf(requestedNodeLatency, sizeof(requestedNodeLatency), "%d/%d",
+      requestedPeriodFrames, sampleRate);
 
   pw.playback.channels   = channels;
   pw.playback.sampleRate = sampleRate;
@@ -227,7 +229,7 @@ static void pipewire_playbackSetup(int channels, int sampleRate,
       PW_KEY_MEDIA_TYPE    , "Audio",
       PW_KEY_MEDIA_CATEGORY, "Playback",
       PW_KEY_MEDIA_ROLE    , "Music",
-      PW_KEY_NODE_LATENCY  , defaultNodeLatency,
+      PW_KEY_NODE_LATENCY  , requestedNodeLatency,
       NULL
     ),
     &events,
@@ -250,21 +252,26 @@ static void pipewire_playbackSetup(int channels, int sampleRate,
   {
     DEBUG_WARN(
       "PIPEWIRE_LATENCY value '%s' is invalid or does not match stream sample "
-      "rate; defaulting to %d/%d", actualNodeLatency, defaultLatencyFrames,
+      "rate; using %d/%d", actualNodeLatency, requestedPeriodFrames,
       sampleRate);
 
     struct spa_dict_item items[] = {
-      { PW_KEY_NODE_LATENCY, defaultNodeLatency }
+      { PW_KEY_NODE_LATENCY, requestedNodeLatency }
     };
     pw_stream_update_properties(pw.playback.stream,
         &SPA_DICT_INIT_ARRAY(items));
 
-    pw.playback.maxPeriodFrames = defaultLatencyFrames;
+    pw.playback.maxPeriodFrames = requestedPeriodFrames;
   }
   else
     pw.playback.maxPeriodFrames = num;
 
+  // If the previous quantum size was very small, PipeWire can request two full
+  // periods almost immediately at the start of playback
+  pw.playback.startFrames = pw.playback.maxPeriodFrames * 2;
+
   *maxPeriodFrames = pw.playback.maxPeriodFrames;
+  *startFrames     = pw.playback.startFrames;
 
   if (!pw.playback.stream)
   {
diff --git a/client/audiodevs/PulseAudio/pulseaudio.c b/client/audiodevs/PulseAudio/pulseaudio.c
index 9979a86d..74342f9b 100644
--- a/client/audiodevs/PulseAudio/pulseaudio.c
+++ b/client/audiodevs/PulseAudio/pulseaudio.c
@@ -39,6 +39,7 @@ struct PulseAudio
   bool sinkMuted;
   bool sinkStarting;
   int  sinkMaxPeriodFrames;
+  int  sinkStartFrames;
   int  sinkSampleRate;
   int  sinkChannels;
   int  sinkStride;
@@ -257,29 +258,29 @@ static void pulseaudio_overflow_cb(pa_stream * p, void * userdata)
 }
 
 static void pulseaudio_setup(int channels, int sampleRate,
-    int * maxPeriodFrames, LG_AudioPullFn pullFn)
+    int requestedPeriodFrames, int * maxPeriodFrames, int * startFrames,
+    LG_AudioPullFn pullFn)
 {
   if (pa.sink && pa.sinkChannels == channels &&
       pa.sinkSampleRate == sampleRate)
   {
     *maxPeriodFrames = pa.sinkMaxPeriodFrames;
+    *startFrames     = pa.sinkStartFrames;
     return;
   }
 
-  //TODO: be smarter about this
-  const int PERIOD_LEN = 80;
-
   pa_sample_spec spec = {
     .format   = PA_SAMPLE_FLOAT32,
     .rate     = sampleRate,
     .channels = channels
   };
 
+  int stride     = channels * sizeof(float);
+  int bufferSize = requestedPeriodFrames * 2 * stride;
   pa_buffer_attr attribs = {
-    .maxlength = pa_usec_to_bytes((PERIOD_LEN * 2) * PA_USEC_PER_MSEC, &spec),
-    .tlength   = pa_usec_to_bytes(PERIOD_LEN * PA_USEC_PER_MSEC, &spec),
+    .maxlength = -1,
+    .tlength   = bufferSize,
     .prebuf    = 0,
-    .fragsize  = pa_usec_to_bytes(PERIOD_LEN * PA_USEC_PER_MSEC, &spec),
     .minreq    = (uint32_t)-1
   };
 
@@ -295,17 +296,21 @@ static void pulseaudio_setup(int channels, int sampleRate,
   pa_stream_set_underflow_callback(pa.sink, pulseaudio_underflow_cb, NULL);
   pa_stream_set_overflow_callback (pa.sink, pulseaudio_overflow_cb , NULL);
 
-  pa_stream_connect_playback(pa.sink, NULL, &attribs,
-      PA_STREAM_START_CORKED | PA_STREAM_ADJUST_LATENCY,
+  pa_stream_connect_playback(pa.sink, NULL, &attribs, PA_STREAM_START_CORKED,
       NULL, NULL);
 
-  pa.sinkStride          = channels * sizeof(float);
+  pa.sinkStride          = stride;
   pa.sinkPullFn          = pullFn;
-  pa.sinkMaxPeriodFrames = attribs.tlength / pa.sinkStride;
+  pa.sinkMaxPeriodFrames = requestedPeriodFrames;
   pa.sinkCorked          = true;
   pa.sinkStarting        = false;
 
-  *maxPeriodFrames = pa.sinkMaxPeriodFrames;
+  // If something else is, or was recently using a small latency value,
+  // PulseAudio can request way more data at startup than is reasonable
+  pa.sinkStartFrames = requestedPeriodFrames * 4;
+
+  *maxPeriodFrames = requestedPeriodFrames;
+  *startFrames     = pa.sinkStartFrames;
 
   pa_threaded_mainloop_unlock(pa.loop);
 }
diff --git a/client/include/interface/audiodev.h b/client/include/interface/audiodev.h
index 8cba7b2f..08f41530 100644
--- a/client/include/interface/audiodev.h
+++ b/client/include/interface/audiodev.h
@@ -47,8 +47,8 @@ struct LG_AudioDevOps
     /* setup the stream for playback but don't start it yet
      * Note: the pull function returns f32 samples
      */
-    void (*setup)(int channels, int sampleRate, int * maxPeriodFrames,
-        LG_AudioPullFn pullFn);
+    void (*setup)(int channels, int sampleRate, int requestedPeriodFrames,
+        int * maxPeriodFrames, int * startFrames, LG_AudioPullFn pullFn);
 
     /* called when there is data available to start playback */
     void (*start)(void);
diff --git a/client/src/audio.c b/client/src/audio.c
index c78bb204..a9363be0 100644
--- a/client/src/audio.c
+++ b/client/src/audio.c
@@ -100,7 +100,8 @@ typedef struct
   int        sampleRate;
   int        stride;
   int        deviceMaxPeriodFrames;
-  int        deviceTargetStartFrames;
+  int        deviceStartFrames;
+  int        targetStartFrames;
 
   RingBuffer buffer;
   RingBuffer deviceTiming;
@@ -225,7 +226,7 @@ static int playbackPullFrames(uint8_t * dst, int frames)
     // startup latency. This avoids underrunning the buffer if the audio
     // device starts earlier than required
     int offset = ringbuffer_getCount(audio.playback.buffer) -
-      audio.playback.deviceTargetStartFrames;
+      audio.playback.targetStartFrames;
     if (offset < 0)
     {
       data->nextPosition += offset;
@@ -361,9 +362,12 @@ void audio_playbackStart(int channels, int sampleRate, PSAudioFormat format,
   audio.playback.spiceData.offsetErrorIntegral = 0.0;
   audio.playback.spiceData.ratioIntegral       = 0.0;
 
+  int requestedPeriodFrames = max(g_params.audioPeriodSize, 1);
   audio.playback.deviceMaxPeriodFrames = 0;
-  audio.audioDev->playback.setup(channels, sampleRate,
-      &audio.playback.deviceMaxPeriodFrames, playbackPullFrames);
+  audio.playback.deviceStartFrames     = 0;
+  audio.audioDev->playback.setup(channels, sampleRate, requestedPeriodFrames,
+      &audio.playback.deviceMaxPeriodFrames, &audio.playback.deviceStartFrames,
+      playbackPullFrames);
   DEBUG_ASSERT(audio.playback.deviceMaxPeriodFrames > 0);
 
   // if a volume level was stored, set it before we return
@@ -698,14 +702,13 @@ void audio_playbackData(uint8_t * data, size_t size)
 
   if (audio.playback.state == STREAM_STATE_SETUP_SPICE)
   {
-    // In the worst case, the audio device can immediately request two full
-    // buffers at the beginning of playback. Latency corrections at startup can
-    // also be quite significant due to poor packet pacing from Spice, so
-    // additionally require at least two full Spice periods' worth of data
+    // Latency corrections at startup can be quite significant due to poor
+    // packet pacing from Spice, so require at least two full Spice periods'
+    // worth of data in addition to the startup delay requested by the device
     // before starting playback to minimise the chances of underrunning
     int startFrames =
-      spiceData->periodFrames * 2 + audio.playback.deviceMaxPeriodFrames * 2;
-    audio.playback.deviceTargetStartFrames = startFrames;
+      spiceData->periodFrames * 2 + audio.playback.deviceStartFrames;
+    audio.playback.targetStartFrames = startFrames;
 
     // The actual time between opening the device and the device starting to
     // pull data can range anywhere between nearly instant and hundreds of
diff --git a/client/src/config.c b/client/src/config.c
index 7882c933..6d9a4c48 100644
--- a/client/src/config.c
+++ b/client/src/config.c
@@ -464,6 +464,15 @@ static struct Option options[] =
     .type           = OPTION_TYPE_BOOL,
     .value.x_bool   = true
   },
+
+  // audio options
+  {
+    .module         = "audio",
+    .name           = "periodSize",
+    .description    = "Requested audio device period size in samples",
+    .type           = OPTION_TYPE_INT,
+    .value.x_int    = 2048
+  },
   {0}
 };
 
@@ -636,6 +645,8 @@ bool config_load(int argc, char * argv[])
     g_params.showCursorDot = option_get_bool("spice", "showCursorDot");
   }
 
+  g_params.audioPeriodSize = option_get_int("audio", "periodSize");
+
   return true;
 }
 
diff --git a/client/src/main.h b/client/src/main.h
index 8e615f4c..fe48639d 100644
--- a/client/src/main.h
+++ b/client/src/main.h
@@ -199,6 +199,8 @@ struct AppParams
   bool autoCapture;
   bool captureInputOnly;
   bool showCursorDot;
+
+  int audioPeriodSize;
 };
 
 struct CBRequest
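
For reference, a minimal sketch (not part of this change) of how another backend might implement the revised setup() contract from audiodev.h: the device receives the requested period size, reports back via maxPeriodFrames the largest period it may actually pull, and via startFrames how much data it may consume immediately after start(); audio.c then waits for startFrames plus two Spice periods before starting playback. Only the setup() signature and the playbackPullFrames prototype above are taken from this diff; the example_* identifiers and the local pull-function typedef are hypothetical, added purely to keep the snippet self-contained.

// Hypothetical skeleton backend; example_* names are illustrative only.
// The typedef mirrors the pull function shape used by audio.c
// (static int playbackPullFrames(uint8_t * dst, int frames)).
#include <stdint.h>

typedef int (*LG_AudioPullFn)(uint8_t * dst, int frames);

static struct
{
  int            channels;
  int            sampleRate;
  int            maxPeriodFrames;
  int            startFrames;
  LG_AudioPullFn pullFn;
}
example;

static void example_playbackSetup(int channels, int sampleRate,
    int requestedPeriodFrames, int * maxPeriodFrames, int * startFrames,
    LG_AudioPullFn pullFn)
{
  example.channels   = channels;
  example.sampleRate = sampleRate;
  example.pullFn     = pullFn;

  // Report the period size the device will actually use; this sketch assumes
  // the requested size can be honoured exactly
  example.maxPeriodFrames = requestedPeriodFrames;

  // Report how much data the device may pull right after start(); the
  // PipeWire backend above uses two periods, PulseAudio uses four
  example.startFrames = requestedPeriodFrames * 2;

  *maxPeriodFrames = example.maxPeriodFrames;
  *startFrames     = example.startFrames;
}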