[client] audio: make the requested audio device period size configurable

This adds a new `audio:periodSize` option which defaults to 2048 frames.
For PipeWire, this controls the `PIPEWIRE_LATENCY` value. For PulseAudio,
this controls the target buffer length (`tlength`) value.
This commit is contained in:
Chris Spencer 2022-02-20 21:27:22 +00:00 committed by Geoffrey McRae
parent 0dad9b1e76
commit 9908b737b0
6 changed files with 61 additions and 33 deletions

View File

@ -53,6 +53,7 @@ struct PipeWire
int stride; int stride;
LG_AudioPullFn pullFn; LG_AudioPullFn pullFn;
int maxPeriodFrames; int maxPeriodFrames;
int startFrames;
StreamState state; StreamState state;
} }
@ -185,7 +186,8 @@ static void pipewire_playbackStopStream(void)
} }
static void pipewire_playbackSetup(int channels, int sampleRate, static void pipewire_playbackSetup(int channels, int sampleRate,
int * maxPeriodFrames, LG_AudioPullFn pullFn) int requestedPeriodFrames, int * maxPeriodFrames, int * startFrames,
LG_AudioPullFn pullFn)
{ {
const struct spa_pod * params[1]; const struct spa_pod * params[1];
uint8_t buffer[1024]; uint8_t buffer[1024];
@ -203,15 +205,15 @@ static void pipewire_playbackSetup(int channels, int sampleRate,
pw.playback.sampleRate == sampleRate) pw.playback.sampleRate == sampleRate)
{ {
*maxPeriodFrames = pw.playback.maxPeriodFrames; *maxPeriodFrames = pw.playback.maxPeriodFrames;
*startFrames = pw.playback.startFrames;
return; return;
} }
pipewire_playbackStopStream(); pipewire_playbackStopStream();
int defaultLatencyFrames = 2048; char requestedNodeLatency[32];
char defaultNodeLatency[32]; snprintf(requestedNodeLatency, sizeof(requestedNodeLatency), "%d/%d",
snprintf(defaultNodeLatency, sizeof(defaultNodeLatency), "%d/%d", requestedPeriodFrames, sampleRate);
defaultLatencyFrames, sampleRate);
pw.playback.channels = channels; pw.playback.channels = channels;
pw.playback.sampleRate = sampleRate; pw.playback.sampleRate = sampleRate;
@ -227,7 +229,7 @@ static void pipewire_playbackSetup(int channels, int sampleRate,
PW_KEY_MEDIA_TYPE , "Audio", PW_KEY_MEDIA_TYPE , "Audio",
PW_KEY_MEDIA_CATEGORY, "Playback", PW_KEY_MEDIA_CATEGORY, "Playback",
PW_KEY_MEDIA_ROLE , "Music", PW_KEY_MEDIA_ROLE , "Music",
PW_KEY_NODE_LATENCY , defaultNodeLatency, PW_KEY_NODE_LATENCY , requestedNodeLatency,
NULL NULL
), ),
&events, &events,
@ -250,21 +252,26 @@ static void pipewire_playbackSetup(int channels, int sampleRate,
{ {
DEBUG_WARN( DEBUG_WARN(
"PIPEWIRE_LATENCY value '%s' is invalid or does not match stream sample " "PIPEWIRE_LATENCY value '%s' is invalid or does not match stream sample "
"rate; defaulting to %d/%d", actualNodeLatency, defaultLatencyFrames, "rate; using %d/%d", actualNodeLatency, requestedPeriodFrames,
sampleRate); sampleRate);
struct spa_dict_item items[] = { struct spa_dict_item items[] = {
{ PW_KEY_NODE_LATENCY, defaultNodeLatency } { PW_KEY_NODE_LATENCY, requestedNodeLatency }
}; };
pw_stream_update_properties(pw.playback.stream, pw_stream_update_properties(pw.playback.stream,
&SPA_DICT_INIT_ARRAY(items)); &SPA_DICT_INIT_ARRAY(items));
pw.playback.maxPeriodFrames = defaultLatencyFrames; pw.playback.maxPeriodFrames = requestedPeriodFrames;
} }
else else
pw.playback.maxPeriodFrames = num; pw.playback.maxPeriodFrames = num;
// If the previous quantum size was very small, PipeWire can request two full
// periods almost immediately at the start of playback
pw.playback.startFrames = pw.playback.maxPeriodFrames * 2;
*maxPeriodFrames = pw.playback.maxPeriodFrames; *maxPeriodFrames = pw.playback.maxPeriodFrames;
*startFrames = pw.playback.startFrames;
if (!pw.playback.stream) if (!pw.playback.stream)
{ {

View File

@ -39,6 +39,7 @@ struct PulseAudio
bool sinkMuted; bool sinkMuted;
bool sinkStarting; bool sinkStarting;
int sinkMaxPeriodFrames; int sinkMaxPeriodFrames;
int sinkStartFrames;
int sinkSampleRate; int sinkSampleRate;
int sinkChannels; int sinkChannels;
int sinkStride; int sinkStride;
@ -257,29 +258,29 @@ static void pulseaudio_overflow_cb(pa_stream * p, void * userdata)
} }
static void pulseaudio_setup(int channels, int sampleRate, static void pulseaudio_setup(int channels, int sampleRate,
int * maxPeriodFrames, LG_AudioPullFn pullFn) int requestedPeriodFrames, int * maxPeriodFrames, int * startFrames,
LG_AudioPullFn pullFn)
{ {
if (pa.sink && pa.sinkChannels == channels && pa.sinkSampleRate == sampleRate) if (pa.sink && pa.sinkChannels == channels && pa.sinkSampleRate == sampleRate)
{ {
*maxPeriodFrames = pa.sinkMaxPeriodFrames; *maxPeriodFrames = pa.sinkMaxPeriodFrames;
*startFrames = pa.sinkStartFrames;
return; return;
} }
//TODO: be smarter about this
const int PERIOD_LEN = 80;
pa_sample_spec spec = { pa_sample_spec spec = {
.format = PA_SAMPLE_FLOAT32, .format = PA_SAMPLE_FLOAT32,
.rate = sampleRate, .rate = sampleRate,
.channels = channels .channels = channels
}; };
int stride = channels * sizeof(float);
int bufferSize = requestedPeriodFrames * 2 * stride;
pa_buffer_attr attribs = pa_buffer_attr attribs =
{ {
.maxlength = pa_usec_to_bytes((PERIOD_LEN * 2) * PA_USEC_PER_MSEC, &spec), .maxlength = -1,
.tlength = pa_usec_to_bytes(PERIOD_LEN * PA_USEC_PER_MSEC, &spec), .tlength = bufferSize,
.prebuf = 0, .prebuf = 0,
.fragsize = pa_usec_to_bytes(PERIOD_LEN * PA_USEC_PER_MSEC, &spec),
.minreq = (uint32_t)-1 .minreq = (uint32_t)-1
}; };
@ -295,17 +296,21 @@ static void pulseaudio_setup(int channels, int sampleRate,
pa_stream_set_underflow_callback(pa.sink, pulseaudio_underflow_cb, NULL); pa_stream_set_underflow_callback(pa.sink, pulseaudio_underflow_cb, NULL);
pa_stream_set_overflow_callback (pa.sink, pulseaudio_overflow_cb , NULL); pa_stream_set_overflow_callback (pa.sink, pulseaudio_overflow_cb , NULL);
pa_stream_connect_playback(pa.sink, NULL, &attribs, pa_stream_connect_playback(pa.sink, NULL, &attribs, PA_STREAM_START_CORKED,
PA_STREAM_START_CORKED | PA_STREAM_ADJUST_LATENCY,
NULL, NULL); NULL, NULL);
pa.sinkStride = channels * sizeof(float); pa.sinkStride = stride;
pa.sinkPullFn = pullFn; pa.sinkPullFn = pullFn;
pa.sinkMaxPeriodFrames = attribs.tlength / pa.sinkStride; pa.sinkMaxPeriodFrames = requestedPeriodFrames;
pa.sinkCorked = true; pa.sinkCorked = true;
pa.sinkStarting = false; pa.sinkStarting = false;
*maxPeriodFrames = pa.sinkMaxPeriodFrames; // If something else is, or was recently using a small latency value,
// PulseAudio can request way more data at startup than is reasonable
pa.sinkStartFrames = requestedPeriodFrames * 4;
*maxPeriodFrames = requestedPeriodFrames;
*startFrames = pa.sinkStartFrames;
pa_threaded_mainloop_unlock(pa.loop); pa_threaded_mainloop_unlock(pa.loop);
} }

View File

@ -47,8 +47,8 @@ struct LG_AudioDevOps
/* setup the stream for playback but don't start it yet /* setup the stream for playback but don't start it yet
* Note: the pull function returns f32 samples * Note: the pull function returns f32 samples
*/ */
void (*setup)(int channels, int sampleRate, int * maxPeriodFrames, void (*setup)(int channels, int sampleRate, int requestedPeriodFrames,
LG_AudioPullFn pullFn); int * maxPeriodFrames, int * startFrames, LG_AudioPullFn pullFn);
/* called when there is data available to start playback */ /* called when there is data available to start playback */
void (*start)(void); void (*start)(void);

View File

@ -100,7 +100,8 @@ typedef struct
int sampleRate; int sampleRate;
int stride; int stride;
int deviceMaxPeriodFrames; int deviceMaxPeriodFrames;
int deviceTargetStartFrames; int deviceStartFrames;
int targetStartFrames;
RingBuffer buffer; RingBuffer buffer;
RingBuffer deviceTiming; RingBuffer deviceTiming;
@ -225,7 +226,7 @@ static int playbackPullFrames(uint8_t * dst, int frames)
// startup latency. This avoids underrunning the buffer if the audio // startup latency. This avoids underrunning the buffer if the audio
// device starts earlier than required // device starts earlier than required
int offset = ringbuffer_getCount(audio.playback.buffer) - int offset = ringbuffer_getCount(audio.playback.buffer) -
audio.playback.deviceTargetStartFrames; audio.playback.targetStartFrames;
if (offset < 0) if (offset < 0)
{ {
data->nextPosition += offset; data->nextPosition += offset;
@ -361,9 +362,12 @@ void audio_playbackStart(int channels, int sampleRate, PSAudioFormat format,
audio.playback.spiceData.offsetErrorIntegral = 0.0; audio.playback.spiceData.offsetErrorIntegral = 0.0;
audio.playback.spiceData.ratioIntegral = 0.0; audio.playback.spiceData.ratioIntegral = 0.0;
int requestedPeriodFrames = max(g_params.audioPeriodSize, 1);
audio.playback.deviceMaxPeriodFrames = 0; audio.playback.deviceMaxPeriodFrames = 0;
audio.audioDev->playback.setup(channels, sampleRate, audio.playback.deviceStartFrames = 0;
&audio.playback.deviceMaxPeriodFrames, playbackPullFrames); audio.audioDev->playback.setup(channels, sampleRate, requestedPeriodFrames,
&audio.playback.deviceMaxPeriodFrames, &audio.playback.deviceStartFrames,
playbackPullFrames);
DEBUG_ASSERT(audio.playback.deviceMaxPeriodFrames > 0); DEBUG_ASSERT(audio.playback.deviceMaxPeriodFrames > 0);
// if a volume level was stored, set it before we return // if a volume level was stored, set it before we return
@ -698,14 +702,13 @@ void audio_playbackData(uint8_t * data, size_t size)
if (audio.playback.state == STREAM_STATE_SETUP_SPICE) if (audio.playback.state == STREAM_STATE_SETUP_SPICE)
{ {
// In the worst case, the audio device can immediately request two full // Latency corrections at startup can be quite significant due to poor
// buffers at the beginning of playback. Latency corrections at startup can // packet pacing from Spice, so require at least two full Spice periods'
// also be quite significant due to poor packet pacing from Spice, so // worth of data in addition to the startup delay requested by the device
// additionally require at least two full Spice periods' worth of data
// before starting playback to minimise the chances of underrunning // before starting playback to minimise the chances of underrunning
int startFrames = int startFrames =
spiceData->periodFrames * 2 + audio.playback.deviceMaxPeriodFrames * 2; spiceData->periodFrames * 2 + audio.playback.deviceStartFrames;
audio.playback.deviceTargetStartFrames = startFrames; audio.playback.targetStartFrames = startFrames;
// The actual time between opening the device and the device starting to // The actual time between opening the device and the device starting to
// pull data can range anywhere between nearly instant and hundreds of // pull data can range anywhere between nearly instant and hundreds of

View File

@ -464,6 +464,15 @@ static struct Option options[] =
.type = OPTION_TYPE_BOOL, .type = OPTION_TYPE_BOOL,
.value.x_bool = true .value.x_bool = true
}, },
// audio options
{
.module = "audio",
.name = "periodSize",
.description = "Requested audio device period size in samples",
.type = OPTION_TYPE_INT,
.value.x_int = 2048
},
{0} {0}
}; };
@ -636,6 +645,8 @@ bool config_load(int argc, char * argv[])
g_params.showCursorDot = option_get_bool("spice", "showCursorDot"); g_params.showCursorDot = option_get_bool("spice", "showCursorDot");
} }
g_params.audioPeriodSize = option_get_int("audio", "periodSize");
return true; return true;
} }

View File

@ -199,6 +199,8 @@ struct AppParams
bool autoCapture; bool autoCapture;
bool captureInputOnly; bool captureInputOnly;
bool showCursorDot; bool showCursorDot;
int audioPeriodSize;
}; };
struct CBRequest struct CBRequest