Mirror of https://github.com/gnif/LookingGlass.git
[client] audio: allow the audiodev to return the periodFrames
This change allows the audiodevs to report the minimum number of period frames needed to start playback, instead of the client having to rely on the first pull to obtain this information. This value is then used both to select the initial start latency and to train the desired latency at runtime, keeping it as low as possible.
parent dd2d84a080
commit 41884bfcc5
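To make the new contract concrete, here is a minimal, self-contained sketch of the interface change from the consumer's side. The struct shape follows the LG_AudioDevOps hunk below; the null backend, its fixed 480-frame period, and main() are hypothetical illustrations, not code from this commit.

#include <stdint.h>
#include <stdio.h>

/* pull signature modelled on playbackPullFrames in the diff */
typedef int (*LG_AudioPullFn)(uint8_t * dst, int frames);

/* mirrors the playback half of LG_AudioDevOps after this change */
struct PlaybackOps
{
  /* setup now reports the device period via the periodFrames
   * out-parameter instead of the client inferring it from the first
   * pull */
  void (*setup)(int channels, int sampleRate, LG_AudioPullFn pullFn,
      int * periodFrames);

  /* start no longer takes framesBuffered or returns a decision; the
   * client decides when enough audio is buffered */
  void (*start)(void);
};

/* hypothetical backend reporting a fixed 480-frame period
 * (10 ms at 48 kHz) */
static void nullSetup(int channels, int sampleRate, LG_AudioPullFn pullFn,
    int * periodFrames)
{
  (void)channels; (void)sampleRate; (void)pullFn;
  *periodFrames = 480;
}

static void nullStart(void) { }

int main(void)
{
  struct PlaybackOps ops = { .setup = nullSetup, .start = nullStart };

  int frames;
  ops.setup(2, 48000, NULL, &frames);

  /* as in audio_playbackStart below: the reported period seeds both
   * the device clock bookkeeping and the latency target */
  int targetLatencyFrames = frames;
  printf("period %d frames, initial target latency %d frames\n",
      frames, targetLatencyFrames);
  return 0;
}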
pipewire.c

@@ -104,6 +104,17 @@ static void pipewire_onPlaybackProcess(void * userdata)
   if (pw.playback.rateMatch && pw.playback.rateMatch->size > 0)
     frames = min(frames, pw.playback.rateMatch->size);
 
+  /* pipewire doesn't provide a way to access the quantum, so we start the
+   * stream and stop it immediately at setup to get this value */
+  if (pw.playback.startFrames == -1)
+  {
+    sbuf->datas[0].chunk->size = 0;
+    pw_stream_queue_buffer(pw.playback.stream, pbuf);
+    pw_stream_set_active(pw.playback.stream, false);
+    pw.playback.startFrames = frames;
+    return;
+  }
+
   frames = pw.playback.pullFn(dst, frames);
   if (!frames)
   {
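PipeWire does not expose the quantum directly, so the hunk above activates the stream, records the frame count delivered to the first process callback, and immediately deactivates. Below is a condensed, hypothetical model of that handshake using a plain mutex and condition variable in place of pw_thread_loop_lock/pw_thread_loop_wait; it illustrates the synchronisation pattern only and is not the PipeWire API.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static int startFrames = -1;       /* -1 = quantum not yet probed */

static void * processThread(void * arg)
{
  (void)arg;
  int frames = 256;                /* whatever quantum the server picked */

  pthread_mutex_lock(&lock);
  if (startFrames == -1)
  {
    /* the real callback also queues an empty buffer and deactivates
     * the stream at this point */
    startFrames = frames;
    pthread_cond_signal(&cond);
  }
  pthread_mutex_unlock(&lock);
  return NULL;
}

int main(void)
{
  pthread_t t;
  pthread_create(&t, NULL, processThread, NULL);

  /* mirrors: while(pw.playback.startFrames == -1)
   *            pw_thread_loop_wait(pw.thread); */
  pthread_mutex_lock(&lock);
  while (startFrames == -1)
    pthread_cond_wait(&cond, &lock);
  pthread_mutex_unlock(&lock);

  pthread_join(t, NULL);
  printf("periodFrames = %d\n", startFrames);
  return 0;
}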
pipewire.c (continued)

@@ -179,7 +190,7 @@ static void pipewire_playbackStopStream(void)
 }
 
 static void pipewire_playbackSetup(int channels, int sampleRate,
-    LG_AudioPullFn pullFn)
+    LG_AudioPullFn pullFn, int * periodFrames)
 {
   const struct spa_pod * params[1];
   uint8_t buffer[1024];

@@ -209,7 +220,7 @@ static void pipewire_playbackSetup(int channels, int sampleRate,
   pw.playback.sampleRate  = sampleRate;
   pw.playback.stride      = sizeof(float) * channels;
   pw.playback.pullFn      = pullFn;
-  pw.playback.startFrames = maxLatencyFrames;
+  pw.playback.startFrames = -1;
 
   pw_thread_loop_lock(pw.thread);
   pw.playback.stream = pw_stream_new_simple(

@@ -247,19 +258,22 @@ static void pipewire_playbackSetup(int channels, int sampleRate,
       PW_ID_ANY,
       PW_STREAM_FLAG_AUTOCONNECT |
       PW_STREAM_FLAG_MAP_BUFFERS |
-      PW_STREAM_FLAG_RT_PROCESS  |
-      PW_STREAM_FLAG_INACTIVE,
+      PW_STREAM_FLAG_RT_PROCESS,
       params, 1);
 
   pw_thread_loop_unlock(pw.thread);
+
+  /* wait for the stream to start and set this value */
+  while(pw.playback.startFrames == -1)
+    pw_thread_loop_wait(pw.thread);
+
+  *periodFrames = pw.playback.startFrames;
 }
 
-static bool pipewire_playbackStart(int framesBuffered)
+static void pipewire_playbackStart(void)
 {
   if (!pw.playback.stream)
-    return false;
-
-  bool start = false;
+    return;
 
   if (pw.playback.state != STREAM_STATE_ACTIVE)
   {

@@ -268,12 +282,8 @@ static bool pipewire_playbackStart(int framesBuffered)
     switch (pw.playback.state)
     {
      case STREAM_STATE_INACTIVE:
-        if (framesBuffered >= pw.playback.startFrames)
-        {
-          pw_stream_set_active(pw.playback.stream, true);
-          pw.playback.state = STREAM_STATE_ACTIVE;
-          start = true;
-        }
+        pw_stream_set_active(pw.playback.stream, true);
+        pw.playback.state = STREAM_STATE_ACTIVE;
        break;
 
      case STREAM_STATE_DRAINING:

@@ -287,8 +297,6 @@ static bool pipewire_playbackStart(int framesBuffered)
 
     pw_thread_loop_unlock(pw.thread);
   }
-
-  return start;
 }
 
 static void pipewire_playbackStop(void)
pulseaudio.c

@@ -37,7 +37,6 @@ struct PulseAudio
   int  sinkIndex;
   bool sinkCorked;
   bool sinkMuted;
-  int  sinkStart;
   int  sinkSampleRate;
   int  sinkChannels;
   int  sinkStride;

@@ -246,7 +245,7 @@ static void pulseaudio_overflow_cb(pa_stream * p, void * userdata)
 }
 
 static void pulseaudio_setup(int channels, int sampleRate,
-    LG_AudioPullFn pullFn)
+    LG_AudioPullFn pullFn, int * periodFrames)
 {
   if (pa.sink && pa.sinkChannels == channels && pa.sinkSampleRate == sampleRate)
     return;

@@ -286,26 +285,21 @@ static void pulseaudio_setup(int channels, int sampleRate,
 
   pa.sinkStride = channels * sizeof(float);
   pa.sinkPullFn = pullFn;
-  pa.sinkStart  = attribs.tlength / pa.sinkStride;
   pa.sinkCorked = true;
+  *periodFrames = attribs.tlength / pa.sinkStride;
 
   pa_threaded_mainloop_unlock(pa.loop);
 }
 
-static bool pulseaudio_start(int framesBuffered)
+static void pulseaudio_start(void)
 {
   if (!pa.sink)
-    return false;
-
-  if (framesBuffered < pa.sinkStart)
-    return false;
+    return;
 
   pa_threaded_mainloop_lock(pa.loop);
   pa_stream_cork(pa.sink, 0, NULL, NULL);
   pa.sinkCorked = false;
   pa_threaded_mainloop_unlock(pa.loop);
-
-  return true;
 }
 
 static void pulseaudio_stop(void)
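In the PulseAudio backend above, the period comes from the negotiated buffer attributes: tlength is in bytes, so dividing by the stride (bytes per frame) yields frames. A small worked example with assumed values (stereo float32 at 48 kHz, a 3840-byte tlength); real numbers come from the stream's negotiated pa_buffer_attr, i.e. attribs in the hunk above.

#include <stdio.h>

int main(void)
{
  /* assumed negotiation results for illustration */
  int channels   = 2;
  int sampleRate = 48000;
  int stride     = channels * (int)sizeof(float); /* 8 bytes per frame */
  int tlength    = 3840;                          /* bytes (assumed)   */

  int periodFrames = tlength / stride;            /* 480 frames */
  printf("periodFrames = %d (%.1f ms)\n", periodFrames,
      1000.0 * periodFrames / sampleRate);        /* prints 10.0 ms */
  return 0;
}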
audiodev.h

@@ -47,11 +47,11 @@ struct LG_AudioDevOps
   /* setup the stream for playback but don't start it yet
    * Note: the pull function returns f32 samples
    */
-  void (*setup)(int channels, int sampleRate, LG_AudioPullFn pullFn);
+  void (*setup)(int channels, int sampleRate, LG_AudioPullFn pullFn,
+      int * periodFrames);
 
-  /* called when there is data available to start playback
-   * return true if playback should start */
-  bool (*start)(int framesBuffered);
+  /* called when there is data available to start playback */
+  void (*start)();
 
   /* called when SPICE reports the audio stream has stopped */
   void (*stop)(void);
audio.c

@@ -107,6 +107,7 @@ typedef struct
     // avoid false sharing
     alignas(64) PlaybackDeviceData deviceData;
     alignas(64) PlaybackSpiceData  spiceData;
+    int targetLatencyFrames;
   }
   playback;
 
@@ -218,15 +219,16 @@ static int playbackPullFrames(uint8_t * dst, int frames)
 
   if (audio.playback.buffer)
   {
+    static bool first = true;
     // Measure the device clock and post to the Spice thread
-    if (frames != data->periodFrames)
+    if (frames != data->periodFrames || first)
     {
-      bool init = data->periodFrames == 0;
-      if (init)
+      if (first)
+      {
         data->nextTime = now;
+        first = false;
+      }
 
-      data->periodFrames = frames;
-      data->periodSec    = (double) frames / audio.playback.sampleRate;
       data->nextTime    += llrint(data->periodSec * 1.0e9);
       data->nextPosition += frames;
 
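Since periodSec is now fixed at setup time (see the audio_playbackStart hunk below) rather than recomputed on every pull, each pull in the hunk above simply advances the device clock by one period. A tiny worked example with an assumed 480-frame period at 48 kHz:

#include <math.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
  int     frames     = 480;    /* periodFrames from setup (assumed)    */
  int     sampleRate = 48000;
  int64_t nextTime   = 0;      /* ns; seeded with `now` on first pull  */

  double periodSec = (double)frames / sampleRate;   /* 0.01 s */

  /* per pull: nextTime += llrint(periodSec * 1.0e9) */
  for (int pull = 0; pull < 3; ++pull)
    nextTime += llrint(periodSec * 1.0e9);

  printf("after 3 pulls: nextTime = %lld ns\n", (long long)nextTime);
  /* prints 30000000 ns, i.e. 30 ms of audio consumed */
  return 0;
}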
audio.c (continued)

@@ -314,10 +316,8 @@ void audio_playbackStart(int channels, int sampleRate, PSAudioFormat format,
   audio.playback.stride = channels * sizeof(float);
   audio.playback.state  = STREAM_STATE_SETUP;
 
-  audio.playback.deviceData.periodFrames = 0;
   audio.playback.deviceData.nextPosition = 0;
 
-  audio.playback.spiceData.periodFrames = 0;
   audio.playback.spiceData.nextPosition = 0;
   audio.playback.spiceData.devLastTime  = INT64_MIN;
   audio.playback.spiceData.devNextTime  = INT64_MIN;
@@ -325,7 +325,14 @@ void audio_playbackStart(int channels, int sampleRate, PSAudioFormat format,
   audio.playback.spiceData.offsetErrorIntegral = 0.0;
   audio.playback.spiceData.ratioIntegral       = 0.0;
 
-  audio.audioDev->playback.setup(channels, sampleRate, playbackPullFrames);
+  int frames;
+  audio.audioDev->playback.setup(channels, sampleRate, playbackPullFrames,
+      &frames);
+
+  audio.playback.deviceData.periodFrames = frames;
+  audio.playback.targetLatencyFrames     = frames;
+  audio.playback.deviceData.periodSec    =
+    (double)frames / audio.playback.sampleRate;
 
   // if a volume level was stored, set it before we return
   if (audio.playback.volumeChannels)
@@ -394,7 +401,8 @@ void audio_playbackData(uint8_t * data, size_t size)
   if (!STREAM_ACTIVE(audio.playback.state))
     return;
 
   PlaybackSpiceData  * spiceData = &audio.playback.spiceData;
+  PlaybackDeviceData * devData   = &audio.playback.deviceData;
   int64_t now = nanotime();
 
   // Convert from s16 to f32 samples
@@ -431,6 +439,15 @@ void audio_playbackData(uint8_t * data, size_t size)
     spiceData->devNextPosition = deviceTick.nextPosition;
   }
 
+  // If the buffer is getting too empty increase the target latency
+  static bool checkFill = false;
+  if (checkFill && audio.playback.state == STREAM_STATE_RUN &&
+      ringbuffer_getCount(audio.playback.buffer) < devData->periodFrames)
+  {
+    audio.playback.targetLatencyFrames += devData->periodFrames;
+    checkFill = false;
+  }
+
   // Measure the Spice audio clock
   int64_t curTime;
   int64_t curPosition;
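The checkFill flag added above acts as a one-shot ratchet: it is armed once the buffer has filled to the current target (see the final hunk), and if the buffer then drains below one device period while armed, the target grows by a period and the flag disarms until the buffer recovers. A self-contained simulation of that logic; the buffer fill levels are invented and the STREAM_STATE_RUN check is elided.

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
  int  periodFrames        = 480;
  int  targetLatencyFrames = 480;   /* seeded from the period at setup */
  bool checkFill           = false;

  /* made-up fill levels observed on successive data packets */
  int levels[] = { 600, 900, 200, 350, 1000, 100 };

  for (int i = 0; i < 6; ++i)
  {
    int count = levels[i];

    /* buffer got too empty while armed: raise the target */
    if (checkFill && count < periodFrames)
    {
      targetLatencyFrames += periodFrames;
      checkFill = false;
    }

    /* re-arm once we have buffered up to the target again */
    if (!checkFill && count >= targetLatencyFrames)
      checkFill = true;

    printf("level %4d -> target %d, armed %d\n",
        count, targetLatencyFrames, checkFill);
  }
  return 0;
}

Running this, the 200-frame and 100-frame underruns each bump the target by one period (480 to 960 to 1440 frames), while the intermediate low reading at 350 does not, because the ratchet is still disarmed.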
audio.c (continued)

@@ -492,17 +509,8 @@ void audio_playbackData(uint8_t * data, size_t size)
       ((double) (curTime - spiceData->devLastTime) /
        (spiceData->devNextTime - spiceData->devLastTime));
 
-  // Target latency derived experimentally to avoid underruns. This could be
-  // reduced with more tuning. We could adjust on the fly based upon the
-  // device period size, but that would result in underruns if the period size
-  // suddenly increases. It may be better instead to just reduce the maximum
-  // latency on the audio devices, which currently is set quite high
-  int targetLatencyMs = 70;
-  int targetLatencyFrames =
-    targetLatencyMs * audio.playback.sampleRate / 1000;
-
   double actualOffset = curPosition - devPosition;
-  double actualOffsetError = -(actualOffset - targetLatencyFrames);
+  double actualOffsetError = -(actualOffset - audio.playback.targetLatencyFrames);
 
   double error = actualOffsetError - offsetError;
   spiceData->offsetError += spiceData->b * error +
@@ -551,9 +559,18 @@ void audio_playbackData(uint8_t * data, size_t size)
   if (audio.playback.state == STREAM_STATE_SETUP)
   {
     frames = ringbuffer_getCount(audio.playback.buffer);
-    if (audio.audioDev->playback.start(frames))
+    if (frames >= max(devData->periodFrames,
+          ringbuffer_getLength(audio.playback.buffer) / 20))
+    {
       audio.playback.state = STREAM_STATE_RUN;
+      audio.audioDev->playback.start();
+    }
   }
+
+  // re-arm the buffer fill check if we have buffered enough
+  if (!checkFill && ringbuffer_getCount(audio.playback.buffer) >=
+      audio.playback.targetLatencyFrames)
+    checkFill = true;
 }
 
 bool audio_supportsRecord(void)
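With the start decision moved out of the backends, playback now begins once the buffer holds at least one device period or 1/20th of the ring buffer's capacity, whichever is larger. A worked example with assumed sizes; the actual ring buffer length is not shown in this diff, and max_i stands in for the client's max() macro.

#include <stdio.h>

static int max_i(int a, int b) { return a > b ? a : b; }

int main(void)
{
  int periodFrames = 480;    /* reported by the audiodev (assumed)      */
  int bufferLength = 19200;  /* ring buffer capacity in frames, assumed */

  /* mirrors: frames >= max(devData->periodFrames,
   *                        ringbuffer_getLength(buffer) / 20) */
  int startThreshold = max_i(periodFrames, bufferLength / 20);

  printf("start once %d frames are buffered (%d ms at 48 kHz)\n",
      startThreshold, startThreshold * 1000 / 48000);
  /* 19200 / 20 = 960 frames, so playback starts after 20 ms of audio */
  return 0;
}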