Mirror of https://github.com/gnif/LookingGlass.git
Revert "[client] audio: allow the audiodev to return the periodFrames"
This reverts commit 41884bfcc5.
PipeWire can change its period size on the fly, which invalidates this
approach.
Parent: c2a766c2ee
Commit: a0477466d2
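For context on the rationale: a PipeWire client only learns the cycle size through the rate-match io area, and that value is only good for the current process cycle. A minimal sketch (not the Looking Glass code; the callback names and the 256-frame fallback are hypothetical) of why caching the quantum once at setup goes stale:

#include <pipewire/pipewire.h>

static struct spa_io_rate_match * rateMatch = NULL;

static void on_io_changed(void * data, uint32_t id, void * area, uint32_t size)
{
  // PipeWire may hand us, replace, or revoke this area at any time
  if (id == SPA_IO_RateMatch)
    rateMatch = area;
}

static void on_process(void * data)
{
  // the quantum for THIS cycle only; it can differ on the next callback
  uint32_t frames = 256; // hypothetical fallback when no rate-match exists
  if (rateMatch && rateMatch->size > 0)
    frames = rateMatch->size;

  // ... dequeue a pw_buffer and fill `frames` frames of f32 samples ...
}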
@@ -104,17 +104,6 @@ static void pipewire_onPlaybackProcess(void * userdata)
   if (pw.playback.rateMatch && pw.playback.rateMatch->size > 0)
     frames = min(frames, pw.playback.rateMatch->size);
 
-  /* pipewire doesn't provide a way to access the quantum, so we start the
-   * stream and stop it immediately at setup to get this value */
-  if (pw.playback.startFrames == -1)
-  {
-    sbuf->datas[0].chunk->size = 0;
-    pw_stream_queue_buffer(pw.playback.stream, pbuf);
-    pw_stream_set_active(pw.playback.stream, false);
-    pw.playback.startFrames = frames;
-    return;
-  }
-
   frames = pw.playback.pullFn(dst, frames);
   if (!frames)
   {
@@ -190,7 +179,7 @@ static void pipewire_playbackStopStream(void)
 }
 
 static void pipewire_playbackSetup(int channels, int sampleRate,
-    LG_AudioPullFn pullFn, int * periodFrames)
+    LG_AudioPullFn pullFn)
 {
   const struct spa_pod * params[1];
   uint8_t buffer[1024];
@@ -220,7 +209,7 @@ static void pipewire_playbackSetup(int channels, int sampleRate,
   pw.playback.sampleRate  = sampleRate;
   pw.playback.stride      = sizeof(float) * channels;
   pw.playback.pullFn      = pullFn;
-  pw.playback.startFrames = -1;
+  pw.playback.startFrames = maxLatencyFrames;
 
   pw_thread_loop_lock(pw.thread);
   pw.playback.stream = pw_stream_new_simple(
@@ -258,22 +247,19 @@ static void pipewire_playbackSetup(int channels, int sampleRate,
       PW_ID_ANY,
       PW_STREAM_FLAG_AUTOCONNECT |
       PW_STREAM_FLAG_MAP_BUFFERS |
-      PW_STREAM_FLAG_RT_PROCESS,
+      PW_STREAM_FLAG_RT_PROCESS |
+      PW_STREAM_FLAG_INACTIVE,
       params, 1);
 
   pw_thread_loop_unlock(pw.thread);
-
-  /* wait for the stream to start and set this value */
-  while(pw.playback.startFrames == -1)
-    pw_thread_loop_wait(pw.thread);
-
-  *periodFrames = pw.playback.startFrames;
 }
 
-static void pipewire_playbackStart(void)
+static bool pipewire_playbackStart(int framesBuffered)
 {
   if (!pw.playback.stream)
-    return;
+    return false;
+
+  bool start = false;
 
   if (pw.playback.state != STREAM_STATE_ACTIVE)
   {
@@ -282,8 +268,12 @@ static void pipewire_playbackStart(void)
     switch (pw.playback.state)
     {
       case STREAM_STATE_INACTIVE:
+        if (framesBuffered >= pw.playback.startFrames)
+        {
         pw_stream_set_active(pw.playback.stream, true);
         pw.playback.state = STREAM_STATE_ACTIVE;
+          start = true;
+        }
         break;
 
       case STREAM_STATE_DRAINING:
@@ -297,6 +287,8 @@ static void pipewire_playbackStart(void)
 
     pw_thread_loop_unlock(pw.thread);
   }
+
+  return start;
 }
 
 static void pipewire_playbackStop(void)
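The added PW_STREAM_FLAG_INACTIVE means the stream no longer starts on connect; playback is deferred until pw_stream_set_active() is called from pipewire_playbackStart() above. A hedged sketch of the full connect call this tail belongs to (argument order per the PipeWire API; the variable names are assumptions):

pw_stream_connect(pw.playback.stream, PW_DIRECTION_OUTPUT, PW_ID_ANY,
    PW_STREAM_FLAG_AUTOCONNECT |
    PW_STREAM_FLAG_MAP_BUFFERS |
    PW_STREAM_FLAG_RT_PROCESS  |
    PW_STREAM_FLAG_INACTIVE,    // silent until pw_stream_set_active(stream, true)
    params, 1);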
@@ -37,6 +37,7 @@ struct PulseAudio
   int  sinkIndex;
   bool sinkCorked;
   bool sinkMuted;
+  int  sinkStart;
   int  sinkSampleRate;
   int  sinkChannels;
   int  sinkStride;
@@ -245,7 +246,7 @@ static void pulseaudio_overflow_cb(pa_stream * p, void * userdata)
 }
 
 static void pulseaudio_setup(int channels, int sampleRate,
-    LG_AudioPullFn pullFn, int * periodFrames)
+    LG_AudioPullFn pullFn)
 {
   if (pa.sink && pa.sinkChannels == channels && pa.sinkSampleRate == sampleRate)
     return;
@@ -285,21 +286,26 @@ static void pulseaudio_setup(int channels, int sampleRate,
 
   pa.sinkStride = channels * sizeof(float);
   pa.sinkPullFn = pullFn;
+  pa.sinkStart  = attribs.tlength / pa.sinkStride;
   pa.sinkCorked = true;
-  *periodFrames = attribs.tlength / pa.sinkStride;
 
   pa_threaded_mainloop_unlock(pa.loop);
 }
 
-static void pulseaudio_start(void)
+static bool pulseaudio_start(int framesBuffered)
 {
   if (!pa.sink)
-    return;
+    return false;
+
+  if (framesBuffered < pa.sinkStart)
+    return false;
 
   pa_threaded_mainloop_lock(pa.loop);
   pa_stream_cork(pa.sink, 0, NULL, NULL);
   pa.sinkCorked = false;
   pa_threaded_mainloop_unlock(pa.loop);
+
+  return true;
 }
 
 static void pulseaudio_stop(void)
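Note that pa_buffer_attr.tlength is expressed in bytes, so the new sinkStart value is simply the target buffer length converted to frames. A worked example with illustrative numbers (stereo f32 and a tlength of 32768 bytes; neither value is taken from the source):

int stride    = 2 * sizeof(float); // 8 bytes per frame for stereo f32
int sinkStart = 32768 / stride;    // 4096 frames must buffer before uncorking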
@@ -47,11 +47,11 @@ struct LG_AudioDevOps
     /* setup the stream for playback but don't start it yet
      * Note: the pull function returns f32 samples
      */
-    void (*setup)(int channels, int sampleRate, LG_AudioPullFn pullFn,
-        int * periodFrames);
+    void (*setup)(int channels, int sampleRate, LG_AudioPullFn pullFn);
 
-    /* called when there is data available to start playback */
-    void (*start)();
+    /* called when there is data available to start playback
+     * return true if playback should start */
+    bool (*start)(int framesBuffered);
 
     /* called when SPICE reports the audio stream has stopped */
     void (*stop)(void);
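Under this revised LG_AudioDevOps contract, setup() no longer reports a period size, and start() receives the current buffer fill so each backend can decide when playback may begin. A hypothetical backend skeleton (all "mydev" names are invented for illustration):

static int mydev_startFrames; // fill level required before starting

static void mydev_setup(int channels, int sampleRate, LG_AudioPullFn pullFn)
{
  // configure the stream and derive mydev_startFrames from its buffer size
}

static bool mydev_start(int framesBuffered)
{
  if (framesBuffered < mydev_startFrames)
    return false; // not enough queued; audio.c will call again with more

  // ... uncork / activate the stream here ...
  return true;    // audio.c then moves the stream to STREAM_STATE_RUN
}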
@@ -109,7 +109,6 @@ typedef struct
     // avoid false sharing
     alignas(64) PlaybackDeviceData deviceData;
     alignas(64) PlaybackSpiceData  spiceData;
-    int targetLatencyFrames;
   }
   playback;
 
@@ -221,16 +220,15 @@ static int playbackPullFrames(uint8_t * dst, int frames)
 
   if (audio.playback.buffer)
   {
-    static bool first = true;
-
     // Measure the device clock and post to the Spice thread
-    if (frames != data->periodFrames || first)
+    if (frames != data->periodFrames)
     {
-      if (first)
-      {
+      bool init = data->periodFrames == 0;
+      if (init)
         data->nextTime = now;
-        first = false;
-      }
 
+      data->periodFrames = frames;
+      data->periodSec    = (double) frames / audio.playback.sampleRate;
       data->nextTime     += llrint(data->periodSec * 1.0e9);
       data->nextPosition += frames;
 
@@ -319,6 +317,7 @@ void audio_playbackStart(int channels, int sampleRate, PSAudioFormat format,
   audio.playback.stride = channels * sizeof(float);
   audio.playback.state  = STREAM_STATE_SETUP;
 
+  audio.playback.deviceData.periodFrames = 0;
   audio.playback.deviceData.nextPosition = 0;
 
   audio.playback.spiceData.periodFrames = 0;
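This reset pairs with the pull-side change above: zeroing deviceData.periodFrames is what re-arms the `init` branch in playbackPullFrames for each new stream, something the removed function-local `static bool first` could not do after the first playback. In sketch form (field names as in the diff):

audio.playback.deviceData.periodFrames = 0; // at stream (re)start
// ... later, in playbackPullFrames:
bool init = data->periodFrames == 0;        // true again for every new stream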
@@ -329,14 +328,7 @@ void audio_playbackStart(int channels, int sampleRate, PSAudioFormat format,
   audio.playback.spiceData.offsetErrorIntegral = 0.0;
   audio.playback.spiceData.ratioIntegral       = 0.0;
 
-  int frames;
-  audio.audioDev->playback.setup(channels, sampleRate, playbackPullFrames,
-      &frames);
-
-  audio.playback.deviceData.periodFrames = frames;
-  audio.playback.targetLatencyFrames     = frames;
-  audio.playback.deviceData.periodSec    =
-    (double)frames / audio.playback.sampleRate;
+  audio.audioDev->playback.setup(channels, sampleRate, playbackPullFrames);
 
   // if a volume level was stored, set it before we return
   if (audio.playback.volumeChannels)
@@ -406,7 +398,6 @@ void audio_playbackData(uint8_t * data, size_t size)
     return;
 
   PlaybackSpiceData  * spiceData = &audio.playback.spiceData;
-  PlaybackDeviceData * devData   = &audio.playback.deviceData;
   int64_t now = nanotime();
 
   // Convert from s16 to f32 samples
@@ -455,15 +446,6 @@ void audio_playbackData(uint8_t * data, size_t size)
     spiceData->devNextPosition = deviceTick.nextPosition;
   }
 
-  // If the buffer is getting too empty increase the target latency
-  static bool checkFill = false;
-  if (checkFill && audio.playback.state == STREAM_STATE_RUN &&
-      ringbuffer_getCount(audio.playback.buffer) < devData->periodFrames)
-  {
-    audio.playback.targetLatencyFrames += devData->periodFrames;
-    checkFill = false;
-  }
-
   // Measure the Spice audio clock
   int64_t curTime;
   int64_t curPosition;
@@ -525,8 +507,17 @@ void audio_playbackData(uint8_t * data, size_t size)
       ((double) (curTime - spiceData->devLastTime) /
        (spiceData->devNextTime - spiceData->devLastTime));
 
+  // Target latency derived experimentally to avoid underruns. This could be
+  // reduced with more tuning. We could adjust on the fly based upon the
+  // device period size, but that would result in underruns if the period size
+  // suddenly increases. It may be better instead to just reduce the maximum
+  // latency on the audio devices, which currently is set quite high
+  int targetLatencyMs = 70;
+  int targetLatencyFrames =
+    targetLatencyMs * audio.playback.sampleRate / 1000;
+
   double actualOffset = curPosition - devPosition;
-  double actualOffsetError = -(actualOffset - audio.playback.targetLatencyFrames);
+  double actualOffsetError = -(actualOffset - targetLatencyFrames);
 
   double error = actualOffsetError - offsetError;
   spiceData->offsetError += spiceData->b * error +
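A quick sanity check of the fixed 70 ms target (the sample rates here are examples, not from the source):

int targetLatencyFrames = 70 * 48000 / 1000; // 3360 frames at 48 kHz
// at 44.1 kHz: 70 * 44100 / 1000 = 3087 frames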
@@ -577,18 +568,9 @@ void audio_playbackData(uint8_t * data, size_t size)
   if (audio.playback.state == STREAM_STATE_SETUP)
   {
     frames = ringbuffer_getCount(audio.playback.buffer);
-    if (frames >= max(devData->periodFrames,
-          ringbuffer_getLength(audio.playback.buffer) / 20))
-    {
+    if (audio.audioDev->playback.start(frames))
       audio.playback.state = STREAM_STATE_RUN;
-      audio.audioDev->playback.start();
-    }
   }
-
-  // re-arm the buffer fill check if we have buffered enough
-  if (!checkFill && ringbuffer_getCount(audio.playback.buffer) >=
-      audio.playback.targetLatencyFrames)
-    checkFill = true;
 }
 
 bool audio_supportsRecord(void)