Mirror of https://github.com/gnif/LookingGlass.git
[common] ringbuffer: add unbounded mode
In unbounded mode, the read and write pointers are free to move independently
of one another. This is useful where the input and output streams are
progressing at the same rate on average, and we want to keep the latency
stable in the event that an underrun or overrun occurs.

If an underrun occurs (i.e., there is not enough data in the buffer to
satisfy a read request), the missing values will be filled with zeros. When
the writer catches up, the same number of values will be skipped from the
input. If an overrun occurs (i.e., there is not enough free space in the
buffer to satisfy a write request), excess values will be discarded. When the
reader catches up, the same number of values will be zeroed in the output.

Unbounded mode is currently unused since our audio input and output streams
are not synchronised; this will be implemented in a later commit. The ring
buffer has also been reimplemented as a lock-free queue, which is safer for
use in audio device callbacks.
parent b34b253814
commit 599fdd6ffd
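To make the underrun behaviour described above concrete, here is a minimal
sketch against the API this commit introduces; the buffer length and sample
values are illustrative, not taken from the commit:

    #include "common/ringbuffer.h"

    void underrunExample(void)
    {
      // 8-slot unbounded buffer of ints; sizes here are illustrative only
      RingBuffer rb = ringbuffer_newUnbounded(8, sizeof(int));

      int in[4] = { 1, 2, 3, 4 };
      ringbuffer_append(rb, in, 4);   // writer stores 4 values

      int out[6];
      ringbuffer_consume(rb, out, 6); // underrun: out = 1,2,3,4,0,0

      // The reader is now 2 values ahead of the writer, so the next append
      // skips the first 2 input values to bring the streams back into sync.
      ringbuffer_append(rb, in, 4);   // only 3 and 4 are stored

      ringbuffer_free(&rb);
    }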
audio.c:

@@ -143,18 +143,7 @@ static void playbackStopNL(void)
 static int playbackPullFrames(uint8_t * dst, int frames)
 {
   if (audio.playback.buffer)
-  {
-    frames = min(frames, ringbuffer_getCount(audio.playback.buffer));
-    for(int fetched = 0; fetched < frames; )
-    {
-      int copy = frames - fetched;
-      uint8_t * src = ringbuffer_consume(audio.playback.buffer, &copy);
-
-      memcpy(dst, src, copy * audio.playback.stride);
-      dst += copy * audio.playback.stride;
-      fetched += copy;
-    }
-  }
+    frames = ringbuffer_consume(audio.playback.buffer, dst, frames);
   else
     frames = 0;
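The old pull path had to loop because the previous ringbuffer_consume
returned a pointer into the buffer itself, which could never span the wrap
point, so each call could yield fewer values than requested. The new consume
copies into the caller's buffer and handles the wrap internally, so a single
call suffices.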
@@ -183,6 +172,10 @@ void audio_playbackStart(int channels, int sampleRate, PSAudioFormat format,
     playbackStopNL();
   }
 
+  // Using a bounded ring buffer for now. We are not currently doing anything to
+  // keep the input and output in sync, so if we were using an unbounded buffer,
+  // it would eventually end up in a state of permanent underrun or overrun and
+  // the user would only hear silence
   const int bufferFrames = sampleRate;
   audio.playback.buffer = ringbuffer_new(bufferFrames,
       channels * sizeof(uint16_t));
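With bufferFrames = sampleRate, the buffer holds one second of audio. As a
worked example under assumed stream parameters (not specified in the diff):
at 48 kHz stereo with the 16-bit stride above, that is 48000 * 2 *
sizeof(uint16_t) = 192000 bytes.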
common/ringbuffer.h:

@@ -25,9 +25,24 @@ typedef struct RingBuffer * RingBuffer;
 
 RingBuffer ringbuffer_new(int length, size_t valueSize);
 
+/* In an unbounded ring buffer, the read and write pointers are free to move
+ * independently of one another. This is useful if your input and output streams
+ * are progressing at the same rate on average, and you want to keep the
+ * latency stable in the event that an underrun or overrun occurs.
+ *
+ * If an underrun occurs (i.e., there is not enough data in the buffer to
+ * satisfy a read request), the missing values will be filled with zeros. When
+ * the writer catches up, the same number of values will be skipped from the
+ * input.
+ *
+ * If an overrun occurs (i.e., there is not enough free space in the buffer to
+ * satisfy a write request), excess values will be discarded. When the reader
+ * catches up, the same number of values will be zeroed in the output.
+ */
+RingBuffer ringbuffer_newUnbounded(int length, size_t valueSize);
+
 void ringbuffer_free(RingBuffer * rb);
 void ringbuffer_push(RingBuffer rb, const void * value);
-bool ringbuffer_shift(RingBuffer rb, void * dst);
 void ringbuffer_reset(RingBuffer rb);
 
 /* Note that the following functions are NOT thread-safe */
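The overrun direction, as a minimal sketch (sizes and values again
illustrative):

    #include "common/ringbuffer.h"

    void overrunExample(void)
    {
      RingBuffer rb = ringbuffer_newUnbounded(4, sizeof(int));

      int in[6] = { 1, 2, 3, 4, 5, 6 };
      ringbuffer_append(rb, in, 6);   // overrun: 1..4 stored, 5 and 6 discarded

      int out[6];
      ringbuffer_consume(rb, out, 6); // out = 1,2,3,4,0,0; pointers realign

      ringbuffer_free(&rb);
    }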
@@ -35,23 +50,22 @@ int ringbuffer_getLength(const RingBuffer rb);
 int ringbuffer_getStart (const RingBuffer rb);
 int ringbuffer_getCount (const RingBuffer rb);
 void * ringbuffer_getValues(const RingBuffer rb);
-void * ringBuffer_getLastValue(const RingBuffer rb);
 
-/* appends up to count values to the buffer returning the number of values
- * appended
+/* Appends up to count values to the buffer returning the number of values
+ * appended. If the buffer is unbounded, the return value is always count;
+ * excess values will be discarded if the buffer is full. Pass a null values
+ * pointer to write zeros to the buffer. Count may be negative in unbounded mode
+ * to seek backwards.
  * Note: This function is thread-safe */
 int ringbuffer_append(const RingBuffer rb, const void * values, int count);
 
-/* consumes and returns up to *count values from the buffer setting *count to
- * the number of valid values returned.
+/* Consumes up to count values from the buffer returning the number of values
+ * consumed. If the buffer is unbounded, the return value is always count;
+ * excess values will be zeroed if there is not enough data in the buffer. Pass
+ * a null values pointer to move the read pointer without reading any data.
+ * Count may be negative in unbounded mode to seek backwards.
  * Note: This function is thread-safe */
-void * ringbuffer_consume(const RingBuffer rb, int * count);
+int ringbuffer_consume(const RingBuffer rb, void * values, int count);
 
-typedef void (*RingBufferValueFn)(void * value, void * udata);
-
-// set a function to call before a value is about to be overwritten
-void ringbuffer_setPreOverwriteFn(RingBuffer rb, RingBufferValueFn fn,
-    void * udata);
-
 typedef bool (*RingBufferIterator)(int index, void * value, void * udata);
 void ringbuffer_forEach(const RingBuffer rb, RingBufferIterator fn,
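A sketch of the null-pointer and negative-count conventions documented above
(all sizes illustrative):

    RingBuffer rb = ringbuffer_newUnbounded(1024, sizeof(int16_t));

    ringbuffer_append(rb, NULL, 64);    // write 64 zero values
    ringbuffer_consume(rb, NULL, 32);   // advance the read pointer by 32 values
    ringbuffer_consume(rb, NULL, -32);  // unbounded only: seek the reader back

    int16_t out[64];
    int n = ringbuffer_consume(rb, out, 64); // n == 64; always count when unbounded

    ringbuffer_free(&rb);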
ringbuffer.c:

@@ -19,79 +19,68 @@
  */
 
 #include "common/ringbuffer.h"
-#include "common/locking.h"
+#include "common/debug.h"
+#include "common/util.h"
 
+#include <stdatomic.h>
 #include <stdlib.h>
 #include <string.h>
 
 struct RingBuffer
 {
-  RingBufferValueFn preOverwriteFn;
-  void * preOverwriteUdata;
-
-  int length;
-  size_t valueSize;
-  LG_Lock lock;
-  int start, pos, count;
+  uint32_t length;
+  uint32_t valueSize;
+  _Atomic uint32_t readPos;
+  _Atomic uint32_t writePos;
+  bool unbounded;
   char values[0];
 };
 
-RingBuffer ringbuffer_new(int length, size_t valueSize)
+RingBuffer ringbuffer_newInternal(int length, size_t valueSize,
+    bool unbounded)
 {
+  DEBUG_ASSERT(valueSize > 0 && valueSize < UINT32_MAX);
+
   struct RingBuffer * rb = calloc(1, sizeof(*rb) + valueSize * length);
   rb->length = length;
   rb->valueSize = valueSize;
-  LG_LOCK_INIT(rb->lock);
+  rb->readPos = 0;
+  rb->writePos = 0;
+  rb->unbounded = unbounded;
   return rb;
 }
 
+RingBuffer ringbuffer_new(int length, size_t valueSize)
+{
+  return ringbuffer_newInternal(length, valueSize, false);
+}
+
+RingBuffer ringbuffer_newUnbounded(int length, size_t valueSize)
+{
+  return ringbuffer_newInternal(length, valueSize, true);
+}
+
 void ringbuffer_free(RingBuffer * rb)
 {
   if (!*rb)
     return;
 
-  LG_LOCK_FREE(rb->lock);
   free(*rb);
   *rb = NULL;
 }
 
 void ringbuffer_push(RingBuffer rb, const void * value)
 {
-  void * dst = rb->values + rb->pos * rb->valueSize;
-  if (rb->count < rb->length)
-    ++rb->count;
-  else
-  {
-    if (++rb->start == rb->length)
-      rb->start = 0;
-
-    if (rb->preOverwriteFn)
-      rb->preOverwriteFn(dst, rb->preOverwriteUdata);
-  }
-
-  memcpy(dst, value, rb->valueSize);
-  if (++rb->pos == rb->length)
-    rb->pos = 0;
-}
-
-bool ringbuffer_shift(RingBuffer rb, void * dst)
-{
-  if (rb->count == 0)
-    return false;
+  if (!rb->unbounded && ringbuffer_getCount(rb) == rb->length)
+    ringbuffer_consume(rb, NULL, 1);
 
-  memcpy(dst, rb->values + rb->start * rb->valueSize, rb->valueSize);
-  --rb->count;
-  if (++rb->start == rb->length)
-    rb->start = 0;
-
-  return true;
+  ringbuffer_append(rb, value, 1);
 }
 
 void ringbuffer_reset(RingBuffer rb)
 {
-  rb->start = 0;
-  rb->pos = 0;
-  rb->count = 0;
+  atomic_store(&rb->readPos, 0);
+  atomic_store(&rb->writePos, 0);
}
 
 int ringbuffer_getLength(const RingBuffer rb)
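Note that the lock is gone entirely: each position counter now has a single
owner, with only append writing writePos and only consume writing readPos,
published via release stores and observed via acquire loads. This makes the
queue safe for one producer and one consumer, such as an audio device
callback paired with a single feeder thread, which matches the commit's
motivation; it is presumably not safe for multiple concurrent writers or
readers. ringbuffer_push keeps its old drop-oldest semantics by consuming one
value before appending when a bounded buffer is full.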
@@ -101,12 +90,15 @@ int ringbuffer_getLength(const RingBuffer rb)
 
 int ringbuffer_getStart(const RingBuffer rb)
 {
-  return rb->start;
+  return atomic_load(&rb->readPos) % rb->length;
 }
 
 int ringbuffer_getCount(const RingBuffer rb)
 {
-  return rb->count;
+  uint32_t writePos = atomic_load(&rb->writePos);
+  uint32_t readPos  = atomic_load(&rb->readPos);
+
+  return writePos - readPos;
 }
 
 void * ringbuffer_getValues(const RingBuffer rb)
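readPos and writePos are free-running 32-bit counters, reduced modulo length
only when indexing into the array, so the element count is plain unsigned
subtraction and stays correct even when the counters wrap. A small
illustration with assumed values:

    uint32_t readPos  = 0xFFFFFFF0; // reader counter about to wrap
    uint32_t writePos = 0x00000010; // writer counter has already wrapped
    uint32_t count    = writePos - readPos; // 0x20 == 32, still correct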
@@ -114,113 +106,192 @@ void * ringbuffer_getValues(const RingBuffer rb)
   return rb->values;
 }
 
-void * ringBuffer_getLastValue(const RingBuffer rb)
-{
-  if (rb->count == 0)
-    return NULL;
-
-  int index = rb->start + rb->count - 1;
-  if (index >= rb->length)
-    index -= rb->length;
-
-  return rb->values + index * rb->valueSize;
-}
-
 int ringbuffer_append(const RingBuffer rb, const void * values, int count)
 {
   if (count == 0)
     return 0;
 
-  LG_LOCK(rb->lock);
-  if (count > rb->length - rb->count)
-    count = rb->length - rb->count;
-
-  const char * p = (const char *)values;
-  int remain = count;
-  do
-  {
-    int copy = rb->length - rb->pos;
-    if (copy > remain)
-      copy = remain;
-
-    memcpy(rb->values + rb->pos * rb->valueSize, p, copy * rb->valueSize);
-    rb->pos += copy;
-    if (rb->pos == rb->length)
-      rb->pos = 0;
-
-    p += copy * rb->valueSize;
-    remain -= copy;
-  }
-  while(remain > 0);
-
-  rb->count += count;
-  LG_UNLOCK(rb->lock);
-
-  return count;
-}
-
-void * ringbuffer_consume(const RingBuffer rb, int * count)
-{
-  LG_LOCK(rb->lock);
-  if (rb->count == 0)
-  {
-    *count = 0;
-    LG_UNLOCK(rb->lock);
-    return NULL;
-  }
-
-  if (*count > rb->count)
-    *count = rb->count;
-
-  if (*count > rb->length - rb->start)
-    *count = rb->length - rb->start;
-
-  void * values = rb->values + rb->start * rb->valueSize;
-  rb->start += *count;
-  rb->count -= *count;
-  if (rb->start == rb->length)
-    rb->start = 0;
-
-  LG_UNLOCK(rb->lock);
-
-  return values;
-}
+  // Seeking backwards is only supported in unbounded mode at the moment
+  if (count < 0 && !rb->unbounded)
+    return 0;
+
+  uint32_t readPos  = atomic_load_explicit(&rb->readPos, memory_order_acquire);
+  uint32_t writePos = rb->writePos;
+  uint32_t newWritePos = writePos;
+
+  if (count < 0)
+  {
+    // Seeking backwards; just update the write pointer
+    newWritePos += count;
+  }
+  else
+  {
+    int32_t writeOffset = writePos - readPos;
+    if (writeOffset < 0)
+    {
+      DEBUG_ASSERT(rb->unbounded);
+
+      // The reader is ahead of the writer; skip new values to remain in sync
+      int32_t underrun = -writeOffset;
+      int32_t skipLen  = min(underrun, count);
+
+      if (values)
+        values += skipLen * rb->valueSize;
+
+      count       -= skipLen;
+      newWritePos += skipLen;
+      writeOffset  = newWritePos - readPos;
+    }
+
+    if (count > 0)
+    {
+      DEBUG_ASSERT(writeOffset >= 0);
+
+      // We may not be able to write anything if the writer is too far ahead of
+      // the reader
+      uint32_t writeLen = 0;
+      if (writeOffset < rb->length)
+      {
+        uint32_t writeIndex     = newWritePos % rb->length;
+        uint32_t writeAvailable = rb->length - writeOffset;
+        uint32_t writeAvailableBack =
+          min(rb->length - writeIndex, writeAvailable);
+
+        writeLen = min(count, writeAvailable);
+        uint32_t writeLenBack  = min(writeLen, writeAvailableBack);
+        uint32_t writeLenFront = writeLen - writeLenBack;
+
+        if (values)
+        {
+          memcpy(rb->values + writeIndex * rb->valueSize, values,
+              writeLenBack * rb->valueSize);
+          memcpy(rb->values, values + writeLenBack * rb->valueSize,
+              writeLenFront * rb->valueSize);
+        }
+        else
+        {
+          memset(rb->values + writeIndex * rb->valueSize, 0,
+              writeLenBack * rb->valueSize);
+          memset(rb->values, 0, writeLenFront * rb->valueSize);
+        }
+      }
+
+      if (rb->unbounded)
+        newWritePos += count;
+      else
+        newWritePos += writeLen;
+    }
+  }
+
+  atomic_store_explicit(&rb->writePos, newWritePos, memory_order_release);
+
+  return newWritePos - writePos;
 }
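The paired memcpy/memset calls split a write that crosses the end of the
array. A worked example with assumed values: for length = 8, readPos = 10,
writePos = 14 and count = 4, we get writeOffset = 4, writeAvailable = 8 - 4 =
4 and writeIndex = 14 % 8 = 6, so writeLen = 4, writeLenBack = min(4, 8 - 6)
= 2 fills slots 6 and 7, and writeLenFront = 2 wraps around to fill slots 0
and 1.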
 
-void ringbuffer_setPreOverwriteFn(const RingBuffer rb, RingBufferValueFn fn,
-    void * udata)
-{
-  rb->preOverwriteFn = fn;
-  rb->preOverwriteUdata = udata;
-}
+int ringbuffer_consume(const RingBuffer rb, void * values, int count)
+{
+  if (count == 0)
+    return 0;
+
+  // Seeking backwards is only supported in unbounded mode at the moment
+  if (count < 0 && !rb->unbounded)
+    return 0;
+
+  uint32_t readPos  = rb->readPos;
+  uint32_t writePos = atomic_load_explicit(&rb->writePos, memory_order_acquire);
+  uint32_t newReadPos = readPos;
+
+  if (count < 0)
+  {
+    // Seeking backwards; just update the read pointer
+    newReadPos += count;
+  }
+  else
+  {
+    int32_t writeOffset = writePos - newReadPos;
+    if (writeOffset < 0)
+    {
+      DEBUG_ASSERT(rb->unbounded);
+
+      // We are already in an underrun condition; just fill the buffer with
+      // zeros
+      newReadPos += count;
+
+      if (values)
+        memset(values, 0, count * rb->valueSize);
+    }
+    else
+    {
+      uint32_t readIndex     = newReadPos % rb->length;
+      uint32_t readAvailable = min(writeOffset, rb->length);
+      uint32_t readLen       = min(count, readAvailable);
+
+      if (values)
+      {
+        uint32_t readAvailableBack = min(rb->length - readIndex, readAvailable);
+        uint32_t readLenBack  = min(readLen, readAvailableBack);
+        uint32_t readLenFront = readLen - readLenBack;
+
+        memcpy(values, rb->values + readIndex * rb->valueSize,
+            readLenBack * rb->valueSize);
+        memcpy(values + readLenBack * rb->valueSize, rb->values,
+            readLenFront * rb->valueSize);
+
+        if (rb->unbounded && readLen < count)
+        {
+          // One of two things has happened: we have caught up with the writer
+          // and are starting to underrun, or we are really far behind the
+          // writer and an overrun has occurred. Either way, the only thing left
+          // to do is to fill the rest of the buffer with zeros
+          uint32_t remaining = count - readLen;
+          memset(values + readLen * rb->valueSize, 0,
+              remaining * rb->valueSize);
+        }
+      }
+
+      if (rb->unbounded)
+        newReadPos += count;
+      else
+        newReadPos += readLen;
+    }
+  }
+
+  atomic_store_explicit(&rb->readPos, newReadPos, memory_order_release);
+
+  return newReadPos - readPos;
+}
 
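Note the asymmetry in how the two counters are accessed: consume reads
rb->readPos with a plain load because only the consumer ever modifies it,
while writePos is loaded with memory_order_acquire so that data the producer
wrote before its release store of writePos is visible to the reader. append
mirrors this with the roles swapped; this is the classic
single-producer/single-consumer pairing.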
-void ringbuffer_forEach(const RingBuffer rb, RingBufferIterator fn, void * udata,
-    bool reverse)
+void ringbuffer_forEach(const RingBuffer rb, RingBufferIterator fn,
+    void * udata, bool reverse)
 {
+  uint32_t readPos  = rb->readPos;
+  uint32_t writePos = atomic_load_explicit(&rb->writePos, memory_order_acquire);
+
+  int32_t writeOffset = writePos - readPos;
+  if (writeOffset < 0)
+  {
+    DEBUG_ASSERT(rb->unbounded);
+    return;
+  }
+
+  uint32_t readAvailable = min(writeOffset, rb->length);
+
   if (reverse)
   {
-    int index = rb->start + rb->count - 1;
-    if (index >= rb->length)
-      index -= rb->length;
-
-    for(int i = 0; i < rb->count; ++i)
+    readPos = readPos + readAvailable - 1;
+    for (int i = 0; i < readAvailable; ++i, --readPos)
     {
-      void * value = rb->values + index * rb->valueSize;
-      if (--index == -1)
-        index = rb->length - 1;
+      uint32_t readIndex = readPos % rb->length;
+      void * value = rb->values + readIndex * rb->valueSize;
 
       if (!fn(i, value, udata))
         break;
     }
   }
   else
   {
-    int index = rb->start;
-    for(int i = 0; i < rb->count; ++i)
+    for (int i = 0; i < readAvailable; ++i, ++readPos)
     {
-      void * value = rb->values + index * rb->valueSize;
-      if (++index == rb->length)
-        index = 0;
+      uint32_t readIndex = readPos % rb->length;
+      void * value = rb->values + readIndex * rb->valueSize;
 
       if (!fn(i, value, udata))
         break;
     }
   }
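A short sketch of the iterator contract, where printValue is a hypothetical
callback and not part of the commit:

    #include <stdio.h>
    #include "common/ringbuffer.h"

    // Hypothetical callback: print each value; return false to stop iterating
    static bool printValue(int index, void * value, void * udata)
    {
      (void)udata;
      printf("%d: %d\n", index, *(int *)value);
      return true;
    }

    // Iterate oldest to newest over some existing buffer rb:
    //   ringbuffer_forEach(rb, printValue, NULL, false);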