/*!
* \file ringbuffer.hpp
* \version 2.0.5
* \brief Simple SPSC ring buffer implementation
*
* \author Jan Oleksiewicz <[email protected]>
* \license SPDX-License-Identifier: MIT
* \date 22 Jun 2017
*/
#ifndef RINGBUFFER_HPP
#define RINGBUFFER_HPP
#include <stdint.h>
#include <stddef.h>
#include <limits>
#include <atomic>
#include <type_traits>
namespace jnk0le
{
/*!
* \brief Lock-free ring buffer implementation with no wasted slots
*
* \tparam T Type of buffered elements
* \tparam buffer_size Size of the buffer. Must be a power of 2.
* \tparam fake_tso Omit generation of explicit barrier code to avoid unnecessary instructions in a TSO scenario (e.g. simple microcontrollers/single core)
* \tparam cacheline_size Size of the cache line, used to insert appropriate padding between the indexes and the buffer
* \tparam index_t Type used for array indexing. Also serves as a placeholder for future implementations.
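*
* Minimal usage sketch (illustrative; assumes exactly one producer thread/ISR and one consumer thread):
* \code
* jnk0le::Ringbuffer<int, 256> queue;
*
* // producer side
* queue.insert(42);
*
* // consumer side
* int value;
* if(queue.remove(value)) {
*     // value now holds the oldest element
* }
* \endcode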
*/
template<typename T, size_t buffer_size = 16, bool fake_tso = false, size_t cacheline_size = 0, typename index_t = size_t>
class Ringbuffer
{
public:
/*!
* \brief Default constructor, will initialize head and tail indexes
*/
Ringbuffer() : head(0), tail(0) {}
/*!
* \brief Special case constructor that omits unnecessary initialization code when the object is
* instantiated in the .bss section (which is zero-initialized at startup)
* \warning If the object is instantiated on the stack, heap or inside a noinit section, then the contents
* have to be explicitly cleared before use
* \param dummy Ignored
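*
* Illustrative sketch (the instance name is an assumption; relies on static storage being zero-initialized,
* as is typical on embedded targets):
* \code
* // global/static objects live in .bss, so the index-clearing code can be skipped
* static jnk0le::Ringbuffer<char, 64> uart_rx_buf(0);
* \endcode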
*/
Ringbuffer(int dummy) { (void)(dummy); }
/*!
* \brief Clear buffer from producer side
* \warning function may return without performing any action if the consumer tries to read data at the same time
*/
void producerClear(void) {
// head modification will lead to underflow if cleared during consumer read
// doing this properly with CAS is not possible without modifying the consumer code
consumerClear();
}
/*!
* \brief Clear buffer from consumer side
*/
void consumerClear(void) {
tail.store(head.load(std::memory_order_relaxed), std::memory_order_relaxed);
}
/*!
* \brief Check if buffer is empty
* \return True if buffer is empty
*/
bool isEmpty(void) const {
return readAvailable() == 0;
}
/*!
* \brief Check if buffer is full
* \return True if buffer is full
*/
bool isFull(void) const {
return writeAvailable() == 0;
}
/*!
* \brief Check how many elements can be read from the buffer
* \return Number of elements that can be read
*/
index_t readAvailable(void) const {
return head.load(index_acquire_barrier) - tail.load(std::memory_order_relaxed);
}
/*!
* \brief Check how many elements can be written into the buffer
* \return Number of free slots that can be written
*/
index_t writeAvailable(void) const {
return buffer_size - (head.load(std::memory_order_relaxed) - tail.load(index_acquire_barrier));
}
/*!
* \brief Inserts data into internal buffer, without blocking
* \param data element to be inserted into internal buffer
* \return True if data was inserted
*/
bool insert(T data)
{
index_t tmp_head = head.load(std::memory_order_relaxed);
if((tmp_head - tail.load(index_acquire_barrier)) == buffer_size)
return false;
else
{
data_buff[tmp_head++ & buffer_mask] = data;
std::atomic_signal_fence(std::memory_order_release);
head.store(tmp_head, index_release_barrier);
}
return true;
}
/*!
* \brief Inserts data into internal buffer, without blocking
* \param[in] data Pointer to the element to be inserted into the internal buffer
* \return True if data was inserted
*/
bool insert(const T* data)
{
index_t tmp_head = head.load(std::memory_order_relaxed);
if((tmp_head - tail.load(index_acquire_barrier)) == buffer_size)
return false;
else
{
data_buff[tmp_head++ & buffer_mask] = *data;
std::atomic_signal_fence(std::memory_order_release);
head.store(tmp_head, index_release_barrier);
}
return true;
}
/*!
* \brief Inserts data returned by a callback function into the internal buffer, without blocking
*
* This is a special purpose function that can be used to avoid redundant availability checks in cases where
* acquiring the data has side effects (like clearing status flags by reading a peripheral data register)
*
* \param get_data_callback Pointer to callback function that returns element to be inserted into buffer
* \return True if data was inserted and callback called
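*
* Hypothetical ISR sketch (uart_rx_buf and read_data_register() are assumed names):
* \code
* // assumed: char read_data_register(void);  // reads the data register, clearing the IRQ flag
* extern "C" void UART_RX_IRQHandler(void)
* {
*     // the register is read (and the flag cleared) only when there is space in the buffer
*     uart_rx_buf.insertFromCallbackWhenAvailable(read_data_register);
* }
* \endcode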
*/
bool insertFromCallbackWhenAvailable(T (*get_data_callback)(void))
{
index_t tmp_head = head.load(std::memory_order_relaxed);
if((tmp_head - tail.load(index_acquire_barrier)) == buffer_size)
return false;
else
{
//execute callback only when there is space in buffer
data_buff[tmp_head++ & buffer_mask] = get_data_callback();
std::atomic_signal_fence(std::memory_order_release);
head.store(tmp_head, index_release_barrier);
}
return true;
}
/*!
* \brief Removes a single element without reading it
* \return True if one element was removed
*/
bool remove()
{
index_t tmp_tail = tail.load(std::memory_order_relaxed);
if(tmp_tail == head.load(std::memory_order_relaxed))
return false;
else
tail.store(++tmp_tail, index_release_barrier); // release in case data was loaded/used before
return true;
}
/*!
* \brief Removes multiple elements without reading them or storing them elsewhere
* \param cnt Maximum number of elements to remove
* \return Number of removed elements
*/
size_t remove(size_t cnt) {
index_t tmp_tail = tail.load(std::memory_order_relaxed);
index_t avail = head.load(std::memory_order_relaxed) - tmp_tail;
cnt = (cnt > avail) ? avail : cnt;
tail.store(tmp_tail + cnt, index_release_barrier);
return cnt;
}
/*!
* \brief Reads one element from internal buffer without blocking
* \param[out] data Reference to memory location where removed element will be stored
* \return True if data was fetched from the internal buffer
*/
bool remove(T& data) {
return remove(&data); // references are implemented as pointers anyway
}
/*!
* \brief Reads one element from internal buffer without blocking
* \param[out] data Pointer to memory location where removed element will be stored
* \return True if data was fetched from the internal buffer
*/
bool remove(T* data) {
index_t tmp_tail = tail.load(std::memory_order_relaxed);
if(tmp_tail == head.load(index_acquire_barrier))
return false;
else
{
*data = data_buff[tmp_tail++ & buffer_mask];
std::atomic_signal_fence(std::memory_order_release);
tail.store(tmp_tail, index_release_barrier);
}
return true;
}
/*!
* \brief Gets the first element in the buffer on the consumed side
*
* It is safe to use and modify item contents only on the consumer side
*
* \return Pointer to first element, nullptr if buffer was empty
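*
* Usage sketch (queue and process() are assumed names from the class-level example):
* \code
* if(int* item = queue.peek()) {
*     process(*item);  // inspect/modify in place on the consumer side
*     queue.remove();  // then discard it
* }
* \endcode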
*/
T* peek() {
index_t tmp_tail = tail.load(std::memory_order_relaxed);
if(tmp_tail == head.load(index_acquire_barrier))
return nullptr;
else
return &data_buff[tmp_tail & buffer_mask];
}
/*!
* \brief Gets the n-th element on the consumed side
*
* It is safe to use and modify item contents only on the consumer side
*
* \param index Item offset starting on the consumed side
* \return Pointer to requested element, nullptr if index exceeds storage count
*/
T* at(size_t index) {
index_t tmp_tail = tail.load(std::memory_order_relaxed);
if((head.load(index_acquire_barrier) - tmp_tail) <= index)
return nullptr;
else
return &data_buff[(tmp_tail + index) & buffer_mask];
}
/*!
* \brief Gets the n-th element on the consumed side
*
* Unchecked operation that assumes the software already knows whether the element can be used. If the
* requested index is out of bounds, the reference will point somewhere inside the buffer.
* isEmpty() and readAvailable() will place appropriate memory barriers if used as a loop limiter.
* It is safe to use and modify T contents only on the consumer side
*
* \param index Item offset starting on the consumed side
* \return Reference to requested element, undefined if index exceeds storage count
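*
* Sketch of the intended loop pattern (queue and process() are assumed names; readAvailable()
* provides the acquire barrier):
* \code
* size_t n = queue.readAvailable();
* for(size_t i = 0; i < n; i++) {
*     process(queue[i]);  // unchecked access is safe below the snapshot taken above
* }
* queue.remove(n);  // release all consumed slots in one step
* \endcode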
*/
T& operator[](size_t index) {
return data_buff[(tail.load(std::memory_order_relaxed) + index) & buffer_mask];
}
/*!
* \brief Insert multiple elements into internal buffer without blocking
*
* This function will insert as much data as possible from the given buffer.
*
* \param[in] buff Pointer to buffer with data to be inserted from
* \param count Number of elements to write from the given buffer
* \return Number of elements written into internal buffer
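*
* Usage sketch (queue is the assumed instance from the class-level example):
* \code
* int samples[32];
* // ... fill samples ...
* size_t written = queue.writeBuff(samples, 32);
* // written may be less than 32 if the buffer was nearly full
* \endcode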
*/
size_t writeBuff(const T* buff, size_t count);
/*!
* \brief Insert multiple elements into internal buffer without blocking
*
* This function will continue writing new entries until all data is written or there is no more space.
* The callback function can be used to indicate to consumer that it can start fetching data.
*
* \warning This function is not deterministic
*
* \param[in] buff Pointer to buffer with data to be inserted from
* \param count Number of elements to write from the given buffer
* \param count_to_callback Number of elements to write before calling the callback function in the first loop iteration
* \param execute_data_callback Pointer to callback function executed after every loop iteration
* \return Number of elements written into internal buffer
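*
* Sketch under assumed names (samples from the previous example; kick_consumer() is a hypothetical
* notification hook, e.g. one that triggers an interrupt or starts a DMA transfer):
* \code
* // write up to 8 elements, call kick_consumer(), then keep writing the remainder,
* // calling kick_consumer() again after every iteration
* size_t written = queue.writeBuff(samples, 32, 8, kick_consumer);
* \endcode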
*/
size_t writeBuff(const T* buff, size_t count, size_t count_to_callback, void (*execute_data_callback)(void));
/*!
* \brief Load multiple elements from internal buffer without blocking
*
* This function will read up to the specified amount of data.
*
* \param[out] buff Pointer to buffer where data will be loaded into
* \param count Number of elements to load into the given buffer
* \return Number of elements that were read from internal buffer
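*
* Usage sketch (queue is the assumed instance from the class-level example):
* \code
* int out[32];
* size_t got = queue.readBuff(out, 32);
* // got may be less than 32 if fewer elements were available
* \endcode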
*/
size_t readBuff(T* buff, size_t count);
/*!
* \brief Load multiple elements from internal buffer without blocking
*
* This function will continue reading new entries until all requested data is read or there is nothing
* more to read.
* The callback function can be used to indicate to producer that it can start writing new data.
*
* \warning This function is not deterministic
*
* \param[out] buff Pointer to buffer where data will be loaded into
* \param count Number of elements to load into the given buffer
* \param count_to_callback Number of elements to load before calling the callback function in the first iteration
* \param execute_data_callback Pointer to callback function executed after every loop iteration
* \return Number of elements that were read from internal buffer
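*
* Mirrors the writeBuff() callback variant (allow_producer() is a hypothetical hook, e.g. one that
* re-enables a producer interrupt):
* \code
* int out[32];
* size_t got = queue.readBuff(out, 32, 8, allow_producer);
* \endcode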
*/
size_t readBuff(T* buff, size_t count, size_t count_to_callback, void (*execute_data_callback)(void));
private:
constexpr static index_t buffer_mask = buffer_size-1; //!< bitwise mask for a given buffer size
constexpr static std::memory_order index_acquire_barrier = fake_tso ?
std::memory_order_relaxed
: std::memory_order_acquire; // do not load from, or store to buffer before confirmed by the opposite side
constexpr static std::memory_order index_release_barrier = fake_tso ?
std::memory_order_relaxed
: std::memory_order_release; // do not update own side before all operations on data_buff committed
alignas(cacheline_size) std::atomic<index_t> head; //!< head index
alignas(cacheline_size) std::atomic<index_t> tail; //!< tail index
// put buffer after variables so everything can be reached with short offsets
alignas(cacheline_size) T data_buff[buffer_size]; //!< actual buffer
// let's assert that no UB will be compiled in
static_assert((buffer_size != 0), "buffer cannot be of zero size");
static_assert((buffer_size & buffer_mask) == 0, "buffer size is not a power of 2");
static_assert(sizeof(index_t) <= sizeof(size_t),
"indexing type size is larger than size_t, operation is not lock free and doesn't make sense");
static_assert(std::numeric_limits<index_t>::is_integer, "indexing type is not integral type");
static_assert(!(std::numeric_limits<index_t>::is_signed), "indexing type must not be signed");
static_assert(buffer_mask <= ((std::numeric_limits<index_t>::max)() >> 1),
"buffer size is too large for a given indexing type (maximum size for n-bit type is 2^(n-1))");
static_assert(std::is_trivial<T>::value, "non trivial objects will currently break");
};
template<typename T, size_t buffer_size, bool fake_tso, size_t cacheline_size, typename index_t>
size_t Ringbuffer<T, buffer_size, fake_tso, cacheline_size, index_t>::writeBuff(const T* buff, size_t count)
{
index_t available = 0;
index_t tmp_head = head.load(std::memory_order_relaxed);
size_t to_write = count;
available = buffer_size - (tmp_head - tail.load(index_acquire_barrier));
if(available < count) // do not write more than we can
to_write = available;
// maybe divide it into 2 separate writes
for(size_t i = 0; i < to_write; i++)
data_buff[tmp_head++ & buffer_mask] = buff[i];
std::atomic_signal_fence(std::memory_order_release);
head.store(tmp_head, index_release_barrier);
return to_write;
}
template<typename T, size_t buffer_size, bool fake_tso, size_t cacheline_size, typename index_t>
size_t Ringbuffer<T, buffer_size, fake_tso, cacheline_size, index_t>::writeBuff(const T* buff, size_t count,
size_t count_to_callback, void(*execute_data_callback)())
{
size_t written = 0;
index_t available = 0;
index_t tmp_head = head.load(std::memory_order_relaxed);
size_t to_write = count;
if(count_to_callback != 0 && count_to_callback < count)
to_write = count_to_callback;
while(written < count)
{
available = buffer_size - (tmp_head - tail.load(index_acquire_barrier));
if(available == 0) // buffer full, cannot write any more
break;
if(to_write > available) // do not write more than we can
to_write = available;
while(to_write--)
data_buff[tmp_head++ & buffer_mask] = buff[written++];
std::atomic_signal_fence(std::memory_order_release);
head.store(tmp_head, index_release_barrier);
if(execute_data_callback != nullptr)
execute_data_callback();
to_write = count - written;
}
return written;
}
template<typename T, size_t buffer_size, bool fake_tso, size_t cacheline_size, typename index_t>
size_t Ringbuffer<T, buffer_size, fake_tso, cacheline_size, index_t>::readBuff(T* buff, size_t count)
{
index_t available = 0;
index_t tmp_tail = tail.load(std::memory_order_relaxed);
size_t to_read = count;
available = head.load(index_acquire_barrier) - tmp_tail;
if(available < count) // do not read more than we can
to_read = available;
// maybe divide it into 2 separate reads
for(size_t i = 0; i < to_read; i++)
buff[i] = data_buff[tmp_tail++ & buffer_mask];
std::atomic_signal_fence(std::memory_order_release);
tail.store(tmp_tail, index_release_barrier);
return to_read;
}
template<typename T, size_t buffer_size, bool fake_tso, size_t cacheline_size, typename index_t>
size_t Ringbuffer<T, buffer_size, fake_tso, cacheline_size, index_t>::readBuff(T* buff, size_t count,
size_t count_to_callback, void(*execute_data_callback)())
{
size_t read = 0;
index_t available = 0;
index_t tmp_tail = tail.load(std::memory_order_relaxed);
size_t to_read = count;
if(count_to_callback != 0 && count_to_callback < count)
to_read = count_to_callback;
while(read < count)
{
available = head.load(index_acquire_barrier) - tmp_tail;
if(available == 0) // buffer empty, nothing more to read
break;
if(to_read > available) // do not read more than we can
to_read = available;
while(to_read--)
buff[read++] = data_buff[tmp_tail++ & buffer_mask];
std::atomic_signal_fence(std::memory_order_release);
tail.store(tmp_tail, index_release_barrier);
if(execute_data_callback != nullptr)
execute_data_callback();
to_read = count - read;
}
return read;
}
} // namespace jnk0le
#endif //RINGBUFFER_HPP