common: Use consistent cache line size in RingBuffer

Replace the hardcoded 128-byte alignment on RingBuffer's atomic indices with
a named CACHE_LINE_SIZE constant of 64 bytes. 64 bytes matches the cache
line size of most modern CPU architectures (x86-64, most ARM cores), and
using a single named constant simplifies the implementation by applying the
same alignment regardless of whether the compiler supports
std::hardware_destructive_interference_size.

Changes:
- Add CACHE_LINE_SIZE constant set to 64 bytes
- Use CACHE_LINE_SIZE for atomic index alignment in both code paths
- Remove outdated TODO comment about hardware_destructive_interference_size
This commit is contained in:
Zephyron 2025-01-06 12:41:42 +10:00
parent 21f94d5825
commit 08f1ed4011
No known key found for this signature in database
GPG key ID: 8DA271B6A74353F1

View file

@@ -31,6 +31,8 @@ class RingBuffer {
     // Ensure lock-free.
     static_assert(std::atomic_size_t::is_always_lock_free);
+    static constexpr size_t CACHE_LINE_SIZE = 64;
+
 public:
public:
/// Pushes slots into the ring buffer
/// @param new_slots Pointer to the slots to push
@@ -105,11 +107,11 @@ private:
-    // TODO: Remove this ifdef whenever clang and GCC support
-    // std::hardware_destructive_interference_size.
 #ifdef __cpp_lib_hardware_interference_size
-    alignas(std::hardware_destructive_interference_size) std::atomic_size_t m_read_index{0};
-    alignas(std::hardware_destructive_interference_size) std::atomic_size_t m_write_index{0};
+    alignas(CACHE_LINE_SIZE) std::atomic_size_t m_read_index{0};
+    alignas(CACHE_LINE_SIZE) std::atomic_size_t m_write_index{0};
 #else
-    alignas(128) std::atomic_size_t m_read_index{0};
-    alignas(128) std::atomic_size_t m_write_index{0};
+    alignas(CACHE_LINE_SIZE) std::atomic_size_t m_read_index{0};
+    alignas(CACHE_LINE_SIZE) std::atomic_size_t m_write_index{0};
 #endif
     std::array<T, capacity> m_data;