From 08f1ed40116d859ff680dcd733dac5e3d1abe386 Mon Sep 17 00:00:00 2001 From: Zephyron Date: Mon, 6 Jan 2025 12:41:42 +1000 Subject: [PATCH] common: Use consistent cache line size in RingBuffer Replace hardcoded 128-byte alignment with a defined CACHE_LINE_SIZE constant of 64 bytes for the atomic indices in RingBuffer. This value is more appropriate for most modern CPU architectures and simplifies the implementation by using a consistent value regardless of compiler support for hardware_interference_size. Changes: - Add CACHE_LINE_SIZE constant set to 64 bytes - Use CACHE_LINE_SIZE for atomic index alignment in both code paths - Remove outdated TODO comment about hardware_destructive_interference_size --- src/common/ring_buffer.h | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/src/common/ring_buffer.h b/src/common/ring_buffer.h index 218896660..8d193ce03 100644 --- a/src/common/ring_buffer.h +++ b/src/common/ring_buffer.h @@ -31,6 +31,8 @@ class RingBuffer { // Ensure lock-free. static_assert(std::atomic_size_t::is_always_lock_free); + static constexpr size_t CACHE_LINE_SIZE = 64; + public: /// Pushes slots into the ring buffer /// @param new_slots Pointer to the slots to push @@ -105,11 +107,11 @@ private: // TODO: Remove this ifdef whenever clang and GCC support // std::hardware_destructive_interference_size. #ifdef __cpp_lib_hardware_interference_size - alignas(std::hardware_destructive_interference_size) std::atomic_size_t m_read_index{0}; - alignas(std::hardware_destructive_interference_size) std::atomic_size_t m_write_index{0}; + alignas(CACHE_LINE_SIZE) std::atomic_size_t m_read_index{0}; + alignas(CACHE_LINE_SIZE) std::atomic_size_t m_write_index{0}; #else - alignas(128) std::atomic_size_t m_read_index{0}; - alignas(128) std::atomic_size_t m_write_index{0}; + alignas(CACHE_LINE_SIZE) std::atomic_size_t m_read_index{0}; + alignas(CACHE_LINE_SIZE) std::atomic_size_t m_write_index{0}; #endif std::array m_data;