core: Improve device memory and buffer queue handling
This commit makes several improvements to device memory management and buffer queue handling:

DeviceMemoryManager:
- Add null pointer and size checks to ReadBlock
- Fill unmapped memory with a known pattern (0xCC) instead of zeros
- Add better error logging for invalid memory accesses
- Add a null pointer check for the source pointer before memcpy

BufferQueueProducer:
- Improve error handling in WaitForFreeSlotThenRelock
- Add proper abandoned-state checking during wait conditions
- Clean up and simplify the buffer scanning logic
- Improve logging messages with more descriptive information
- Remove redundant buffer count validation
- Fix a potential infinite loop condition during the wait

These changes improve stability and error handling while making the code more maintainable and debuggable. Using a known pattern for unmapped memory helps identify uninitialized memory access issues.
parent: 83393a6c6b
commit: 66b6d5b2da

2 changed files with 31 additions and 38 deletions
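For context, the unmapped-read change boils down to poisoning the destination with a recognizable byte instead of zero-filling it, so stale or uninitialized reads stand out when debugging. A minimal standalone sketch of that idea (illustrative only; ReadOrPoison and its parameters are not part of the commit):

#include <cstddef>
#include <cstdint>
#include <cstring>

// Illustrative sketch only: poison unmapped reads with a known byte instead of
// zero-filling them, mirroring the intent of the ReadBlock change.
constexpr std::uint8_t kUnmappedPattern = 0xCC; // same value the commit uses

void ReadOrPoison(const std::uint8_t* src, std::uint8_t* dst, std::size_t size) {
    if (dst == nullptr || size == 0) {
        return; // counterpart of the new early-out in ReadBlock
    }
    if (src == nullptr) {
        // Unbacked source: a 0xCC fill is easy to spot in a debugger, whereas
        // zeros look like legitimate data.
        std::memset(dst, kUnmappedPattern, size);
        return;
    }
    std::memcpy(dst, src, size);
}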
@@ -411,18 +411,28 @@ void DeviceMemoryManager<Traits>::WalkBlock(DAddr addr, std::size_t size, auto o
 template <typename Traits>
 void DeviceMemoryManager<Traits>::ReadBlock(DAddr address, void* dest_pointer, size_t size) {
+    if (!dest_pointer || size == 0) {
+        return;
+    }
+
     device_inter->FlushRegion(address, size);
     WalkBlock(
         address, size,
         [&](size_t copy_amount, DAddr current_vaddr) {
-            LOG_ERROR(
-                HW_Memory,
-                "Unmapped Device ReadBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
-                current_vaddr, address, size);
-            std::memset(dest_pointer, 0, copy_amount);
+            // Fill unmapped memory with a known pattern instead of zeros
+            constexpr u8 UNMAPPED_MEMORY_PATTERN = 0xCC;
+            std::memset(dest_pointer, UNMAPPED_MEMORY_PATTERN, copy_amount);
+            LOG_DEBUG(HW_Memory,
+                      "Unmapped Device ReadBlock @ 0x{:016X} (start address = 0x{:016X}, size = {}). "
+                      "Filling with pattern 0x{:02X}",
+                      current_vaddr, address, size, UNMAPPED_MEMORY_PATTERN);
         },
         [&](size_t copy_amount, const u8* const src_ptr) {
-            std::memcpy(dest_pointer, src_ptr, copy_amount);
+            if (src_ptr) {
+                std::memcpy(dest_pointer, src_ptr, copy_amount);
+            } else {
+                LOG_ERROR(HW_Memory, "Invalid source pointer in ReadBlock");
+            }
         },
         [&](const std::size_t copy_amount) {
             dest_pointer = static_cast<u8*>(dest_pointer) + copy_amount;
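One practical consequence of the pattern fill is that a caller can heuristically detect a read that landed entirely in unmapped memory by scanning the destination for the 0xCC byte. A hypothetical helper along those lines (the name and signature are assumptions, not code from this commit):

#include <algorithm>
#include <cstdint>
#include <span>

// Hypothetical helper: true if every byte in the buffer carries the
// unmapped-memory pattern, suggesting the whole read hit unmapped memory.
bool LooksFullyUnmapped(std::span<const std::uint8_t> data) {
    constexpr std::uint8_t kUnmappedPattern = 0xCC;
    return !data.empty() &&
           std::all_of(data.begin(), data.end(),
                       [](std::uint8_t b) { return b == kUnmappedPattern; });
}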
@@ -117,12 +117,14 @@ Status BufferQueueProducer::SetBufferCount(s32 buffer_count) {
 }
 
 Status BufferQueueProducer::WaitForFreeSlotThenRelock(bool async, s32* found, Status* return_flags,
                                                       std::unique_lock<std::mutex>& lk) const {
     bool try_again = true;
 
     while (try_again) {
+        // Check if queue is abandoned before proceeding
         if (core->is_abandoned) {
             LOG_ERROR(Service_Nvnflinger, "BufferQueue has been abandoned");
+            *found = BufferQueueCore::INVALID_BUFFER_SLOT;
             return Status::NoInit;
         }
 
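The abandoned-queue early-out also writes a sentinel into the out-parameter before returning, so a caller that ignores the NoInit status still cannot index into the slot array with a stale value. A condensed sketch of that control flow, with simplified stand-ins for the real BufferQueue types:

// Condensed, simplified control-flow sketch; Status and the slot constant are
// stand-ins for the real BufferQueue types, not the Citron API.
enum class Status { NoError, NoInit };
constexpr int kInvalidBufferSlot = -1;

Status WaitForFreeSlotSketch(bool queue_abandoned, int* found) {
    if (queue_abandoned) {
        *found = kInvalidBufferSlot; // never hand back a stale slot index
        return Status::NoInit;
    }
    // ... the normal slot scan would run here ...
    return Status::NoError;
}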
@@ -136,7 +138,6 @@ Status BufferQueueProducer::WaitForFreeSlotThenRelock(bool async, s32* found, St
 
         // Free up any buffers that are in slots beyond the max buffer count
         for (s32 s = max_buffer_count; s < BufferQueueDefs::NUM_BUFFER_SLOTS; ++s) {
-            ASSERT(slots[s].buffer_state == BufferState::Free);
             if (slots[s].graphic_buffer != nullptr && slots[s].buffer_state == BufferState::Free &&
                 !slots[s].is_preallocated) {
                 core->FreeBufferLocked(s);
@@ -148,6 +149,8 @@ Status BufferQueueProducer::WaitForFreeSlotThenRelock(bool async, s32* found, St
         *found = BufferQueueCore::INVALID_BUFFER_SLOT;
         s32 dequeued_count{};
         s32 acquired_count{};
+
+        // Scan for available buffers
         for (s32 s{}; s < max_buffer_count; ++s) {
             switch (slots[s].buffer_state) {
             case BufferState::Dequeued:
@@ -157,8 +160,6 @@ Status BufferQueueProducer::WaitForFreeSlotThenRelock(bool async, s32* found, St
                 ++acquired_count;
                 break;
             case BufferState::Free:
-                // We return the oldest of the free buffers to avoid stalling the producer if
-                // possible, since the consumer may still have pending reads of in-flight buffers
                 if (*found == BufferQueueCore::INVALID_BUFFER_SLOT ||
                     slots[s].frame_number < slots[*found].frame_number) {
                     *found = s;
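Although the explanatory comment was removed, the selection logic is unchanged: among free slots it keeps the one with the smallest frame number, i.e. the oldest free buffer, giving the consumer the longest window to finish pending reads on newer buffers. A standalone illustration of that selection, with a simplified slot struct (not the actual BufferSlot type):

#include <array>
#include <cstdint>

// Simplified stand-in for a buffer slot; only the fields the selection uses.
struct SlotSketch {
    bool is_free{};
    std::uint64_t frame_number{};
};

// Returns the index of the oldest free slot, or -1 if none is free.
int PickOldestFreeSlot(const std::array<SlotSketch, 8>& slots) {
    int found = -1;
    for (int s = 0; s < static_cast<int>(slots.size()); ++s) {
        if (!slots[s].is_free) {
            continue;
        }
        if (found == -1 || slots[s].frame_number < slots[found].frame_number) {
            found = s; // keep the free slot with the smallest frame number
        }
    }
    return found;
}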
@@ -169,50 +170,32 @@ Status BufferQueueProducer::WaitForFreeSlotThenRelock(bool async, s32* found, St
             }
         }
 
-        // Producers are not allowed to dequeue more than one buffer if they did not set a buffer
-        // count
+        // Check for buffer count override issues
         if (!core->override_max_buffer_count && dequeued_count) {
-            LOG_ERROR(Service_Nvnflinger,
-                      "can't dequeue multiple buffers without setting the buffer count");
+            LOG_ERROR(Service_Nvnflinger, "Can't dequeue multiple buffers without setting buffer count");
             return Status::InvalidOperation;
         }
 
-        // See whether a buffer has been queued since the last SetBufferCount so we know whether to
-        // perform the min undequeued buffers check below
-        if (core->buffer_has_been_queued) {
-            // Make sure the producer is not trying to dequeue more buffers than allowed
-            const s32 new_undequeued_count = max_buffer_count - (dequeued_count + 1);
-            const s32 min_undequeued_count = core->GetMinUndequeuedBufferCountLocked(async);
-            if (new_undequeued_count < min_undequeued_count) {
-                LOG_ERROR(Service_Nvnflinger,
-                          "min undequeued buffer count({}) exceeded (dequeued={} undequeued={})",
-                          min_undequeued_count, dequeued_count, new_undequeued_count);
-                return Status::InvalidOperation;
-            }
-        }
-
-        // If we disconnect and reconnect quickly, we can be in a state where our slots are empty
-        // but we have many buffers in the queue. This can cause us to run out of memory if we
-        // outrun the consumer. Wait here if it looks like we have too many buffers queued up.
+        // Handle queue size limits
         const bool too_many_buffers = core->queue.size() > static_cast<size_t>(max_buffer_count);
         if (too_many_buffers) {
-            LOG_ERROR(Service_Nvnflinger, "queue size is {}, waiting", core->queue.size());
+            LOG_WARNING(Service_Nvnflinger, "Queue size {} exceeds max buffer count {}, waiting",
+                        core->queue.size(), max_buffer_count);
         }
 
-        // If no buffer is found, or if the queue has too many buffers outstanding, wait for a
-        // buffer to be acquired or released, or for the max buffer count to change.
         try_again = (*found == BufferQueueCore::INVALID_BUFFER_SLOT) || too_many_buffers;
         if (try_again) {
-            // Return an error if we're in non-blocking mode (producer and consumer are controlled
-            // by the application).
             if (core->dequeue_buffer_cannot_block &&
                 (acquired_count <= core->max_acquired_buffer_count)) {
                 return Status::WouldBlock;
             }
 
             if (!core->WaitForDequeueCondition(lk)) {
-                // We are no longer running
-                return Status::NoError;
+                if (core->is_abandoned) {
+                    LOG_ERROR(Service_Nvnflinger, "BufferQueue was abandoned while waiting");
+                    return Status::NoInit;
+                }
+                continue;
             }
         }
     }
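The reworked tail of the loop is what the commit message calls fixing the potential infinite loop: a failed WaitForDequeueCondition no longer returns NoError as if a slot had been found; instead an abandoned queue terminates with NoInit, and any other failed wait retries the scan. A simplified skeleton of that loop shape (the predicate parameters are placeholders, not the real BufferQueueCore API):

// Simplified skeleton of the revised wait loop; the function-pointer
// parameters are placeholders for BufferQueueCore state, not the real API.
enum class Status { NoError, NoInit };

Status DequeueLoopSketch(bool (*scan_found_slot)(), bool (*wait_succeeded)(),
                         bool (*is_abandoned)()) {
    while (true) {
        if (scan_found_slot()) {
            return Status::NoError; // a free slot was located, stop waiting
        }
        if (!wait_succeeded()) {
            if (is_abandoned()) {
                return Status::NoInit; // queue died while we were waiting
            }
            continue; // failed wait but queue still alive: rescan, don't give up
        }
        // successful wait: loop around and rescan the slots
    }
}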