diff --git a/src/dxvk/dxvk_device.cpp b/src/dxvk/dxvk_device.cpp
index 4d85fb2c..65893de5 100644
--- a/src/dxvk/dxvk_device.cpp
+++ b/src/dxvk/dxvk_device.cpp
@@ -262,8 +262,10 @@ namespace dxvk {
   VkResult DxvkDevice::presentImage(
     const Rc<vk::Presenter>&        presenter,
           VkSemaphore               semaphore) {
-    std::lock_guard<std::mutex> queueLock(m_submissionLock);
-    VkResult status = presenter->presentImage(semaphore);
+    DxvkPresentInfo presentInfo;
+    presentInfo.presenter = presenter;
+    presentInfo.waitSync  = semaphore;
+    VkResult status = m_submissionQueue.present(presentInfo);
 
     if (status != VK_SUCCESS)
       return status;
@@ -278,32 +280,22 @@ namespace dxvk {
     const Rc<DxvkCommandList>&      commandList,
           VkSemaphore               waitSync,
           VkSemaphore               wakeSync) {
-    VkResult status;
-
-    { // Queue submissions are not thread safe
-      std::lock_guard<std::mutex>     queueLock(m_submissionLock);
-      std::lock_guard<sync::Spinlock> statLock(m_statLock);
-
-      m_statCounters.merge(commandList->statCounters());
-      m_statCounters.addCtr(DxvkStatCounter::QueueSubmitCount, 1);
-
-      status = commandList->submit(
-        m_graphicsQueue.queueHandle,
-        waitSync, wakeSync);
-    }
-
-    if (status == VK_SUCCESS) {
-      // Add this to the set of running submissions
-      m_submissionQueue.submit(commandList);
-    } else {
-      Logger::err(str::format(
-        "DxvkDevice: Command buffer submission failed: ",
-        status));
-    }
+    DxvkSubmitInfo submitInfo;
+    submitInfo.cmdList  = commandList;
+    submitInfo.queue    = m_graphicsQueue.queueHandle;
+    submitInfo.waitSync = waitSync;
+    submitInfo.wakeSync = wakeSync;
+    m_submissionQueue.submit(submitInfo);
+
+    std::lock_guard<sync::Spinlock> statLock(m_statLock);
+    m_statCounters.merge(commandList->statCounters());
+    m_statCounters.addCtr(DxvkStatCounter::QueueSubmitCount, 1);
   }
 
 
   void DxvkDevice::waitForIdle() {
+    m_submissionQueue.synchronize();
+
     if (m_vkd->vkDeviceWaitIdle(m_vkd->device()) != VK_SUCCESS)
       Logger::err("DxvkDevice: waitForIdle: Operation failed");
   }
diff --git a/src/dxvk/dxvk_device.h b/src/dxvk/dxvk_device.h
index d2af2e39..f612c9d7 100644
--- a/src/dxvk/dxvk_device.h
+++ b/src/dxvk/dxvk_device.h
@@ -339,6 +339,7 @@ namespace dxvk {
      * presenter's \c presentImage method.
      * \param [in] presenter The presenter
      * \param [in] semaphore Sync semaphore
+     * \returns Status of the operation
      */
     VkResult presentImage(
       const Rc<vk::Presenter>&        presenter,
@@ -347,11 +348,11 @@ namespace dxvk {
     /**
      * \brief Submits a command list
      * 
-     * Synchronization arguments are optional.
+     * Submits the given command list to the device using
+     * the given set of optional synchronization primitives.
      * \param [in] commandList The command list to submit
      * \param [in] waitSync (Optional) Semaphore to wait on
      * \param [in] wakeSync (Optional) Semaphore to notify
-     * \returns Synchronization fence
      */
     void submitCommandList(
       const Rc<DxvkCommandList>&      commandList,
@@ -366,7 +367,8 @@ namespace dxvk {
      * to lock the queue before submitting command buffers.
      */
     void lockSubmission() {
-      m_submissionLock.lock();
+      m_submissionQueue.synchronize();
+      m_submissionQueue.lockDeviceQueue();
     }
 
     /**
@@ -376,7 +378,7 @@ namespace dxvk {
      * itself can use them for submissions again.
      */
     void unlockSubmission() {
-      m_submissionLock.unlock();
+      m_submissionQueue.unlockDeviceQueue();
     }
 
     /**
@@ -430,7 +432,6 @@ namespace dxvk {
     sync::Spinlock              m_statLock;
     DxvkStatCounters            m_statCounters;
 
-    std::mutex                  m_submissionLock;
     DxvkDeviceQueue             m_graphicsQueue;
     DxvkDeviceQueue             m_presentQueue;
 
diff --git a/src/dxvk/dxvk_queue.cpp b/src/dxvk/dxvk_queue.cpp
index 191db0da..6278ccea 100644
--- a/src/dxvk/dxvk_queue.cpp
+++ b/src/dxvk/dxvk_queue.cpp
@@ -5,7 +5,8 @@ namespace dxvk {
 
   DxvkSubmissionQueue::DxvkSubmissionQueue(DxvkDevice* device)
   : m_device(device),
-    m_thread([this] () { threadFunc(); }) {
+    m_submitThread([this] () { submitCmdLists(); }),
+    m_finishThread([this] () { finishCmdLists(); }) {
 
   }
 
@@ -15,61 +16,132 @@ namespace dxvk {
       m_stopped.store(true);
     }
 
-    m_condOnAdd.notify_one();
-    m_thread.join();
+    m_appendCond.notify_all();
+    m_submitCond.notify_all();
+
+    m_submitThread.join();
+    m_finishThread.join();
   }
 
 
-  void DxvkSubmissionQueue::submit(const Rc<DxvkCommandList>& cmdList) {
-    { std::unique_lock<std::mutex> lock(m_mutex);
-
-      m_condOnTake.wait(lock, [this] {
-        return m_entries.size() < MaxNumQueuedCommandBuffers;
+  void DxvkSubmissionQueue::submit(DxvkSubmitInfo submitInfo) {
+    std::unique_lock<std::mutex> lock(m_mutex);
+
+    m_finishCond.wait(lock, [this] {
+      return m_submitQueue.size() + m_finishQueue.size() <= MaxNumQueuedCommandBuffers;
+    });
+
+    m_pending += 1;
+    m_submitQueue.push(std::move(submitInfo));
+    m_appendCond.notify_all();
+  }
+
+
+  VkResult DxvkSubmissionQueue::present(DxvkPresentInfo presentInfo) {
+    this->synchronize();
+
+    std::unique_lock<std::mutex> lock(m_mutexQueue);
+    return presentInfo.presenter->presentImage(presentInfo.waitSync);
+  }
+
+
+  void DxvkSubmissionQueue::synchronize() {
+    std::unique_lock<std::mutex> lock(m_mutex);
+
+    m_submitCond.wait(lock, [this] {
+      return m_submitQueue.empty();
+    });
+  }
+
+
+  void DxvkSubmissionQueue::lockDeviceQueue() {
+    m_mutexQueue.lock();
+  }
+
+
+  void DxvkSubmissionQueue::unlockDeviceQueue() {
+    m_mutexQueue.unlock();
+  }
+
+
+  void DxvkSubmissionQueue::submitCmdLists() {
+    env::setThreadName("dxvk-submit");
+
+    std::unique_lock<std::mutex> lock(m_mutex);
+
+    while (!m_stopped.load()) {
+      m_appendCond.wait(lock, [this] {
+        return m_stopped.load() || !m_submitQueue.empty();
       });
 
-      m_submits += 1;
-      m_entries.push(cmdList);
-      m_condOnAdd.notify_one();
+      if (m_stopped.load())
+        return;
+
+      DxvkSubmitInfo submitInfo = std::move(m_submitQueue.front());
+      lock.unlock();
+
+      // Submit command buffer to device
+      VkResult status;
+
+      { std::lock_guard<std::mutex> lock(m_mutexQueue);
+
+        status = submitInfo.cmdList->submit(
+          submitInfo.queue,
+          submitInfo.waitSync,
+          submitInfo.wakeSync);
+      }
+
+      // On success, pass it on to the queue thread
+      lock = std::unique_lock<std::mutex>(m_mutex);
+
+      if (status == VK_SUCCESS) {
+        m_finishQueue.push(std::move(submitInfo));
+        m_submitQueue.pop();
+        m_submitCond.notify_all();
+      } else {
+        Logger::err(str::format(
+          "DxvkSubmissionQueue: Command submission failed with ",
+          status));
+        m_pending -= 1;
+      }
     }
   }
 
 
-  void DxvkSubmissionQueue::threadFunc() {
+  void DxvkSubmissionQueue::finishCmdLists() {
     env::setThreadName("dxvk-queue");
 
+    std::unique_lock<std::mutex> lock(m_mutex);
+
     while (!m_stopped.load()) {
-      Rc<DxvkCommandList> cmdList;
+      m_submitCond.wait(lock, [this] {
+        return m_stopped.load() || !m_finishQueue.empty();
+      });
+
+      if (m_stopped.load())
+        return;
 
-      { std::unique_lock<std::mutex> lock(m_mutex);
-
-        m_condOnAdd.wait(lock, [this] {
-          return m_stopped.load() || (m_entries.size() != 0);
-        });
-
-        if (m_entries.size() != 0) {
-          cmdList = std::move(m_entries.front());
-          m_entries.pop();
-        }
-
-        m_condOnTake.notify_one();
-      }
+      DxvkSubmitInfo submitInfo = std::move(m_finishQueue.front());
+      lock.unlock();
 
-      if (cmdList != nullptr) {
-        VkResult status = cmdList->synchronize();
+      VkResult status = submitInfo.cmdList->synchronize();
+
+      if (status == VK_SUCCESS) {
+        submitInfo.cmdList->signalEvents();
+        submitInfo.cmdList->reset();
 
-        if (status == VK_SUCCESS) {
-          cmdList->signalEvents();
-          cmdList->reset();
-
-          m_device->recycleCommandList(cmdList);
-        } else {
-          Logger::err(str::format(
-            "DxvkSubmissionQueue: Failed to sync fence: ",
-            status));
-        }
-
-        m_submits -= 1;
+        m_device->recycleCommandList(submitInfo.cmdList);
+      } else {
+        Logger::err(str::format(
+          "DxvkSubmissionQueue: Failed to sync fence: ",
+          status));
       }
+
+      lock = std::unique_lock<std::mutex>(m_mutex);
+      m_pending -= 1;
+
+      m_finishQueue.pop();
+      m_finishCond.notify_all();
     }
   }
diff --git a/src/dxvk/dxvk_queue.h b/src/dxvk/dxvk_queue.h
index 94bfedbd..aa842142 100644
--- a/src/dxvk/dxvk_queue.h
+++ b/src/dxvk/dxvk_queue.h
@@ -6,17 +6,45 @@
 #include "../util/thread.h"
 
+#include "../vulkan/vulkan_presenter.h"
+
 #include "dxvk_cmdlist.h"
 
 namespace dxvk {
 
   class DxvkDevice;
 
 
+  /**
+   * \brief Queue submission info
+   * 
+   * Stores parameters used to submit
+   * a command buffer to the device.
+   */
+  struct DxvkSubmitInfo {
+    Rc<DxvkCommandList> cmdList;
+    VkQueue             queue;
+    VkSemaphore         waitSync;
+    VkSemaphore         wakeSync;
+  };
+
+
+  /**
+   * \brief Present info
+   * 
+   * Stores parameters used to present
+   * a swap chain image on the device.
+   */
+  struct DxvkPresentInfo {
+    Rc<vk::Presenter>   presenter;
+    VkSemaphore         waitSync;
+  };
+
+
   /**
    * \brief Submission queue
    */
   class DxvkSubmissionQueue {
-    
+
   public:
 
     DxvkSubmissionQueue(DxvkDevice* device);
@@ -30,35 +58,84 @@ namespace dxvk {
      * \returns Pending submission count
      */
     uint32_t pendingSubmissions() const {
-      return m_submits.load();
+      return m_pending.load();
     }
 
     /**
-     * \brief Submits a command list
+     * \brief Submits a command list asynchronously
      * 
-     * Submits a command list to the queue thread.
-     * This thread will wait for the command list
-     * to finish executing on the GPU and signal
-     * any queries and events that are used by
-     * the command list in question.
-     * \param [in] cmdList The command list
+     * Queues a command list for submission on the
+     * dedicated submission thread. Use this to take
+     * the submission overhead off the calling thread.
+     * \param [in] submitInfo Submission parameters
      */
-    void submit(const Rc<DxvkCommandList>& cmdList);
+    void submit(
+            DxvkSubmitInfo      submitInfo);
+
+    /**
+     * \brief Presents an image synchronously
+     * 
+     * Waits for queued command lists to be submitted
+     * and then presents the current swap chain image
+     * of the presenter. May stall the calling thread.
+     * \param [in] present Present parameters
+     * \returns Status of the operation
+     */
+    VkResult present(
+            DxvkPresentInfo     present);
+
+    /**
+     * \brief Synchronizes with queue submissions
+     * 
+     * Waits for all pending command lists to be
+     * submitted to the GPU before returning.
+     */
+    void synchronize();
+
+    /**
+     * \brief Locks device queue
+     * 
+     * Locks the mutex that protects the Vulkan queue
+     * that DXVK uses for command buffer submission.
+     * This is needed when the app submits its own
+     * command buffers to the queue.
+     */
+    void lockDeviceQueue();
+
+    /**
+     * \brief Unlocks device queue
+     * 
+     * Unlocks the mutex that protects the Vulkan
+     * queue used for command buffer submission.
+     */
+    void unlockDeviceQueue();
 
   private:
-    
+
     DxvkDevice*             m_device;
 
     std::atomic<bool>       m_stopped = { false };
-    std::atomic<uint32_t>   m_submits = { 0u };
-    
+    std::atomic<uint32_t>   m_pending = { 0u };
+
     std::mutex              m_mutex;
-    std::condition_variable m_condOnAdd;
-    std::condition_variable m_condOnTake;
-    std::queue<Rc<DxvkCommandList>> m_entries;
-    dxvk::thread            m_thread;
+    std::mutex              m_mutexQueue;
 
-    void threadFunc();
+    std::condition_variable m_appendCond;
+    std::condition_variable m_submitCond;
+    std::condition_variable m_finishCond;
+
+    std::queue<DxvkSubmitInfo> m_submitQueue;
+    std::queue<DxvkSubmitInfo> m_finishQueue;
+
+    dxvk::thread            m_submitThread;
+    dxvk::thread            m_finishThread;
+
+    VkResult submitToQueue(
+      const DxvkSubmitInfo&     submission);
+
+    void submitCmdLists();
+
+    void finishCmdLists();
 
   };
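
Note on the design (commentary, not part of the patch): the rewritten DxvkSubmissionQueue is a two-stage producer/consumer pipeline. submit() pushes a DxvkSubmitInfo into m_submitQueue and blocks once both queues together exceed MaxNumQueuedCommandBuffers; the dxvk-submit thread drains m_submitQueue, performs the actual vkQueueSubmit under m_mutexQueue, and hands the entry to m_finishQueue; the dxvk-queue thread waits on the command list's fence, recycles it, and signals m_finishCond to release the back-pressure. The standalone C++ sketch below mirrors that structure with plain std::thread and a hypothetical Job type; the names SubmissionQueue, Job, MaxPending, submitLoop and finishLoop are illustrative assumptions and none of DXVK's actual classes are used.

// sketch_submission_queue.cpp -- illustrative only, not part of the patch.
// Two-stage pipeline: a "submit" thread drains m_submitQueue and hands work
// to a "finish" thread through m_finishQueue. Producers block while both
// queues together hold more than MaxPending jobs (back-pressure).
#include <condition_variable>
#include <cstddef>
#include <iostream>
#include <mutex>
#include <queue>
#include <thread>

struct Job { int id = 0; };

class SubmissionQueue {
public:
  SubmissionQueue()
  : m_submitThread([this] { submitLoop(); }),
    m_finishThread([this] { finishLoop(); }) { }

  ~SubmissionQueue() {
    { std::unique_lock<std::mutex> lock(m_mutex);
      m_stopped = true;
    }
    m_appendCond.notify_all();
    m_submitCond.notify_all();
    m_submitThread.join();
    m_finishThread.join();
  }

  // Called by the producer; blocks while too much work is queued.
  void submit(Job job) {
    std::unique_lock<std::mutex> lock(m_mutex);
    m_finishCond.wait(lock, [this] {
      return m_submitQueue.size() + m_finishQueue.size() <= MaxPending;
    });
    m_submitQueue.push(job);
    m_appendCond.notify_all();
  }

  // Waits until the submit thread has drained its queue.
  void synchronize() {
    std::unique_lock<std::mutex> lock(m_mutex);
    m_submitCond.wait(lock, [this] { return m_submitQueue.empty(); });
  }

private:
  static constexpr std::size_t MaxPending = 4;

  std::mutex              m_mutex;
  std::condition_variable m_appendCond;  // work appended to m_submitQueue
  std::condition_variable m_submitCond;  // work moved on to m_finishQueue
  std::condition_variable m_finishCond;  // work fully retired
  std::queue<Job>         m_submitQueue;
  std::queue<Job>         m_finishQueue;
  bool                    m_stopped = false;

  std::thread m_submitThread;
  std::thread m_finishThread;

  void submitLoop() {
    std::unique_lock<std::mutex> lock(m_mutex);
    while (true) {
      m_appendCond.wait(lock, [this] { return m_stopped || !m_submitQueue.empty(); });
      if (m_stopped) return;
      Job job = m_submitQueue.front();
      lock.unlock();
      // The real code submits to the Vulkan queue here, outside m_mutex.
      std::cout << "submit " << job.id << "\n";
      lock.lock();
      m_finishQueue.push(job);
      m_submitQueue.pop();
      m_submitCond.notify_all();
    }
  }

  void finishLoop() {
    std::unique_lock<std::mutex> lock(m_mutex);
    while (true) {
      m_submitCond.wait(lock, [this] { return m_stopped || !m_finishQueue.empty(); });
      if (m_stopped) return;
      Job job = m_finishQueue.front();
      lock.unlock();
      // The real code waits on the command list's fence and recycles it here.
      std::cout << "finish " << job.id << "\n";
      lock.lock();
      m_finishQueue.pop();
      m_finishCond.notify_all();
    }
  }
};

int main() {
  SubmissionQueue queue;
  for (int i = 0; i < 8; i++)
    queue.submit(Job{ i });
  queue.synchronize();  // all jobs handed off to the "device" by this point
}

As in the patch, synchronize() only waits for the submit queue to drain, not for the work to finish; that is why waitForIdle() in dxvk_device.cpp still calls vkDeviceWaitIdle afterwards for full GPU idle.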
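Similarly illustrative: how a caller outside DXVK's submission thread might use the updated lockSubmission()/unlockSubmission() pair from dxvk_device.h when it needs to submit its own Vulkan command buffers. The helper function, its parameters and the include path are assumptions, not part of the patch; only lockSubmission(), unlockSubmission() and vkQueueSubmit are taken from the diff and the Vulkan API.

#include <vulkan/vulkan.h>
#include "dxvk_device.h"   // assumed include path for DxvkDevice

// Hypothetical helper: submit caller-owned work on the device's queue
// without racing against the dxvk-submit thread.
void submitExternalWork(
        dxvk::DxvkDevice*   device,
        VkQueue             queue,
  const VkSubmitInfo&       submit) {
  // Drains pending DXVK submissions, then holds the queue mutex
  device->lockSubmission();
  vkQueueSubmit(queue, 1, &submit, VK_NULL_HANDLE);
  // Hands the queue back to DXVK's submission thread
  device->unlockSubmission();
}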