@@ -235,44 +235,10 @@ namespace BansheeEngine
mBoundParams.insert(params);
}

- VulkanCommandBuffer::VulkanCommandBuffer(VulkanDevice& device, GpuQueueType type, UINT32 deviceIdx,
- UINT32 queueIdx, bool secondary)
- : CommandBuffer(type, deviceIdx, queueIdx, secondary), mBuffer(nullptr)
- , mDevice(device), mQueue(nullptr), mIdMask(0)
- {
- UINT32 numQueues = device.getNumQueues(mType);
- if (numQueues == 0) // Fall back to graphics queue
- {
- mType = GQT_GRAPHICS;
- numQueues = device.getNumQueues(GQT_GRAPHICS);
- }
-
- mQueue = device.getQueue(mType, mQueueIdx % numQueues);
-
- // If multiple command buffer IDs map to the same queue, mark them in the mask
- UINT32 curIdx = mQueueIdx;
- while (curIdx < BS_MAX_QUEUES_PER_TYPE)
- {
- mIdMask |= CommandSyncMask::getGlobalQueueIdx(mType, curIdx);
- curIdx += numQueues;
- }
-
- acquireNewBuffer();
- }
-
- void VulkanCommandBuffer::acquireNewBuffer()
- {
- VulkanCmdBufferPool& pool = mDevice.getCmdBufferPool();
-
- if (mBuffer != nullptr)
- assert(mBuffer->isSubmitted());
-
- UINT32 queueFamily = mDevice.getQueueFamily(mType);
- mBuffer = pool.getBuffer(queueFamily, mIsSecondary);
- }
-
void VulkanCmdBuffer::submit(VulkanQueue* queue, UINT32 queueIdx, UINT32 syncMask)
{
+ assert(isReadyForSubmit());
+
// Issue pipeline barriers for queue transitions (need to happen on original queue first, then on new queue)
for (auto& entry : mBoundParams)
entry->prepareForSubmit(this, mTransitionInfoTemp);
@@ -286,50 +252,91 @@ namespace BansheeEngine
if (entryQueueFamily == mQueueFamily)
continue;

- VkCommandBuffer cmdBuffer; // TODO - Get the command buffer on entryQueueFamily
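+ // First half of the queue transition: record the barriers on a buffer from the resource's original queue family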
+ VulkanCmdBuffer* cmdBuffer = device.getCmdBufferPool().getBuffer(entryQueueFamily, false);
+ VkCommandBuffer vkCmdBuffer = cmdBuffer->getHandle();

TransitionInfo& barriers = entry.second;
UINT32 numImgBarriers = (UINT32)barriers.imageBarriers.size();
UINT32 numBufferBarriers = (UINT32)barriers.bufferBarriers.size();

- vkCmdPipelineBarrier(cmdBuffer,
+ vkCmdPipelineBarrier(vkCmdBuffer,
VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
0, 0, nullptr,
numBufferBarriers, barriers.bufferBarriers.data(),
numImgBarriers, barriers.imageBarriers.data());

- // TODO - Submit the command buffer
- // TODO - Register the command buffer in the sync mask so we wait on it
+ // Find an appropriate queue to execute on
+ UINT32 otherQueueIdx = 0;
+ VulkanQueue* otherQueue = nullptr;
+ GpuQueueType otherQueueType = GQT_GRAPHICS;
+ for (UINT32 i = 0; i < GQT_COUNT; i++)
+ {
+ otherQueueType = (GpuQueueType)i;
+ if (device.getQueueFamily(otherQueueType) != entryQueueFamily)
+ continue;
+
+ UINT32 numQueues = device.getNumQueues(otherQueueType);
+ for(UINT32 j = 0; j < numQueues; j++)
+ {
+ // Try to find a queue not currently executing
+ VulkanQueue* curQueue = device.getQueue(otherQueueType, j);
+ if(!curQueue->isExecuting())
+ {
+ otherQueue = curQueue;
+ otherQueueIdx = j;
+ }
+ }
+
+ // Couldn't find an idle queue, so fall back to the first one
+ if(otherQueue == nullptr)
+ {
+ otherQueue = device.getQueue(otherQueueType, 0);
+ otherQueueIdx = 0;
+ }
+
+ break;
+ }
+
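+ // Make the main submission wait on the interim buffer by adding its queue to the sync mask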
+ UINT32 otherGlobalQueueIdx = CommandSyncMask::getGlobalQueueIdx(otherQueueType, otherQueueIdx);
+ syncMask |= otherGlobalQueueIdx;
+
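+ // Submit the interim transition buffer immediately on the chosen queue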
+ cmdBuffer->end();
+ cmdBuffer->submit(otherQueue, otherQueueIdx, 0);

// If there are any layout transitions, reset them as we don't need them for the second pipeline barrier
for (auto& barrierEntry : barriers.imageBarriers)
barrierEntry.oldLayout = barrierEntry.newLayout;
}

+ UINT32 deviceIdx = device.getIndex();
+ VulkanCommandBufferManager& cbm = static_cast<VulkanCommandBufferManager&>(CommandBufferManager::instance());
+
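+ // Convert the sync mask into the list of semaphores this submission needs to wait on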
+ UINT32 numSemaphores;
+ cbm.getSyncSemaphores(deviceIdx, syncMask, mSemaphoresTemp, numSemaphores);
+
// Issue second part of transition pipeline barriers (on this queue)
for (auto& entry : mTransitionInfoTemp)
{
- VkCommandBuffer cmdBuffer; // TODO - Get the command buffer on queueFamily AND this exact queue
- // - Probably best to just append it to current submitInfo as it is executed in order
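+ // Second half of the transition: record the barriers on a buffer from this queue's family and submit it on this queue, right before the main buffer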
+ VulkanCmdBuffer* cmdBuffer = device.getCmdBufferPool().getBuffer(mQueueFamily, false);
+ VkCommandBuffer vkCmdBuffer = cmdBuffer->getHandle();

TransitionInfo& barriers = entry.second;
UINT32 numImgBarriers = (UINT32)barriers.imageBarriers.size();
UINT32 numBufferBarriers = (UINT32)barriers.bufferBarriers.size();

- vkCmdPipelineBarrier(cmdBuffer,
+ vkCmdPipelineBarrier(vkCmdBuffer,
VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
0, 0, nullptr,
numBufferBarriers, barriers.bufferBarriers.data(),
numImgBarriers, barriers.imageBarriers.data());
- }

- UINT32 deviceIdx = device.getIndex();
- VulkanCommandBufferManager& cbm = static_cast<VulkanCommandBufferManager&>(CommandBufferManager::instance());
-
- UINT32 numSemaphores;
- cbm.getSyncSemaphores(deviceIdx, syncMask, mSemaphoresTemp, numSemaphores);
+ cmdBuffer->end();
+
+ queue->submit(cmdBuffer, mSemaphoresTemp, numSemaphores);
+ numSemaphores = 0; // Semaphores are only needed the first time, since we're adding the buffers on the same queue
+ }

queue->submit(this, mSemaphoresTemp, numSemaphores);

@@ -353,10 +360,44 @@ namespace BansheeEngine
}
}

- void VulkanCommandBuffer::submit(UINT32 syncMask)
+ VulkanCommandBuffer::VulkanCommandBuffer(VulkanDevice& device, GpuQueueType type, UINT32 deviceIdx,
+ UINT32 queueIdx, bool secondary)
+ : CommandBuffer(type, deviceIdx, queueIdx, secondary), mBuffer(nullptr)
+ , mDevice(device), mQueue(nullptr), mIdMask(0)
+ {
+ UINT32 numQueues = device.getNumQueues(mType);
+ if (numQueues == 0) // Fall back to graphics queue
+ {
+ mType = GQT_GRAPHICS;
+ numQueues = device.getNumQueues(GQT_GRAPHICS);
+ }
+
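+ // Map the requested queue index onto an available queue, wrapping around if fewer queues exist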
+ mQueue = device.getQueue(mType, mQueueIdx % numQueues);
+
+ // If multiple command buffer IDs map to the same queue, mark them in the mask
+ UINT32 curIdx = mQueueIdx;
+ while (curIdx < BS_MAX_QUEUES_PER_TYPE)
+ {
+ mIdMask |= CommandSyncMask::getGlobalQueueIdx(mType, curIdx);
+ curIdx += numQueues;
+ }
+
+ acquireNewBuffer();
+ }
+
+ void VulkanCommandBuffer::acquireNewBuffer()
{
- assert(mBuffer != nullptr && mBuffer->isReadyForSubmit());
+ VulkanCmdBufferPool& pool = mDevice.getCmdBufferPool();
+
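+ // The previous buffer must already have been submitted before it can be replaced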
+ if (mBuffer != nullptr)
+ assert(mBuffer->isSubmitted());

+ UINT32 queueFamily = mDevice.getQueueFamily(mType);
+ mBuffer = pool.getBuffer(queueFamily, mIsSecondary);
+ }
+
+ void VulkanCommandBuffer::submit(UINT32 syncMask)
+ {
// Ignore myself
syncMask &= ~mIdMask;