ソースを参照

vulkan: Switch to synchronization2

We require 1.3 already anyway (due to dynamic rendering) so let's use
synchronization2 across the board as well.
rdb 2 ヶ月 前
コミット
7e8e167efc

+ 3 - 9
panda/src/vulkandisplay/vulkanBufferContext.h

@@ -37,9 +37,9 @@ public:
   bool _host_visible = false;
 
   // Used for shader buffers.
-  VkAccessFlags _write_access_mask = 0;
-  VkPipelineStageFlags _write_stage_mask = 0;
-  VkPipelineStageFlags _read_stage_mask = 0;
+  VkAccessFlags2 _write_access_mask = 0;
+  VkPipelineStageFlags2 _write_stage_mask = 0;
+  VkPipelineStageFlags2 _read_stage_mask = 0;
 
   // Sequence number of the last command buffer in which this was used.
   uint64_t _read_seq = 0;
@@ -49,12 +49,6 @@ public:
   bool _pooled_barrier_exists = false;
   size_t _buffer_barrier_index = 0;
 
-  // These fields are managed by VulkanFrameData::add_initial_barrier(),
-  // and are used to keep track of the barrier we issue at the beginning of a
-  // frame.
-  VkAccessFlags _initial_src_access_mask = 0;
-  VkAccessFlags _initial_dst_access_mask = 0;
-
 public:
   static TypeHandle get_class_type() {
     return _type_handle;

+ 1 - 9
panda/src/vulkandisplay/vulkanCommandBuffer.I

@@ -20,16 +20,12 @@ VulkanCommandBuffer(VulkanCommandBuffer &&from) noexcept :
   _seq(from._seq),
   _wait_semaphore(from._wait_semaphore),
   _image_barriers(std::move(from._image_barriers)),
-  _buffer_barriers(std::move(from._buffer_barriers)),
-  _barrier_src_stage_mask(from._barrier_src_stage_mask),
-  _barrier_dst_stage_mask(from._barrier_dst_stage_mask) {
+  _buffer_barriers(std::move(from._buffer_barriers)) {
   from._cmd = VK_NULL_HANDLE;
   from._seq = 0;
   from._wait_semaphore = VK_NULL_HANDLE;
   from._image_barriers.clear();
   from._buffer_barriers.clear();
-  from._barrier_src_stage_mask = 0;
-  from._barrier_dst_stage_mask = 0;
 }
 
 /**
@@ -51,14 +47,10 @@ operator = (VulkanCommandBuffer &&from) noexcept {
   _wait_semaphore = from._wait_semaphore;
   _image_barriers = std::move(from._image_barriers);
   _buffer_barriers = std::move(from._buffer_barriers);
-  _barrier_src_stage_mask = from._barrier_src_stage_mask;
-  _barrier_dst_stage_mask = from._barrier_dst_stage_mask;
   from._cmd = VK_NULL_HANDLE;
   from._seq = 0;
   from._wait_semaphore = VK_NULL_HANDLE;
   from._image_barriers.clear();
   from._buffer_barriers.clear();
-  from._barrier_src_stage_mask = 0;
-  from._barrier_dst_stage_mask = 0;
   return *this;
 }

+ 73 - 47
panda/src/vulkandisplay/vulkanCommandBuffer.cxx

@@ -23,24 +23,24 @@
  */
 void VulkanCommandBuffer::
 add_barrier(VulkanTextureContext *tc, VkImageLayout layout,
-            VkPipelineStageFlags dst_stage_mask,
-            VkAccessFlags dst_access_mask) {
+            VkPipelineStageFlags2 dst_stage_mask,
+            VkAccessFlags2 dst_access_mask) {
   nassertv(_cmd != VK_NULL_HANDLE);
 
   // Are we writing to the texture?
-  VkAccessFlags write_mask = (dst_access_mask &
-    (VK_ACCESS_SHADER_WRITE_BIT |
-     VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
-     VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
-     VK_ACCESS_TRANSFER_WRITE_BIT |
-     VK_ACCESS_HOST_WRITE_BIT |
-     VK_ACCESS_MEMORY_WRITE_BIT));
+  VkAccessFlags2 write_mask = (dst_access_mask &
+    (VK_ACCESS_2_SHADER_WRITE_BIT |
+     VK_ACCESS_2_COLOR_ATTACHMENT_WRITE_BIT |
+     VK_ACCESS_2_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
+     VK_ACCESS_2_TRANSFER_WRITE_BIT |
+     VK_ACCESS_2_HOST_WRITE_BIT |
+     VK_ACCESS_2_MEMORY_WRITE_BIT));
 
   nassertv(tc->_write_seq <= _seq);
   nassertv((write_mask == 0 || tc->_read_seq <= _seq));
 
-  VkPipelineStageFlags src_stage_mask = tc->_write_stage_mask;
-  VkAccessFlags src_access_mask = tc->_write_access_mask;
+  VkPipelineStageFlags2 src_stage_mask = tc->_write_stage_mask;
+  VkAccessFlags2 src_access_mask = tc->_write_access_mask;
 
   bool is_write = (tc->_layout != layout || write_mask != 0);
   if (is_write) {
@@ -50,7 +50,7 @@ add_barrier(VulkanTextureContext *tc, VkImageLayout layout,
 
     if (src_stage_mask == 0) {
       // Can't specify a source stage mask of zero.
-      src_stage_mask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
+      src_stage_mask = VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT;
     }
   }
   else if (src_stage_mask == 0) {
@@ -70,11 +70,13 @@ add_barrier(VulkanTextureContext *tc, VkImageLayout layout,
     }
   }
 
-  VkImageMemoryBarrier img_barrier;
+  VkImageMemoryBarrier2 img_barrier;
   if (tc->_image != VK_NULL_HANDLE) {
-    img_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
+    img_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2;
     img_barrier.pNext = nullptr;
+    img_barrier.srcStageMask = src_stage_mask;
     img_barrier.srcAccessMask = src_access_mask;
+    img_barrier.dstStageMask = dst_stage_mask;
     img_barrier.dstAccessMask = dst_access_mask;
     img_barrier.oldLayout = tc->_layout;
     img_barrier.newLayout = layout;
@@ -88,11 +90,13 @@ add_barrier(VulkanTextureContext *tc, VkImageLayout layout,
     img_barrier.subresourceRange.layerCount = tc->_array_layers;
   }
 
-  VkBufferMemoryBarrier buf_barrier;
+  VkBufferMemoryBarrier2 buf_barrier;
   if (tc->_buffer != VK_NULL_HANDLE) {
-    buf_barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
+    buf_barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER_2;
     buf_barrier.pNext = nullptr;
+    buf_barrier.srcStageMask = src_stage_mask;
     buf_barrier.srcAccessMask = src_access_mask;
+    buf_barrier.dstStageMask = dst_stage_mask;
     buf_barrier.dstAccessMask = dst_access_mask;
     buf_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
     buf_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
@@ -133,15 +137,19 @@ add_barrier(VulkanTextureContext *tc, VkImageLayout layout,
       // Already exists, this barrier, just modify it.
       if (tc->_image != VK_NULL_HANDLE) {
         nassertv(tc->_image_barrier_index <= _image_barriers.size());
-        VkImageMemoryBarrier &existing_barrier = _image_barriers[tc->_image_barrier_index];
+        VkImageMemoryBarrier2 &existing_barrier = _image_barriers[tc->_image_barrier_index];
+        existing_barrier.srcStageMask |= img_barrier.srcStageMask;
         existing_barrier.srcAccessMask |= img_barrier.srcAccessMask;
+        existing_barrier.dstStageMask |= img_barrier.dstStageMask;
         existing_barrier.dstAccessMask |= img_barrier.dstAccessMask;
       }
       if (tc->_buffer != VK_NULL_HANDLE) {
         nassertv(tc->_buffer_barrier_index <= _buffer_barriers.size());
-        VkBufferMemoryBarrier &existing_barrier = _buffer_barriers[tc->_buffer_barrier_index];
-        existing_barrier.srcAccessMask |= buf_barrier.srcAccessMask;
-        existing_barrier.dstAccessMask |= buf_barrier.dstAccessMask;
+        VkBufferMemoryBarrier2 &existing_barrier = _buffer_barriers[tc->_buffer_barrier_index];
+        existing_barrier.srcStageMask |= buf_barrier.srcStageMask;
+        existing_barrier.srcAccessMask |= buf_barrier.srcAccessMask;
+        existing_barrier.dstStageMask |= buf_barrier.dstStageMask;
+        existing_barrier.dstAccessMask |= buf_barrier.dstAccessMask;
       }
     } else {
       if (tc->_image != VK_NULL_HANDLE) {
@@ -154,15 +162,21 @@ add_barrier(VulkanTextureContext *tc, VkImageLayout layout,
       }
       tc->_pooled_barrier_exists = true;
     }
-    _barrier_src_stage_mask |= src_stage_mask;
-    _barrier_dst_stage_mask |= dst_stage_mask;
   }
   else {
     // We already have an access done in this CB, issue the barrier now.
-    vkCmdPipelineBarrier(_cmd, src_stage_mask, dst_stage_mask, 0,
-                         0, nullptr,
-                         (tc->_buffer != VK_NULL_HANDLE), &buf_barrier,
-                         (tc->_image != VK_NULL_HANDLE), &img_barrier);
+    VkDependencyInfo info = {
+      VK_STRUCTURE_TYPE_DEPENDENCY_INFO,
+      nullptr, // pNext
+      0, // dependencyFlags
+      0, // memoryBarrierCount
+      nullptr, // pMemoryBarriers
+      tc->_buffer != VK_NULL_HANDLE,
+      &buf_barrier,
+      tc->_image != VK_NULL_HANDLE,
+      &img_barrier,
+    };
+    vkCmdPipelineBarrier2(_cmd, &info);
 
     tc->_pooled_barrier_exists = false;
   }
@@ -182,9 +196,9 @@ add_barrier(VulkanTextureContext *tc, VkImageLayout layout,
     // another read later from a different (earlier) stage, which is why we
     // don't zero out _write_stage_mask.  We can just check _read_stage_mask
     // the next time to see what we have already synchronized with the write.
-    tc->_read_stage_mask |= dst_stage_mask & ~VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
+    tc->_read_stage_mask |= dst_stage_mask & ~VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT;
 
-    if (dst_stage_mask & (VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT | VK_PIPELINE_STAGE_ALL_COMMANDS_BIT)) {
+    if (dst_stage_mask & (VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT | VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT)) {
       // Actually, looks like we've synchronized all stages.  We still do need
       // to keep _read_stage_mask, since a subsequent write still needs to
       // wait for this read to complete.
@@ -200,22 +214,22 @@ add_barrier(VulkanTextureContext *tc, VkImageLayout layout,
  * Note that these barriers may be done BEFORE waiting on the semaphore.
  */
 void VulkanCommandBuffer::
-add_barrier(VulkanBufferContext *bc, VkPipelineStageFlags dst_stage_mask,
-            VkAccessFlags dst_access_mask) {
+add_barrier(VulkanBufferContext *bc, VkPipelineStageFlags2 dst_stage_mask,
+            VkAccessFlags2 dst_access_mask) {
   nassertv(_cmd != VK_NULL_HANDLE);
 
   // Are we writing to the buffer?
-  VkAccessFlags write_mask = (dst_access_mask &
-    (VK_ACCESS_SHADER_WRITE_BIT |
-     VK_ACCESS_TRANSFER_WRITE_BIT |
-     VK_ACCESS_HOST_WRITE_BIT |
-     VK_ACCESS_MEMORY_WRITE_BIT));
+  VkAccessFlags2 write_mask = (dst_access_mask &
+    (VK_ACCESS_2_SHADER_WRITE_BIT |
+     VK_ACCESS_2_TRANSFER_WRITE_BIT |
+     VK_ACCESS_2_HOST_WRITE_BIT |
+     VK_ACCESS_2_MEMORY_WRITE_BIT));
 
   nassertv(bc->_write_seq <= _seq);
   nassertv((write_mask == 0 || bc->_read_seq <= _seq));
 
-  VkPipelineStageFlags src_stage_mask = bc->_write_stage_mask;
-  VkAccessFlags src_access_mask = bc->_write_access_mask;
+  VkPipelineStageFlags2 src_stage_mask = bc->_write_stage_mask;
+  VkAccessFlags2 src_access_mask = bc->_write_access_mask;
 
   if (write_mask != 0) {
     // Before a layout transition or a write, all stages that previously read
@@ -224,7 +238,7 @@ add_barrier(VulkanBufferContext *bc, VkPipelineStageFlags dst_stage_mask,
 
     if (src_stage_mask == 0) {
       // Can't specify a source stage mask of zero.
-      src_stage_mask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
+      src_stage_mask = VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT;
     }
   }
   else if (src_stage_mask == 0) {
@@ -244,10 +258,12 @@ add_barrier(VulkanBufferContext *bc, VkPipelineStageFlags dst_stage_mask,
     }
   }
 
-  VkBufferMemoryBarrier buf_barrier;
-  buf_barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
+  VkBufferMemoryBarrier2 buf_barrier;
+  buf_barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER_2;
   buf_barrier.pNext = nullptr;
+  buf_barrier.srcStageMask = src_stage_mask;
   buf_barrier.srcAccessMask = src_access_mask;
+  buf_barrier.dstStageMask = dst_stage_mask;
   buf_barrier.dstAccessMask = dst_access_mask;
   buf_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
   buf_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
@@ -278,21 +294,31 @@ add_barrier(VulkanBufferContext *bc, VkPipelineStageFlags dst_stage_mask,
     if (bc->_read_seq == _seq && bc->_pooled_barrier_exists) {
       // Already exists, this barrier, just modify it.
       nassertv(bc->_buffer_barrier_index <= _buffer_barriers.size());
-      VkBufferMemoryBarrier &existing_barrier = _buffer_barriers[bc->_buffer_barrier_index];
+      VkBufferMemoryBarrier2 &existing_barrier = _buffer_barriers[bc->_buffer_barrier_index];
+      existing_barrier.srcStageMask |= buf_barrier.srcStageMask;
       existing_barrier.srcAccessMask |= buf_barrier.srcAccessMask;
+      existing_barrier.dstStageMask |= buf_barrier.dstStageMask;
       existing_barrier.dstAccessMask |= buf_barrier.dstAccessMask;
     } else {
       bc->_buffer_barrier_index = _buffer_barriers.size();
       _buffer_barriers.push_back(std::move(buf_barrier));
       bc->_pooled_barrier_exists = true;
     }
-    _barrier_src_stage_mask |= src_stage_mask;
-    _barrier_dst_stage_mask |= dst_stage_mask;
   }
   else {
     // We already have an access done in this CB, issue the barrier now.
-    vkCmdPipelineBarrier(_cmd, src_stage_mask, dst_stage_mask, 0,
-                         0, nullptr, 1, &buf_barrier, 0, nullptr);
+    VkDependencyInfo info = {
+      VK_STRUCTURE_TYPE_DEPENDENCY_INFO,
+      nullptr, // pNext
+      0, // dependencyFlags
+      0, // memoryBarrierCount
+      nullptr, // pMemoryBarriers
+      1, // bufferMemoryBarrierCount
+      &buf_barrier, // pBufferMemoryBarriers
+      0, // imageMemoryBarrierCount
+      nullptr, // pImageMemoryBarriers
+    };
+    vkCmdPipelineBarrier2(_cmd, &info);
 
     bc->_pooled_barrier_exists = false;
   }
@@ -311,9 +337,9 @@ add_barrier(VulkanBufferContext *bc, VkPipelineStageFlags dst_stage_mask,
     // another read later from a different (earlier) stage, which is why we
     // don't zero out _write_stage_mask.  We can just check _read_stage_mask
     // the next time to see what we have already synchronized with the write.
-    bc->_read_stage_mask |= dst_stage_mask & ~VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
+    bc->_read_stage_mask |= dst_stage_mask & ~VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT;
 
-    if (dst_stage_mask & (VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT | VK_PIPELINE_STAGE_ALL_COMMANDS_BIT)) {
+    if (dst_stage_mask & (VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT | VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT)) {
       // Actually, looks like we've synchronized all stages.  We still do need
       // to keep _read_stage_mask, since a subsequent write still needs to
       // wait for this read to complete.

+ 6 - 8
panda/src/vulkandisplay/vulkanCommandBuffer.h

@@ -41,11 +41,11 @@ public:
   }
 
   void add_barrier(VulkanTextureContext *tc, VkImageLayout layout,
-                   VkPipelineStageFlags stage_mask,
-                   VkAccessFlags access_mask = 0);
+                   VkPipelineStageFlags2 stage_mask,
+                   VkAccessFlags2 access_mask = 0);
   void add_barrier(VulkanBufferContext *bc,
-                   VkPipelineStageFlags stage_mask,
-                   VkAccessFlags access_mask = 0);
+                   VkPipelineStageFlags2 stage_mask,
+                   VkAccessFlags2 access_mask = 0);
 
 public:
   VkCommandBuffer _cmd = VK_NULL_HANDLE;
@@ -58,10 +58,8 @@ public:
 
   // These barriers need to be issued BEFORE the command buffer (usually the
   // barrier is added to the previous command buffer).
-  pvector<VkImageMemoryBarrier> _image_barriers;
-  pvector<VkBufferMemoryBarrier> _buffer_barriers;
-  VkPipelineStageFlags _barrier_src_stage_mask = 0;
-  VkPipelineStageFlags _barrier_dst_stage_mask = 0;
+  pvector<VkImageMemoryBarrier2> _image_barriers;
+  pvector<VkBufferMemoryBarrier2> _buffer_barriers;
 };
 
 #include "vulkanCommandBuffer.I"

+ 30 - 30
panda/src/vulkandisplay/vulkanGraphicsBuffer.cxx

@@ -162,9 +162,9 @@ begin_frame(FrameMode mode, Thread *current_thread) {
     attach._tc->set_active(true);
 
     VkImageLayout layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
-    VkAccessFlags write_access_mask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
-    VkAccessFlags read_access_mask = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT;
-    VkPipelineStageFlags stage_mask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
+    VkAccessFlags2 write_access_mask = VK_ACCESS_2_COLOR_ATTACHMENT_WRITE_BIT;
+    VkAccessFlags2 read_access_mask = VK_ACCESS_2_COLOR_ATTACHMENT_READ_BIT;
+    VkPipelineStageFlags2 stage_mask = VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT;
 
     if (attach._plane == RTP_stencil || attach._plane == RTP_depth ||
         attach._plane == RTP_depth_stencil) {
@@ -173,10 +173,10 @@ begin_frame(FrameMode mode, Thread *current_thread) {
       vkgsg->_fb_depth_tc = attach._tc;
 
       layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
-      stage_mask = VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT
-                 | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
-      write_access_mask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
-      read_access_mask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT;
+      stage_mask = VK_PIPELINE_STAGE_2_EARLY_FRAGMENT_TESTS_BIT
+                 | VK_PIPELINE_STAGE_2_LATE_FRAGMENT_TESTS_BIT;
+      write_access_mask = VK_ACCESS_2_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
+      read_access_mask = VK_ACCESS_2_DEPTH_STENCIL_ATTACHMENT_READ_BIT;
 
       if (get_clear_depth_active()) {
         depth_attachment.clearValue.depthStencil.depth = get_clear_depth();
@@ -245,18 +245,18 @@ begin_frame(FrameMode mode, Thread *current_thread) {
     attach._tc->mark_used_this_frame(frame_data);
 
     VkImageLayout layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
-    VkAccessFlags write_access_mask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
-    VkAccessFlags read_access_mask = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT;
-    VkPipelineStageFlags stage_mask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
+    VkAccessFlags2 write_access_mask = VK_ACCESS_2_COLOR_ATTACHMENT_WRITE_BIT;
+    VkAccessFlags2 read_access_mask = VK_ACCESS_2_COLOR_ATTACHMENT_READ_BIT;
+    VkPipelineStageFlags2 stage_mask = VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT;
 
     if (attach._plane == RTP_stencil || attach._plane == RTP_depth ||
         attach._plane == RTP_depth_stencil) {
       vkgsg->_fb_depth_tc = attach._tc;
       layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
-      stage_mask = VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT
-                 | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
-      write_access_mask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
-      read_access_mask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT;
+      stage_mask = VK_PIPELINE_STAGE_2_EARLY_FRAGMENT_TESTS_BIT
+                 | VK_PIPELINE_STAGE_2_LATE_FRAGMENT_TESTS_BIT;
+      write_access_mask = VK_ACCESS_2_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
+      read_access_mask = VK_ACCESS_2_DEPTH_STENCIL_ATTACHMENT_READ_BIT;
     }
     else if (attach._plane == RTP_color) {
       vkgsg->_fb_color_tc = attach._tc;
@@ -314,18 +314,18 @@ end_frame(FrameMode mode, Thread *current_thread) {
       attach._tc->_layout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
 
       // This seems to squelch a validation warning, not sure about this yet
-      attach._tc->_write_stage_mask |= VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
+      attach._tc->_write_stage_mask |= VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT;
     }*/
 
     for (Attachment &attach : _attachments) {
       if (attach._plane == RTP_stencil || attach._plane == RTP_depth ||
           attach._plane == RTP_depth_stencil) {
-        attach._tc->_write_stage_mask = VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT
-                                      | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
-        attach._tc->_write_access_mask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
+        attach._tc->_write_stage_mask = VK_PIPELINE_STAGE_2_EARLY_FRAGMENT_TESTS_BIT
+                                      | VK_PIPELINE_STAGE_2_LATE_FRAGMENT_TESTS_BIT;
+        attach._tc->_write_access_mask = VK_ACCESS_2_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
       } else {
-        attach._tc->_write_stage_mask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
-        attach._tc->_write_access_mask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
+        attach._tc->_write_stage_mask = VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT;
+        attach._tc->_write_access_mask = VK_ACCESS_2_COLOR_ATTACHMENT_WRITE_BIT;
       }
       attach._tc->_read_stage_mask = 0;
       attach._tc->_write_seq = vkgsg->_render_cmd._seq;
@@ -465,7 +465,7 @@ setup_render_pass() {
   dependency.srcSubpass = VK_SUBPASS_EXTERNAL;
   dependency.dstSubpass = 0;
   dependency.srcStageMask = 0;
-  dependency.dstStageMask = VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT;
+  dependency.dstStageMask = VK_PIPELINE_STAGE_2_ALL_GRAPHICS_BIT;
   dependency.srcAccessMask = 0;
   dependency.dstAccessMask = 0;
   dependency.dependencyFlags = 0;
@@ -495,10 +495,10 @@ setup_render_pass() {
       attach.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
     }
 
-    dependency.srcStageMask |= VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
-    dependency.srcAccessMask |= VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
-    dependency.dstAccessMask |= VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
-                                VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
+    dependency.srcStageMask |= VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT;
+    dependency.srcAccessMask |= VK_ACCESS_2_COLOR_ATTACHMENT_WRITE_BIT;
+    dependency.dstAccessMask |= VK_ACCESS_2_COLOR_ATTACHMENT_READ_BIT |
+                                VK_ACCESS_2_COLOR_ATTACHMENT_WRITE_BIT;
 
     color_reference.attachment = ai++;
     have_color_reference = true;
@@ -534,17 +534,17 @@ setup_render_pass() {
       attach.stencilStoreOp = VK_ATTACHMENT_STORE_OP_STORE;
     }
 
-    dependency.srcStageMask |= VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
-    dependency.dstStageMask |= VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT;
-    dependency.srcAccessMask |= VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
-    dependency.dstAccessMask |= VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT;
+    dependency.srcStageMask |= VK_PIPELINE_STAGE_2_LATE_FRAGMENT_TESTS_BIT;
+    dependency.dstStageMask |= VK_PIPELINE_STAGE_2_EARLY_FRAGMENT_TESTS_BIT;
+    dependency.srcAccessMask |= VK_ACCESS_2_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
+    dependency.dstAccessMask |= VK_ACCESS_2_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | VK_ACCESS_2_DEPTH_STENCIL_ATTACHMENT_READ_BIT;
 
     depth_reference.attachment = ai++;
     have_depth_reference = true;
   }
 
   if (dependency.srcStageMask == 0) {
-    dependency.srcStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
+    dependency.srcStageMask = VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT;
   }
 
   VkSubpassDescription subpass;

+ 132 - 85
panda/src/vulkandisplay/vulkanGraphicsStateGuardian.cxx

@@ -150,6 +150,14 @@ reset() {
   enabled_features.pNext = &v_1_2_features;
 #endif
 
+  // synchronization2 from 1.3 core
+  VkPhysicalDeviceSynchronization2Features sync2_features = {
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SYNCHRONIZATION_2_FEATURES,
+    enabled_features.pNext,
+    VK_TRUE,
+  };
+  enabled_features.pNext = &sync2_features;
+
   VkPhysicalDeviceDynamicRenderingFeatures dr_features = {
     VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DYNAMIC_RENDERING_FEATURES,
     enabled_features.pNext,
@@ -336,6 +344,7 @@ reset() {
   _vkCmdDraw = (PFN_vkCmdDraw)vkGetDeviceProcAddr(_device, "vkCmdDraw");
   _vkCmdDrawIndexed = (PFN_vkCmdDrawIndexed)vkGetDeviceProcAddr(_device, "vkCmdDrawIndexed");
   _vkCmdPushConstants = (PFN_vkCmdPushConstants)vkGetDeviceProcAddr(_device, "vkCmdPushConstants");
+  _vkCmdWriteTimestamp2 = (PFN_vkCmdWriteTimestamp2)vkGetDeviceProcAddr(_device, "vkCmdWriteTimestamp2");
   _vkUpdateDescriptorSets = (PFN_vkUpdateDescriptorSets)vkGetDeviceProcAddr(_device, "vkUpdateDescriptorSets");
 
   if (_supports_dynamic_rendering) {
@@ -999,7 +1008,7 @@ allocate_memory(VulkanMemoryBlock &block, const VkMemoryRequirements &reqs,
  */
 VulkanTextureContext *VulkanGraphicsStateGuardian::
 use_texture(Texture *texture, VkImageLayout layout,
-            VkPipelineStageFlags stage_mask, VkAccessFlags access_mask,
+            VkPipelineStageFlags2 stage_mask, VkAccessFlags2 access_mask,
             bool discard) {
   nassertr(_render_cmd, nullptr);
 
@@ -1644,8 +1653,8 @@ upload_texture(VulkanTextureContext *tc, CompletionToken token) {
     // Issue a command to transition the image into a layout optimal for
     // transferring into.
     _transfer_cmd.add_barrier(tc, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
-                              VK_PIPELINE_STAGE_TRANSFER_BIT,
-                              VK_ACCESS_TRANSFER_WRITE_BIT);
+                              VK_PIPELINE_STAGE_2_TRANSFER_BIT,
+                              VK_ACCESS_2_TRANSFER_WRITE_BIT);
 
     // Schedule a copy from our staging buffer to the image.
     VkBufferImageCopy region = {};
@@ -2498,8 +2507,8 @@ release_index_buffer(IndexBufferContext *context) {
  * Prepares the buffer for the given usage of the buffer.
  */
 VulkanBufferContext *VulkanGraphicsStateGuardian::
-use_shader_buffer(ShaderBuffer *buffer, VkPipelineStageFlags stage_mask,
-                  VkAccessFlags access_mask) {
+use_shader_buffer(ShaderBuffer *buffer, VkPipelineStageFlags2 stage_mask,
+                  VkAccessFlags2 access_mask) {
   nassertr(_render_cmd, nullptr);
 
   VulkanBufferContext *bc;
@@ -2579,25 +2588,32 @@ prepare_shader_buffer(ShaderBuffer *data) {
     }
     _data_transferred_pcollector.add_level(data_size);
 
-    VkBufferMemoryBarrier barrier;
+    VkBufferMemoryBarrier2 barrier;
-    barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
+    barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER_2;
     barrier.pNext = nullptr;
-    barrier.srcAccessMask = use_staging_buffer ? VK_ACCESS_TRANSFER_WRITE_BIT : VK_ACCESS_HOST_WRITE_BIT;
-    barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
+    barrier.srcStageMask = use_staging_buffer ? VK_PIPELINE_STAGE_2_TRANSFER_BIT : VK_PIPELINE_STAGE_2_HOST_BIT;
+    barrier.srcAccessMask = use_staging_buffer ? VK_ACCESS_2_TRANSFER_WRITE_BIT : VK_ACCESS_2_HOST_WRITE_BIT;
+    barrier.dstStageMask = VK_PIPELINE_STAGE_2_VERTEX_SHADER_BIT
+                         | VK_PIPELINE_STAGE_2_TESSELLATION_CONTROL_SHADER_BIT
+                         | VK_PIPELINE_STAGE_2_TESSELLATION_EVALUATION_SHADER_BIT
+                         | VK_PIPELINE_STAGE_2_GEOMETRY_SHADER_BIT
+                         | VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT
+                         | VK_PIPELINE_STAGE_2_COMPUTE_SHADER_BIT;
+    barrier.dstAccessMask = VK_ACCESS_2_SHADER_READ_BIT;
     barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
     barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
     barrier.buffer = bc->_buffer;
     barrier.offset = 0;
     barrier.size = VK_WHOLE_SIZE;
-    vkCmdPipelineBarrier(_transfer_cmd,
-                         use_staging_buffer ? VK_PIPELINE_STAGE_TRANSFER_BIT : VK_PIPELINE_STAGE_HOST_BIT,
-                         VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
-                         VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
-                         VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT |
-                         VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
-                         VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
-                         VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
-                         0, 0, nullptr, 1, &barrier, 0, nullptr);
+
+    VkDependencyInfo info = {
+      VK_STRUCTURE_TYPE_DEPENDENCY_INFO,
+      nullptr, 0, // pNext, dependencyFlags
+      0, nullptr, // memory barriers
+      1, &barrier, // buffer barriers
+      0, nullptr, // image barriers
+    };
+    vkCmdPipelineBarrier2(_transfer_cmd, &info);
   }
 
   //bc->enqueue_lru(&_prepared_objects->_graphics_memory_lru);
@@ -2655,7 +2671,7 @@ issue_timer_query(int pstats_index) {
   uint32_t query = get_next_timer_query(pstats_index);
 
   bool is_end = pstats_index & 0x8000;
-  vkCmdWriteTimestamp(_render_cmd, is_end ? VK_PIPELINE_STAGE_ALL_COMMANDS_BIT : VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, _timer_query_pool, query);
+  _vkCmdWriteTimestamp2(_render_cmd, is_end ? VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT : VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT, _timer_query_pool, query);
 }
 
 /**
@@ -3135,7 +3151,7 @@ begin_frame(Thread *current_thread, VkSemaphore wait_for) {
       // Issue the first timer query on the transfer command buffer, since that
       // marks the first command we will submit belonging to this frame.
       uint32_t query = get_next_timer_query(0);
-      vkCmdWriteTimestamp(_transfer_cmd, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, _timer_query_pool, query);
+      _vkCmdWriteTimestamp2(_transfer_cmd, VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT, _timer_query_pool, query);
     }
 #endif
   }
@@ -3183,7 +3199,7 @@ begin_frame(Thread *current_thread, VkSemaphore wait_for) {
     _transfer_end_query = get_next_timer_query(_wait_semaphore_pcollector.get_index());
     _transfer_end_query_pool = _timer_query_pool;
     uint32_t query = get_next_timer_query(_wait_semaphore_pcollector.get_index() | 0x8000);
-    vkCmdWriteTimestamp(_render_cmd, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, _timer_query_pool, query);
+    _vkCmdWriteTimestamp2(_render_cmd, VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT, _timer_query_pool, query);
   } else {
     _transfer_end_query_pool = VK_NULL_HANDLE;
   }
@@ -3204,7 +3220,7 @@ end_frame(Thread *current_thread, VkSemaphore signal_done) {
 
 #ifdef DO_PSTATS
   if (_transfer_end_query_pool != VK_NULL_HANDLE) {
-    vkCmdWriteTimestamp(_transfer_cmd, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
+    _vkCmdWriteTimestamp2(_transfer_cmd, VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT,
                         _transfer_end_query_pool, _transfer_end_query);
     _transfer_end_query_pool = VK_NULL_HANDLE;
   }
@@ -3764,16 +3780,16 @@ end_command_buffer(VulkanCommandBuffer &&cmd, VkSemaphore signal_done) {
       _pending_submissions.push_back({VK_NULL_HANDLE, VK_NULL_HANDLE, 0u, 1u});
     }
 
-    vkCmdPipelineBarrier(_pending_command_buffers.back(),
-                         cmd._barrier_src_stage_mask,
-                         cmd._barrier_dst_stage_mask,
-                         0, 0, nullptr,
-                         cmd._buffer_barriers.size(), cmd._buffer_barriers.data(),
-                         cmd._image_barriers.size(), cmd._image_barriers.data());
-    cmd._buffer_barriers.clear();
-    cmd._image_barriers.clear();
-    cmd._barrier_src_stage_mask = 0;
-    cmd._barrier_dst_stage_mask = 0;
+    VkDependencyInfo info = {
+      VK_STRUCTURE_TYPE_DEPENDENCY_INFO,
+      nullptr, // pNext
+      0, // dependencyFlags
+      0, // memoryBarrierCount
+      nullptr, // pMemoryBarriers
+      (uint32_t)cmd._buffer_barriers.size(), cmd._buffer_barriers.data(),
+      (uint32_t)cmd._image_barriers.size(), cmd._image_barriers.data(),
+    };
+    vkCmdPipelineBarrier2(_pending_command_buffers.back(), &info);
   }
 
   size_t i = _pending_command_buffers.size();
@@ -3839,23 +3855,27 @@ flush(VkFence fence) {
 
   PStatTimer timer(_flush_pcollector);
 
-  // We may need to wait until the attachments are available for writing.
-  // TOP_OF_PIPE placates the validation layer, not sure why it's needed.
-  static const VkPipelineStageFlags wait_stage_mask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
+  VkCommandBufferSubmitInfo *cb_infos = (VkCommandBufferSubmitInfo *)alloca(sizeof(VkCommandBufferSubmitInfo) * _pending_command_buffers.size());
+  for (size_t i = 0; i < _pending_command_buffers.size(); ++i) {
+    cb_infos[i] = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_SUBMIT_INFO, nullptr, _pending_command_buffers[i], 0u};
+  }
+
+  VkSemaphoreSubmitInfo *sem_infos = (VkSemaphoreSubmitInfo *)alloca(sizeof(VkSemaphoreSubmitInfo) * _pending_submissions.size() * 2);
+  size_t sem_i = 0;
 
-  VkSubmitInfo *submit_infos = (VkSubmitInfo *)alloca(sizeof(VkSubmitInfo) * _pending_submissions.size());
+  VkSubmitInfo2 *submit_infos = (VkSubmitInfo2 *)alloca(sizeof(VkSubmitInfo2) * _pending_submissions.size());
   for (size_t i = 0; i < _pending_submissions.size(); ++i) {
     auto &pending = _pending_submissions[i];
-    VkSubmitInfo &submit_info = submit_infos[i];
+    VkSubmitInfo2 &submit_info = submit_infos[i];
     submit_info.pNext = nullptr;
-    submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
-    submit_info.waitSemaphoreCount = 0;
-    submit_info.pWaitSemaphores = nullptr;
-    submit_info.pWaitDstStageMask = nullptr;
-    submit_info.commandBufferCount = pending._num_command_buffers;
-    submit_info.pCommandBuffers = &_pending_command_buffers[pending._first_command_buffer];
-    submit_info.signalSemaphoreCount = 0;
-    submit_info.pSignalSemaphores = nullptr;
+    submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO_2;
+    submit_info.flags = 0;
+    submit_info.waitSemaphoreInfoCount = 0;
+    submit_info.pWaitSemaphoreInfos = nullptr;
+    submit_info.commandBufferInfoCount = pending._num_command_buffers;
+    submit_info.pCommandBufferInfos = &cb_infos[pending._first_command_buffer];
+    submit_info.signalSemaphoreInfoCount = 0;
+    submit_info.pSignalSemaphoreInfos = nullptr;
 
 #ifndef NDEBUG
     if (vulkandisplay_cat.is_spam()) {
@@ -3878,16 +3898,30 @@ flush(VkFence fence) {
 #endif
 
     if (pending._wait_semaphore != VK_NULL_HANDLE) {
-      submit_info.waitSemaphoreCount = 1;
-      submit_info.pWaitSemaphores = &pending._wait_semaphore;
-      submit_info.pWaitDstStageMask = &wait_stage_mask;
-
+      // We may need to wait until the attachments are available for writing.
+      // TOP_OF_PIPE placates the validation layer, not sure why it's needed.
+      VkSemaphoreSubmitInfo &sem_info = sem_infos[sem_i++];
+      sem_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_SUBMIT_INFO;
+      sem_info.pNext = nullptr;
+      sem_info.semaphore = pending._wait_semaphore;
+      sem_info.value = 0; // ignored for binary semaphores, but don't submit uninitialized memory
+      sem_info.stageMask = VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT;
+      sem_info.deviceIndex = 0;
+
+      submit_info.waitSemaphoreInfoCount = 1;
+      submit_info.pWaitSemaphoreInfos = &sem_info;
       frame_data._pending_destroy_semaphores.push_back(pending._wait_semaphore);
     }
 
     if (pending._signal_semaphore != VK_NULL_HANDLE) {
-      submit_info.signalSemaphoreCount = 1;
-      submit_info.pSignalSemaphores = &pending._signal_semaphore;
+      VkSemaphoreSubmitInfo &sem_info = sem_infos[sem_i++];
+      sem_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_SUBMIT_INFO;
+      sem_info.pNext = nullptr;
+      sem_info.semaphore = pending._signal_semaphore;
+      sem_info.value = 0; // ignored for binary semaphores, but don't submit uninitialized memory
+      sem_info.stageMask = VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT; //FIXME
+      sem_info.deviceIndex = 0;
+
+      submit_info.signalSemaphoreInfoCount = 1;
+      submit_info.pSignalSemaphoreInfos = &sem_info;
     }
   }
 
@@ -3898,7 +3932,7 @@ flush(VkFence fence) {
   }
 #endif
 
-  VkResult err = vkQueueSubmit(_queue, _pending_submissions.size(), submit_infos, fence);
+  VkResult err = vkQueueSubmit2(_queue, _pending_submissions.size(), submit_infos, fence);
   if (err) {
     vulkan_error(err, "Error submitting command buffers");
     if (err == VK_ERROR_DEVICE_LOST) {
@@ -4199,7 +4233,7 @@ framebuffer_copy_to_texture(Texture *tex, int view, int z,
 
   VulkanTextureContext *tc;
   tc = use_texture(tex, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
-                   VK_PIPELINE_STAGE_TRANSFER_BIT, VK_ACCESS_TRANSFER_WRITE_BIT,
+                   VK_PIPELINE_STAGE_2_TRANSFER_BIT, VK_ACCESS_2_TRANSFER_WRITE_BIT,
                    true);
   nassertr(tc != nullptr, false);
 
@@ -4213,8 +4247,8 @@ framebuffer_copy_to_texture(Texture *tex, int view, int z,
   // Issue a command to transition the image into a layout optimal for
   // transferring from.
   _render_cmd.add_barrier(fbtc, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
-                          VK_PIPELINE_STAGE_TRANSFER_BIT,
-                          VK_ACCESS_TRANSFER_READ_BIT);
+                          VK_PIPELINE_STAGE_2_TRANSFER_BIT,
+                          VK_ACCESS_2_TRANSFER_READ_BIT);
 
   if (fbtc->_format == tc->_format) {
     // The formats are the same.  This is just an image copy.
@@ -4343,8 +4377,8 @@ do_extract_image(VulkanTextureContext *tc, Texture *tex, int view, int z, Screen
   // Issue a command to transition the image into a layout optimal for
   // transferring from.
   cmd.add_barrier(tc, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
-                  VK_PIPELINE_STAGE_TRANSFER_BIT,
-                  VK_ACCESS_TRANSFER_READ_BIT);
+                  VK_PIPELINE_STAGE_2_TRANSFER_BIT,
+                  VK_ACCESS_2_TRANSFER_READ_BIT);
 
   if (tc->_image != VK_NULL_HANDLE) {
     VkBufferImageCopy region;
@@ -4410,20 +4444,27 @@ do_extract_buffer(VulkanBufferContext *bc, vector_uchar &data) {
       return false;
     }
 
-    VkBufferMemoryBarrier barrier;
+    VkBufferMemoryBarrier2 barrier;
-    barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
+    barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER_2;
     barrier.pNext = nullptr;
-    barrier.srcAccessMask = VK_ACCESS_MEMORY_WRITE_BIT;
-    barrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
+    barrier.srcStageMask = VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT;
+    barrier.srcAccessMask = VK_ACCESS_2_MEMORY_WRITE_BIT;
+    barrier.dstStageMask = VK_PIPELINE_STAGE_2_TRANSFER_BIT;
+    barrier.dstAccessMask = VK_ACCESS_2_TRANSFER_READ_BIT;
     barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
     barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
     barrier.buffer = bc->_buffer;
     barrier.offset = 0;
     barrier.size = VK_WHOLE_SIZE;
-    vkCmdPipelineBarrier(_transfer_cmd,
-                         VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
-                         VK_PIPELINE_STAGE_TRANSFER_BIT,
-                         0, 0, nullptr, 1, &barrier, 0, nullptr);
+
+    VkDependencyInfo info = {
+      VK_STRUCTURE_TYPE_DEPENDENCY_INFO,
+      nullptr, 0, // pNext, dependencyFlags
+      0, nullptr, // memory barriers
+      1, &barrier, // buffer barriers
+      0, nullptr, // image barriers
+    };
+    vkCmdPipelineBarrier2(_transfer_cmd, &info);
 
     VkBufferCopy region;
     region.srcOffset = 0;
@@ -4432,30 +4473,36 @@ do_extract_buffer(VulkanBufferContext *bc, vector_uchar &data) {
     vkCmdCopyBuffer(_transfer_cmd, bc->_buffer, tmp_buffer, 1, &region);
 
     // Issue a new barrier to make the copy visible on the host.
-    barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
-    barrier.dstAccessMask = VK_ACCESS_HOST_READ_BIT;
+    barrier.srcStageMask = VK_PIPELINE_STAGE_2_TRANSFER_BIT;
+    barrier.srcAccessMask = VK_ACCESS_2_TRANSFER_WRITE_BIT;
+    barrier.dstStageMask = VK_PIPELINE_STAGE_2_HOST_BIT;
+    barrier.dstAccessMask = VK_ACCESS_2_HOST_READ_BIT;
     barrier.buffer = tmp_buffer;
     barrier.offset = 0;
     barrier.size = VK_WHOLE_SIZE;
-    vkCmdPipelineBarrier(_transfer_cmd,
-                         VK_PIPELINE_STAGE_TRANSFER_BIT,
-                         VK_PIPELINE_STAGE_HOST_BIT,
-                         0, 0, nullptr, 1, &barrier, 0, nullptr);
+    vkCmdPipelineBarrier2(_transfer_cmd, &info);
   } else {
-    VkBufferMemoryBarrier barrier;
+    VkBufferMemoryBarrier2 barrier;
-    barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
+    barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER_2;
     barrier.pNext = nullptr;
-    barrier.srcAccessMask = VK_ACCESS_MEMORY_WRITE_BIT;
-    barrier.dstAccessMask = VK_ACCESS_HOST_READ_BIT;
+    barrier.srcStageMask = VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT;
+    barrier.srcAccessMask = VK_ACCESS_2_MEMORY_WRITE_BIT;
+    barrier.dstStageMask = VK_PIPELINE_STAGE_2_HOST_BIT;
+    barrier.dstAccessMask = VK_ACCESS_2_HOST_READ_BIT;
     barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
     barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
     barrier.buffer = bc->_buffer;
     barrier.offset = 0;
     barrier.size = VK_WHOLE_SIZE;
-    vkCmdPipelineBarrier(_transfer_cmd,
-                         VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
-                         VK_PIPELINE_STAGE_HOST_BIT,
-                         0, 0, nullptr, 1, &barrier, 0, nullptr);
+
+    VkDependencyInfo info = {
+      VK_STRUCTURE_TYPE_DEPENDENCY_INFO,
+      nullptr, 0, // pNext, dependencyFlags
+      0, nullptr, // memory barriers
+      1, &barrier, // buffer barriers
+      0, nullptr, // image barriers
+    };
+    vkCmdPipelineBarrier2(_transfer_cmd, &info);
   }
 
   VkFence fence = create_fence();
@@ -5518,29 +5565,29 @@ update_lattr_descriptor_set(VkDescriptorSet ds, const LightAttrib *attr) {
 
     // We don't know at this point which stages is using them, and finding out
     // would require duplication of descriptor sets, so we flag all stages.
-    VkPipelineStageFlags stage_flags = 0
-      | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT
-      | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT
-      | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT
+    VkPipelineStageFlags2 stage_flags = 0
+      | VK_PIPELINE_STAGE_2_VERTEX_SHADER_BIT
+      | VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT
+      | VK_PIPELINE_STAGE_2_COMPUTE_SHADER_BIT
       ;
 
     if (_supported_shader_caps & ShaderModule::C_tessellation_shader) {
-      //stage_flags |= VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT;
-      //stage_flags |= VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT;
+      //stage_flags |= VK_PIPELINE_STAGE_2_TESSELLATION_CONTROL_SHADER_BIT;
+      //stage_flags |= VK_PIPELINE_STAGE_2_TESSELLATION_EVALUATION_SHADER_BIT;
     }
     if (_supported_shader_caps & ShaderModule::C_geometry_shader) {
-      //stage_flags |= VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT;
+      //stage_flags |= VK_PIPELINE_STAGE_2_GEOMETRY_SHADER_BIT;
     }
 
     VulkanTextureContext *tc;
     tc = use_texture(texture, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
-                     stage_flags, VK_ACCESS_SHADER_READ_BIT);
+                     stage_flags, VK_ACCESS_2_SHADER_READ_BIT);
 
     if (tc == nullptr) {
       // We can't bind this because we're currently rendering into it.
       texture = dummy;
       tc = use_texture(texture, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
-                       stage_flags, VK_ACCESS_SHADER_READ_BIT);
+                       stage_flags, VK_ACCESS_2_SHADER_READ_BIT);
     }
 
     VkDescriptorImageInfo &image_info = image_infos[i];

+ 5 - 4
panda/src/vulkandisplay/vulkanGraphicsStateGuardian.h

@@ -57,8 +57,8 @@ public:
                        VkFlags required_flags, bool linear);
 
   VulkanTextureContext *use_texture(Texture *texture, VkImageLayout layout,
-                                    VkPipelineStageFlags stage_mask,
-                                    VkAccessFlags access_mask,
+                                    VkPipelineStageFlags2 stage_mask,
+                                    VkAccessFlags2 access_mask,
                                     bool discard=false);
   virtual TextureContext *prepare_texture(Texture *tex);
   bool create_texture(VulkanTextureContext *vtc);
@@ -91,8 +91,8 @@ public:
   virtual void release_index_buffer(IndexBufferContext *ibc);
 
   VulkanBufferContext *use_shader_buffer(ShaderBuffer *buffer,
-                                         VkPipelineStageFlags stage_mask,
-                                         VkAccessFlags access_mask);
+                                         VkPipelineStageFlags2 stage_mask,
+                                         VkAccessFlags2 access_mask);
   virtual BufferContext *prepare_shader_buffer(ShaderBuffer *data);
   virtual void release_shader_buffer(BufferContext *bc);
   virtual bool extract_shader_buffer_data(ShaderBuffer *buffer, vector_uchar &data);
@@ -377,6 +377,7 @@ private:
   PFN_vkCmdSetPatchControlPointsEXT _vkCmdSetPatchControlPointsEXT;
   PFN_vkCmdSetPrimitiveRestartEnable _vkCmdSetPrimitiveRestartEnable;
   PFN_vkCmdSetPrimitiveTopology _vkCmdSetPrimitiveTopology;
+  PFN_vkCmdWriteTimestamp2 _vkCmdWriteTimestamp2;
   PFN_vkUpdateDescriptorSets _vkUpdateDescriptorSets;
 
   friend class VulkanGraphicsBuffer;

+ 41 - 41
panda/src/vulkandisplay/vulkanGraphicsWindow.cxx

@@ -201,7 +201,7 @@ begin_frame(FrameMode mode, Thread *current_thread) {
   color_attachment.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
 
   // Reset this to reflect getting this texture fresh from the present engine.
-  color_tc->_write_stage_mask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
+  color_tc->_write_stage_mask = VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT;
   color_tc->_write_access_mask = 0;
   color_tc->_read_stage_mask = 0;
 
@@ -214,15 +214,15 @@ begin_frame(FrameMode mode, Thread *current_thread) {
     color_tc->_write_seq = vkgsg->_render_cmd._seq;
   }
   vkgsg->_render_cmd.add_barrier(color_tc, color_attachment.imageLayout,
-                                 VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
-                                 VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT);
+                                 VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT,
+                                 VK_ACCESS_2_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_2_COLOR_ATTACHMENT_WRITE_BIT);
   /*if (color_tc->_layout != color_attachment.imageLayout ||
-      (color_tc->_write_stage_mask & ~VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT) != 0 ||
-      (color_tc->_read_stage_mask & ~VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT) != 0) {
+      (color_tc->_write_stage_mask & ~VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT) != 0 ||
+      (color_tc->_read_stage_mask & ~VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT) != 0) {
     frame_data.add_initial_barrier(color_tc,
       color_attachment.imageLayout,
-      VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
-      VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT);
+      VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT,
+      VK_ACCESS_2_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_2_COLOR_ATTACHMENT_WRITE_BIT);
   }*/
 
   VkRenderingAttachmentInfo depth_attachment = {VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_INFO};
@@ -260,18 +260,18 @@ begin_frame(FrameMode mode, Thread *current_thread) {
     }
 
     /*if (_depth_stencil_tc->_layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL ||
-        (_depth_stencil_tc->_write_stage_mask & ~VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT) != 0 ||
-        (_depth_stencil_tc->_read_stage_mask & ~VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT) != 0) {
+        (_depth_stencil_tc->_write_stage_mask & ~VK_PIPELINE_STAGE_2_LATE_FRAGMENT_TESTS_BIT) != 0 ||
+        (_depth_stencil_tc->_read_stage_mask & ~VK_PIPELINE_STAGE_2_EARLY_FRAGMENT_TESTS_BIT) != 0) {
       frame_data.add_initial_barrier(_depth_stencil_tc,
         VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
-        VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT,
-        VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT);
+        VK_PIPELINE_STAGE_2_EARLY_FRAGMENT_TESTS_BIT,
+        VK_ACCESS_2_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_2_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT);
     }*/
 
     vkgsg->_render_cmd.add_barrier(_depth_stencil_tc,
       VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
-      VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT,
-      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT);
+      VK_PIPELINE_STAGE_2_EARLY_FRAGMENT_TESTS_BIT,
+      VK_ACCESS_2_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_2_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT);
   }
 
   vkgsg->_vkCmdBeginRendering(cmd, &render_info);
@@ -309,20 +309,20 @@ begin_frame(FrameMode mode, Thread *current_thread) {
     }
 
     if (color_tc->_layout != VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL ||
-        (color_tc->_write_stage_mask & ~VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT) != 0 ||
-        (color_tc->_read_stage_mask & ~VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT) != 0) {
+        (color_tc->_write_stage_mask & ~VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT) != 0 ||
+        (color_tc->_read_stage_mask & ~VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT) != 0) {
       frame_data.add_initial_barrier(color_tc,
         VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
-        VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
-        VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT);
+        VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT,
+        VK_ACCESS_2_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_2_COLOR_ATTACHMENT_WRITE_BIT);
     }
   }
   else {
     // This transition will be made when the render pass ends.
     color_tc->_layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
-    color_tc->_read_stage_mask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
-    color_tc->_write_stage_mask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
-    color_tc->_write_access_mask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
+    color_tc->_read_stage_mask = VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT;
+    color_tc->_write_stage_mask = VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT;
+    color_tc->_write_access_mask = VK_ACCESS_2_COLOR_ATTACHMENT_WRITE_BIT;
 
     LColor clear_color = get_clear_color();
     clears[0].color.float32[0] = clear_color[0];
@@ -339,20 +339,20 @@ begin_frame(FrameMode mode, Thread *current_thread) {
     // Transition the depth-stencil image to a consistent state.
     if (!get_clear_depth_active() || !get_clear_stencil_active()) {
       if (_depth_stencil_tc->_layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL ||
-          (_depth_stencil_tc->_write_stage_mask & ~VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT) != 0 ||
-          (_depth_stencil_tc->_read_stage_mask & ~VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT) != 0) {
+          (_depth_stencil_tc->_write_stage_mask & ~VK_PIPELINE_STAGE_2_LATE_FRAGMENT_TESTS_BIT) != 0 ||
+          (_depth_stencil_tc->_read_stage_mask & ~VK_PIPELINE_STAGE_2_EARLY_FRAGMENT_TESTS_BIT) != 0) {
         frame_data.add_initial_barrier(_depth_stencil_tc,
           VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
-          VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT,
-          VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT);
+          VK_PIPELINE_STAGE_2_EARLY_FRAGMENT_TESTS_BIT,
+          VK_ACCESS_2_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_2_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT);
       }
     }
     else {
       // This transition will be made when the first subpass is started.
       _depth_stencil_tc->_layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
-      _depth_stencil_tc->_write_access_mask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
-      _depth_stencil_tc->_write_stage_mask = VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
-      _depth_stencil_tc->_read_stage_mask = VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT;
+      _depth_stencil_tc->_write_access_mask = VK_ACCESS_2_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
+      _depth_stencil_tc->_write_stage_mask = VK_PIPELINE_STAGE_2_LATE_FRAGMENT_TESTS_BIT;
+      _depth_stencil_tc->_read_stage_mask = VK_PIPELINE_STAGE_2_EARLY_FRAGMENT_TESTS_BIT;
     }
 
     if (get_clear_depth_active() || get_clear_stencil_active()) {
@@ -394,14 +394,14 @@ end_frame(FrameMode mode, Thread *current_thread) {
 
     // The driver implicitly transitioned this to the final layout.
     buffer._tc->_layout = _final_layout;*/
-    //buffer._tc->mark_written(VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
-    //                         VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT);
+    //buffer._tc->mark_written(VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT,
+    //                         VK_ACCESS_2_COLOR_ATTACHMENT_WRITE_BIT);
 
     if (_depth_stencil_tc != nullptr) {
       //_depth_stencil_tc->_layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
       //_depth_stencil_tc->mark_written(
-      //  VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
-      //  VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT);
+      //  VK_PIPELINE_STAGE_2_LATE_FRAGMENT_TESTS_BIT,
+      //  VK_ACCESS_2_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_2_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT);
     }
 
     // Now we can do copy-to-texture, now that the render pass has ended.
@@ -413,7 +413,7 @@ end_frame(FrameMode mode, Thread *current_thread) {
     //if (buffer._tc->_layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) {
     vkgsg->_render_cmd.add_barrier(buffer._tc,
                                    VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,
-                                   VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
+                                   VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT,
                                    0);
     //}
   }
@@ -829,11 +829,11 @@ setup_render_pass() {
   VkSubpassDependency dependency;
   dependency.srcSubpass = VK_SUBPASS_EXTERNAL;
   dependency.dstSubpass = 0;
-  dependency.srcStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
-  dependency.dstStageMask = VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT;
+  dependency.srcStageMask = VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT;
+  dependency.dstStageMask = VK_PIPELINE_STAGE_2_ALL_GRAPHICS_BIT;
   dependency.srcAccessMask = 0;
-  dependency.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT
-                           | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
+  dependency.dstAccessMask = VK_ACCESS_2_COLOR_ATTACHMENT_READ_BIT
+                           | VK_ACCESS_2_COLOR_ATTACHMENT_WRITE_BIT;
   dependency.dependencyFlags = 0;
 
   size_t i = 1;
@@ -866,10 +866,10 @@ setup_render_pass() {
     subpass.pDepthStencilAttachment = &depth_reference;
     ++i;
 
-    dependency.srcStageMask = VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
-    dependency.dstStageMask |= VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT;
-    dependency.srcAccessMask |= VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
-    dependency.dstAccessMask |= VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT;
+    dependency.srcStageMask = VK_PIPELINE_STAGE_2_LATE_FRAGMENT_TESTS_BIT;
+    dependency.dstStageMask |= VK_PIPELINE_STAGE_2_EARLY_FRAGMENT_TESTS_BIT;
+    dependency.srcAccessMask |= VK_ACCESS_2_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
+    dependency.dstAccessMask |= VK_ACCESS_2_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | VK_ACCESS_2_DEPTH_STENCIL_ATTACHMENT_READ_BIT;
   }
 
   // Also create an attachment reference for the resolve target.
@@ -1243,7 +1243,7 @@ create_swapchain() {
     }*/
 
     // Don't start rendering until the image has been acquired.
-    //buffer._tc->mark_written(VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, 0);
+    //buffer._tc->mark_written(VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT, 0);
   }
 
   // Create a semaphore for signalling the availability of an image.

+ 14 - 14
panda/src/vulkandisplay/vulkanShaderContext.cxx

@@ -491,22 +491,22 @@ r_extract_resources(const Shader::Parameter &param, const AccessChain &chain,
     desc._pipeline_stage_mask = 0;
 
     if (desc._stage_mask & VK_SHADER_STAGE_VERTEX_BIT) {
-      desc._pipeline_stage_mask |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT;
+      desc._pipeline_stage_mask |= VK_PIPELINE_STAGE_2_VERTEX_SHADER_BIT;
     }
     if (desc._stage_mask & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) {
-      desc._pipeline_stage_mask |= VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT;
+      desc._pipeline_stage_mask |= VK_PIPELINE_STAGE_2_TESSELLATION_CONTROL_SHADER_BIT;
     }
     if (desc._stage_mask & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) {
-      desc._pipeline_stage_mask |= VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT;
+      desc._pipeline_stage_mask |= VK_PIPELINE_STAGE_2_TESSELLATION_EVALUATION_SHADER_BIT;
     }
     if (desc._stage_mask & VK_SHADER_STAGE_GEOMETRY_BIT) {
-      desc._pipeline_stage_mask |= VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT;
+      desc._pipeline_stage_mask |= VK_PIPELINE_STAGE_2_GEOMETRY_SHADER_BIT;
     }
     if (desc._stage_mask & VK_SHADER_STAGE_FRAGMENT_BIT) {
-      desc._pipeline_stage_mask |= VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
+      desc._pipeline_stage_mask |= VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT;
     }
     if (desc._stage_mask & VK_SHADER_STAGE_COMPUTE_BIT) {
-      desc._pipeline_stage_mask |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
+      desc._pipeline_stage_mask |= VK_PIPELINE_STAGE_2_COMPUTE_SHADER_BIT;
     }
 
     if (const ShaderType::SampledImage *sampler = type->as_sampled_image()) {
@@ -698,7 +698,7 @@ fetch_descriptor(VulkanGraphicsStateGuardian *gsg, const Descriptor &desc,
       tc = gsg->use_texture(texture,
                             VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
                             desc._pipeline_stage_mask,
-                            VK_ACCESS_SHADER_READ_BIT);
+                            VK_ACCESS_2_SHADER_READ_BIT);
 
       VulkanSamplerContext *sc;
       DCAST_INTO_R(sc, sampler.prepare_now(pgo, gsg), false);
@@ -722,7 +722,7 @@ fetch_descriptor(VulkanGraphicsStateGuardian *gsg, const Descriptor &desc,
       tc = gsg->use_texture(texture,
                             VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
                             desc._pipeline_stage_mask,
-                            VK_ACCESS_SHADER_READ_BIT);
+                            VK_ACCESS_2_SHADER_READ_BIT);
 
       VkBufferView &texel_buffer_view = *texel_buffer_views++;
       texel_buffer_view = tc ? tc->get_buffer_view(view) : VK_NULL_HANDLE;
@@ -744,12 +744,12 @@ fetch_descriptor(VulkanGraphicsStateGuardian *gsg, const Descriptor &desc,
       PT(Texture) texture = desc._binding->fetch_texture_image(state, id, access, z, n);
       access = access & desc._access;
 
-      VkAccessFlags access_mask = 0;
+      VkAccessFlags2 access_mask = 0;
       if ((access & ShaderType::Access::READ_ONLY) != ShaderType::Access::NONE) {
-        access_mask |= VK_ACCESS_SHADER_READ_BIT;
+        access_mask |= VK_ACCESS_2_SHADER_READ_BIT;
       }
       if ((access & ShaderType::Access::WRITE_ONLY) != ShaderType::Access::NONE) {
-        access_mask |= VK_ACCESS_SHADER_WRITE_BIT;
+        access_mask |= VK_ACCESS_2_SHADER_WRITE_BIT;
       }
 
       VulkanTextureContext *tc;
@@ -775,12 +775,12 @@ fetch_descriptor(VulkanGraphicsStateGuardian *gsg, const Descriptor &desc,
     for (ResourceId id : desc._resource_ids) {
       PT(ShaderBuffer) buffer = desc._binding->fetch_shader_buffer(state, id);
 
-      VkAccessFlags access_mask = 0;
+      VkAccessFlags2 access_mask = 0;
       if ((desc._access & ShaderType::Access::READ_ONLY) != ShaderType::Access::NONE) {
-        access_mask |= VK_ACCESS_SHADER_READ_BIT;
+        access_mask |= VK_ACCESS_2_SHADER_READ_BIT;
       }
       if ((desc._access & ShaderType::Access::WRITE_ONLY) != ShaderType::Access::NONE) {
-        access_mask |= VK_ACCESS_SHADER_WRITE_BIT;
+        access_mask |= VK_ACCESS_2_SHADER_WRITE_BIT;
       }
 
       VulkanBufferContext *bc;

+ 1 - 1
panda/src/vulkandisplay/vulkanShaderContext.h

@@ -148,7 +148,7 @@ private:
     PT(ShaderInputBinding) _binding;
     small_vector<ResourceId, 1> _resource_ids;
     VkShaderStageFlags _stage_mask = 0;
-    VkPipelineStageFlags _pipeline_stage_mask = 0;
+    VkPipelineStageFlags2 _pipeline_stage_mask = 0;
     ShaderType::Access _access = ShaderType::Access::READ_WRITE;
   };
   pvector<Descriptor> _tattr_descriptors;

+ 3 - 3
panda/src/vulkandisplay/vulkanTextureContext.cxx

@@ -173,7 +173,7 @@ clear_color_image(VulkanCommandBuffer &cmd, const VkClearColorValue &value) {
   discard();
 
   cmd.add_barrier(this, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
-                  VK_PIPELINE_STAGE_TRANSFER_BIT, VK_ACCESS_TRANSFER_WRITE_BIT);
+                  VK_PIPELINE_STAGE_2_TRANSFER_BIT, VK_ACCESS_2_TRANSFER_WRITE_BIT);
 
   VkImageSubresourceRange range;
   range.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
@@ -196,7 +196,7 @@ clear_depth_stencil_image(VulkanCommandBuffer &cmd, const VkClearDepthStencilVal
   discard();
 
   cmd.add_barrier(this, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
-                  VK_PIPELINE_STAGE_TRANSFER_BIT, VK_ACCESS_TRANSFER_WRITE_BIT);
+                  VK_PIPELINE_STAGE_2_TRANSFER_BIT, VK_ACCESS_2_TRANSFER_WRITE_BIT);
 
   VkImageSubresourceRange range;
   range.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
@@ -216,7 +216,7 @@ clear_buffer(VulkanCommandBuffer &cmd, uint32_t fill) {
 
   discard();
   cmd.add_barrier(this, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
-                  VK_PIPELINE_STAGE_TRANSFER_BIT, VK_ACCESS_TRANSFER_WRITE_BIT);
+                  VK_PIPELINE_STAGE_2_TRANSFER_BIT, VK_ACCESS_2_TRANSFER_WRITE_BIT);
 
   vkCmdFillBuffer(cmd, _buffer, 0, VK_WHOLE_SIZE, fill);
 }

+ 3 - 3
panda/src/vulkandisplay/vulkanTextureContext.h

@@ -79,11 +79,11 @@ public:
 
   // The "current" layout and details of the last write
   VkImageLayout _layout = VK_IMAGE_LAYOUT_UNDEFINED;
-  VkAccessFlags _write_access_mask = 0;
-  VkPipelineStageFlags _write_stage_mask = 0;
+  VkAccessFlags2 _write_access_mask = 0;
+  VkPipelineStageFlags2 _write_stage_mask = 0;
 
   // Which stages we've already synchronized with the last write.
-  VkPipelineStageFlags _read_stage_mask = 0;
+  VkPipelineStageFlags2 _read_stage_mask = 0;
 
   // If you're wondering why there is no _read_access_mask, read this:
   // https://github.com/KhronosGroup/Vulkan-Docs/issues/131