1 // Copyright 2018 The SwiftShader Authors. All Rights Reserved.
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 //    http://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14 
15 #ifndef VK_PIPELINE_HPP_
16 #define VK_PIPELINE_HPP_
17 
18 #include "VkObject.hpp"
19 #include "Device/Renderer.hpp"
20 #include "Vulkan/VkDescriptorSet.hpp"
21 #include "Vulkan/VkPipelineCache.hpp"
22 #include <memory>
23 
24 namespace sw {
25 
26 class ComputeProgram;
27 class SpirvShader;
28 
29 }  // namespace sw
30 
31 namespace vk {
32 
33 namespace dbg {
34 class Context;
35 }  // namespace dbg
36 
37 class PipelineCache;
38 class PipelineLayout;
39 class ShaderModule;
40 class Device;
41 
// Abstract base class for Vulkan pipeline objects. Stores the pipeline
// layout and the owning device; concrete subclasses (GraphicsPipeline,
// ComputePipeline) implement destroyPipeline() to release their resources.
class Pipeline
{
public:
	Pipeline(PipelineLayout const *layout, const Device *device);
	virtual ~Pipeline() = default;

	// Converts this implementation object to its dispatchable Vulkan handle.
	operator VkPipeline()
	{
		return vk::TtoVkT<Pipeline, VkPipeline>(this);
	}

	// Recovers the implementation object from a Vulkan handle.
	static inline Pipeline *Cast(VkPipeline object)
	{
		return vk::VkTtoT<Pipeline, VkPipeline>(object);
	}

	// Public entry point for vkDestroyPipeline; forwards to the
	// subclass-specific destroyPipeline() implementation.
	void destroy(const VkAllocationCallbacks *pAllocator)
	{
		destroyPipeline(pAllocator);
	}

	// Releases resources owned by the concrete pipeline type.
	virtual void destroyPipeline(const VkAllocationCallbacks *pAllocator) = 0;
#ifndef NDEBUG
	// Debug-only: reports whether this is a graphics or compute pipeline,
	// so callers can assert that a pipeline is bound at the right point.
	virtual VkPipelineBindPoint bindPoint() const = 0;
#endif

	// Returns the pipeline layout this pipeline was created with (non-owning).
	PipelineLayout const *getLayout() const
	{
		return layout;
	}

protected:
	PipelineLayout const *layout = nullptr;  // non-owning; set at construction
	Device const *const device;              // non-owning; device that created this pipeline

	// NOTE(review): const and default-true here — presumably initialized from
	// the device's feature state in the constructor; confirm in VkPipeline.cpp.
	const bool robustBufferAccess = true;
};
79 
// Pipeline implementation for VK_PIPELINE_BIND_POINT_GRAPHICS. Owns the
// vertex and fragment SPIR-V shaders plus the fixed-function state
// (context, scissor, viewport, blend constants) captured at creation.
class GraphicsPipeline : public Pipeline, public ObjectBase<GraphicsPipeline, VkPipeline>
{
public:
	GraphicsPipeline(const VkGraphicsPipelineCreateInfo *pCreateInfo,
	                 void *mem,
	                 const Device *device);
	virtual ~GraphicsPipeline() = default;

	void destroyPipeline(const VkAllocationCallbacks *pAllocator) override;

#ifndef NDEBUG
	VkPipelineBindPoint bindPoint() const override
	{
		return VK_PIPELINE_BIND_POINT_GRAPHICS;
	}
#endif

	// Size of the extra memory block ("mem") the caller must allocate
	// alongside the object for the given create info.
	static size_t ComputeRequiredAllocationSize(const VkGraphicsPipelineCreateInfo *pCreateInfo);

	// Compiles the vertex/fragment stages, consulting pipelineCache to
	// reuse previously built shaders where possible.
	void compileShaders(const VkAllocationCallbacks *pAllocator, const VkGraphicsPipelineCreateInfo *pCreateInfo, PipelineCache *pipelineCache);

	// Number of primitives produced from vertexCount vertices for this
	// pipeline's topology.
	uint32_t computePrimitiveCount(uint32_t vertexCount) const;
	const sw::Context &getContext() const;
	const VkRect2D &getScissor() const;
	const VkViewport &getViewport() const;
	const sw::float4 &getBlendConstants() const;
	// True if dynamicState was listed in VkPipelineDynamicStateCreateInfo,
	// i.e. the state comes from the command buffer rather than the pipeline.
	bool hasDynamicState(VkDynamicState dynamicState) const;
	bool hasPrimitiveRestartEnable() const { return primitiveRestartEnable; }

private:
	void setShader(const VkShaderStageFlagBits &stage, const std::shared_ptr<sw::SpirvShader> spirvShader);
	const std::shared_ptr<sw::SpirvShader> getShader(const VkShaderStageFlagBits &stage) const;
	std::shared_ptr<sw::SpirvShader> vertexShader;
	std::shared_ptr<sw::SpirvShader> fragmentShader;

	uint32_t dynamicStateFlags = 0;          // bitmask of enabled VkDynamicState values
	bool primitiveRestartEnable = false;
	sw::Context context;                     // baked fixed-function/draw state
	VkRect2D scissor;
	VkViewport viewport;
	sw::float4 blendConstants;
};
122 
// Pipeline implementation for VK_PIPELINE_BIND_POINT_COMPUTE. Owns the
// compute-stage SPIR-V shader and the compiled ComputeProgram that
// executes dispatches.
class ComputePipeline : public Pipeline, public ObjectBase<ComputePipeline, VkPipeline>
{
public:
	ComputePipeline(const VkComputePipelineCreateInfo *pCreateInfo, void *mem, const Device *device);
	virtual ~ComputePipeline() = default;

	void destroyPipeline(const VkAllocationCallbacks *pAllocator) override;

#ifndef NDEBUG
	VkPipelineBindPoint bindPoint() const override
	{
		return VK_PIPELINE_BIND_POINT_COMPUTE;
	}
#endif

	// Size of the extra memory block ("mem") the caller must allocate
	// alongside the object for the given create info.
	static size_t ComputeRequiredAllocationSize(const VkComputePipelineCreateInfo *pCreateInfo);

	// Builds the compute shader and program, consulting pipelineCache to
	// reuse previously built artifacts where possible.
	void compileShaders(const VkAllocationCallbacks *pAllocator, const VkComputePipelineCreateInfo *pCreateInfo, PipelineCache *pipelineCache);

	// Executes a dispatch: base group origin, group counts, and the bound
	// descriptor sets / dynamic offsets / push constants for the shader.
	void run(uint32_t baseGroupX, uint32_t baseGroupY, uint32_t baseGroupZ,
	         uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ,
	         vk::DescriptorSet::Bindings const &descriptorSets,
	         vk::DescriptorSet::DynamicOffsets const &descriptorDynamicOffsets,
	         sw::PushConstantStorage const &pushConstants);

protected:
	std::shared_ptr<sw::SpirvShader> shader;       // compute-stage SPIR-V module
	std::shared_ptr<sw::ComputeProgram> program;   // compiled executable for run()
};
152 
Cast(VkPipeline object)153 static inline Pipeline *Cast(VkPipeline object)
154 {
155 	return Pipeline::Cast(object);
156 }
157 
158 }  // namespace vk
159 
160 #endif  // VK_PIPELINE_HPP_
161