/*
Bullet Continuous Collision Detection and Physics Library
Copyright (c) 2003-2006 Erwin Coumans  https://bulletphysics.org

This software is provided 'as-is', without any express or implied warranty.
In no event will the authors be held liable for any damages arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it freely,
subject to the following restrictions:

1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
*/

#ifndef BT_QUANTIZED_BVH_H
#define BT_QUANTIZED_BVH_H

class btSerializer;

//#define DEBUG_CHECK_DEQUANTIZATION 1
#ifdef DEBUG_CHECK_DEQUANTIZATION
#ifdef __SPU__
#define printf spu_printf
#endif  //__SPU__

#include <stdio.h>
#include <stdlib.h>
#endif  //DEBUG_CHECK_DEQUANTIZATION

#include "LinearMath/btVector3.h"
#include "LinearMath/btAlignedAllocator.h"

#ifdef BT_USE_DOUBLE_PRECISION
#define btQuantizedBvhData btQuantizedBvhDoubleData
#define btOptimizedBvhNodeData btOptimizedBvhNodeDoubleData
#define btQuantizedBvhDataName "btQuantizedBvhDoubleData"
#else
#define btQuantizedBvhData btQuantizedBvhFloatData
#define btOptimizedBvhNodeData btOptimizedBvhNodeFloatData
#define btQuantizedBvhDataName "btQuantizedBvhFloatData"
#endif

//http://msdn.microsoft.com/library/default.asp?url=/library/en-us/vclang/html/vclrf__m128.asp

//Note: currently we have 16 bytes per quantized node
#define MAX_SUBTREE_SIZE_IN_BYTES 2048
// 10 bits give the potential for 1024 parts, with at most 2^21 (2097152,
// minus one actually) triangles each, since the sign bit is reserved.
#define MAX_NUM_PARTS_IN_BITS 10
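
// For illustration, the resulting bit layout of a leaf node's
// m_escapeIndexOrTriangleIndex, assuming the default MAX_NUM_PARTS_IN_BITS of 10:
//
//   bit 31       : sign bit, 0 for leaves (internal nodes store a negated escape index)
//   bits 21 - 30 : part id (10 bits, up to 1024 parts)
//   bits  0 - 20 : triangle index (21 bits)
//
// So a leaf for part 3, triangle 42 would be packed as (3 << 21) | 42, matching
// the decoding done in getPartId() and getTriangleIndex() below.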

///btQuantizedBvhNode is a compressed AABB node, 16 bytes.
///It can be used as a leaf node or an internal node. Leaf nodes store a non-negative 32-bit triangle index; internal nodes store a negated escape index.
ATTRIBUTE_ALIGNED16(struct)
btQuantizedBvhNode
{
	BT_DECLARE_ALIGNED_ALLOCATOR();

	//12 bytes
	unsigned short int m_quantizedAabbMin[3];
	unsigned short int m_quantizedAabbMax[3];
	//4 bytes
	int m_escapeIndexOrTriangleIndex;

	bool isLeafNode() const
	{
		//skip index is negative (internal node), triangle index is >= 0 (leaf node)
		return (m_escapeIndexOrTriangleIndex >= 0);
	}
	int getEscapeIndex() const
	{
		btAssert(!isLeafNode());
		return -m_escapeIndexOrTriangleIndex;
	}
	int getTriangleIndex() const
	{
		btAssert(isLeafNode());
		unsigned int x = 0;
		unsigned int y = (~(x & 0)) << (31 - MAX_NUM_PARTS_IN_BITS);
		// Mask off the high (part id) bits, keeping only the lower bits where the triangle index is stored
		return (m_escapeIndexOrTriangleIndex & ~(y));
	}
	int getPartId() const
	{
		btAssert(isLeafNode());
		// Get only the highest bits, where the part id is stored
		return (m_escapeIndexOrTriangleIndex >> (31 - MAX_NUM_PARTS_IN_BITS));
	}
};
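
// An illustrative (non-normative) sketch of how the escape index drives the
// stackless traversal over the contiguous node array; the real implementation
// is walkStacklessQuantizedTree() in btQuantizedBvh.cpp, and
// testQuantizedAabbAgainstQuantizedAabb() is the overlap helper from btAabbUtil2.h:
//
//   int curIndex = 0;
//   while (curIndex < numNodes)
//   {
//       const btQuantizedBvhNode& node = nodes[curIndex];
//       bool overlaps = testQuantizedAabbAgainstQuantizedAabb(
//           queryMin, queryMax, node.m_quantizedAabbMin, node.m_quantizedAabbMax) != 0;
//       if (overlaps && node.isLeafNode())
//           callback->processNode(node.getPartId(), node.getTriangleIndex());
//       if (overlaps || node.isLeafNode())
//           curIndex += 1;                      // descend into / step past this node
//       else
//           curIndex += node.getEscapeIndex();  // skip the entire non-overlapping subtree
//   }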

/// btOptimizedBvhNode contains both internal and leaf node information.
/// The payload is 44 bytes per node, padded to 64; you can use the compressed 16-byte btQuantizedBvhNode instead.
ATTRIBUTE_ALIGNED16(struct)
btOptimizedBvhNode
{
	BT_DECLARE_ALIGNED_ALLOCATOR();

	//32 bytes
	btVector3 m_aabbMinOrg;
	btVector3 m_aabbMaxOrg;

	//4 bytes
	int m_escapeIndex;

	//8 bytes: part id and triangle index, used by leaf nodes
	int m_subPart;
	int m_triangleIndex;

	//pad the total size to 64 bytes
	char m_padding[20];
};

///btBvhSubtreeInfo provides info to gather a subtree of limited size
ATTRIBUTE_ALIGNED16(class)
btBvhSubtreeInfo
{
public:
	BT_DECLARE_ALIGNED_ALLOCATOR();

	//12 bytes
	unsigned short int m_quantizedAabbMin[3];
	unsigned short int m_quantizedAabbMax[3];
	//4 bytes, points to the root of the subtree
	int m_rootNodeIndex;
	//4 bytes
	int m_subtreeSize;
	int m_padding[3];

	btBvhSubtreeInfo()
	{
		//memset(&m_padding[0], 0, sizeof(m_padding));
	}

	void setAabbFromQuantizeNode(const btQuantizedBvhNode& quantizedNode)
	{
		m_quantizedAabbMin[0] = quantizedNode.m_quantizedAabbMin[0];
		m_quantizedAabbMin[1] = quantizedNode.m_quantizedAabbMin[1];
		m_quantizedAabbMin[2] = quantizedNode.m_quantizedAabbMin[2];
		m_quantizedAabbMax[0] = quantizedNode.m_quantizedAabbMax[0];
		m_quantizedAabbMax[1] = quantizedNode.m_quantizedAabbMax[1];
		m_quantizedAabbMax[2] = quantizedNode.m_quantizedAabbMax[2];
	}
};

class btNodeOverlapCallback
{
public:
	virtual ~btNodeOverlapCallback(){};

	virtual void processNode(int subPart, int triangleIndex) = 0;
};

#include "LinearMath/btAlignedObjectArray.h"

///for code readability:
typedef btAlignedObjectArray<btOptimizedBvhNode> NodeArray;
typedef btAlignedObjectArray<btQuantizedBvhNode> QuantizedNodeArray;
typedef btAlignedObjectArray<btBvhSubtreeInfo> BvhSubtreeInfoArray;

///The btQuantizedBvh class stores an AABB tree that can be quickly traversed on CPU and Cell SPU.
///It is used by the btBvhTriangleMeshShape as midphase.
///It is recommended to use quantization for better performance and lower memory requirements.
ATTRIBUTE_ALIGNED16(class)
btQuantizedBvh
{
public:
	enum btTraversalMode
	{
		TRAVERSAL_STACKLESS = 0,
		TRAVERSAL_STACKLESS_CACHE_FRIENDLY,
		TRAVERSAL_RECURSIVE
	};

protected:
	btVector3 m_bvhAabbMin;
	btVector3 m_bvhAabbMax;
	btVector3 m_bvhQuantization;
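	//m_bvhQuantization holds the per-axis scale that maps the BVH AABB onto the
	//16-bit integer range: quantize() below computes
	//(point - m_bvhAabbMin) * m_bvhQuantization, and unQuantize() inverts it.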

	int m_bulletVersion;  //for serialization versioning. It could also be used to detect endianness.

	int m_curNodeIndex;
	//quantization data
	bool m_useQuantization;

	NodeArray m_leafNodes;
	NodeArray m_contiguousNodes;
	QuantizedNodeArray m_quantizedLeafNodes;
	QuantizedNodeArray m_quantizedContiguousNodes;

	btTraversalMode m_traversalMode;
	BvhSubtreeInfoArray m_SubtreeHeaders;

	//This is only used for serialization, so we don't have to add serialization directly to btAlignedObjectArray
	mutable int m_subtreeHeaderCount;

	///two versions, one for quantized and one for normal nodes. This allows code reuse while maintaining readability (no template/macro!)
	///this might be refactored into a virtual; it is usually not calculated at run-time
	void setInternalNodeAabbMin(int nodeIndex, const btVector3& aabbMin)
	{
		if (m_useQuantization)
		{
			quantize(&m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0], aabbMin, 0);
		}
		else
		{
			m_contiguousNodes[nodeIndex].m_aabbMinOrg = aabbMin;
		}
	}
	void setInternalNodeAabbMax(int nodeIndex, const btVector3& aabbMax)
	{
		if (m_useQuantization)
		{
			quantize(&m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0], aabbMax, 1);
		}
		else
		{
			m_contiguousNodes[nodeIndex].m_aabbMaxOrg = aabbMax;
		}
	}

	btVector3 getAabbMin(int nodeIndex) const
	{
		if (m_useQuantization)
		{
			return unQuantize(&m_quantizedLeafNodes[nodeIndex].m_quantizedAabbMin[0]);
		}
		//non-quantized
		return m_leafNodes[nodeIndex].m_aabbMinOrg;
	}
	btVector3 getAabbMax(int nodeIndex) const
	{
		if (m_useQuantization)
		{
			return unQuantize(&m_quantizedLeafNodes[nodeIndex].m_quantizedAabbMax[0]);
		}
		//non-quantized
		return m_leafNodes[nodeIndex].m_aabbMaxOrg;
	}

	void setInternalNodeEscapeIndex(int nodeIndex, int escapeIndex)
	{
		if (m_useQuantization)
		{
			m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex = -escapeIndex;
		}
		else
		{
			m_contiguousNodes[nodeIndex].m_escapeIndex = escapeIndex;
		}
	}

	void mergeInternalNodeAabb(int nodeIndex, const btVector3& newAabbMin, const btVector3& newAabbMax)
	{
		if (m_useQuantization)
		{
			unsigned short int quantizedAabbMin[3];
			unsigned short int quantizedAabbMax[3];
			quantize(quantizedAabbMin, newAabbMin, 0);
			quantize(quantizedAabbMax, newAabbMax, 1);
			for (int i = 0; i < 3; i++)
			{
				if (m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[i] > quantizedAabbMin[i])
					m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[i] = quantizedAabbMin[i];

				if (m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[i] < quantizedAabbMax[i])
					m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[i] = quantizedAabbMax[i];
			}
		}
		else
		{
			//non-quantized
			m_contiguousNodes[nodeIndex].m_aabbMinOrg.setMin(newAabbMin);
			m_contiguousNodes[nodeIndex].m_aabbMaxOrg.setMax(newAabbMax);
		}
	}

	void swapLeafNodes(int firstIndex, int secondIndex);

	void assignInternalNodeFromLeafNode(int internalNode, int leafNodeIndex);

protected:
	void buildTree(int startIndex, int endIndex);

	int calcSplittingAxis(int startIndex, int endIndex);

	int sortAndCalcSplittingIndex(int startIndex, int endIndex, int splitAxis);

	void walkStacklessTree(btNodeOverlapCallback * nodeCallback, const btVector3& aabbMin, const btVector3& aabbMax) const;

	void walkStacklessQuantizedTreeAgainstRay(btNodeOverlapCallback * nodeCallback, const btVector3& raySource, const btVector3& rayTarget, const btVector3& aabbMin, const btVector3& aabbMax, int startNodeIndex, int endNodeIndex) const;
	void walkStacklessQuantizedTree(btNodeOverlapCallback * nodeCallback, unsigned short int* quantizedQueryAabbMin, unsigned short int* quantizedQueryAabbMax, int startNodeIndex, int endNodeIndex) const;
	void walkStacklessTreeAgainstRay(btNodeOverlapCallback * nodeCallback, const btVector3& raySource, const btVector3& rayTarget, const btVector3& aabbMin, const btVector3& aabbMax, int startNodeIndex, int endNodeIndex) const;

	///tree traversal designed for small-memory processors like the PS3 SPU
	void walkStacklessQuantizedTreeCacheFriendly(btNodeOverlapCallback * nodeCallback, unsigned short int* quantizedQueryAabbMin, unsigned short int* quantizedQueryAabbMax) const;

	///use the 16-byte stackless 'skipindex' node tree to do a recursive traversal
	void walkRecursiveQuantizedTreeAgainstQueryAabb(const btQuantizedBvhNode* currentNode, btNodeOverlapCallback* nodeCallback, unsigned short int* quantizedQueryAabbMin, unsigned short int* quantizedQueryAabbMax) const;

	///use the 16-byte stackless 'skipindex' node tree to do a recursive traversal
	void walkRecursiveQuantizedTreeAgainstQuantizedTree(const btQuantizedBvhNode* treeNodeA, const btQuantizedBvhNode* treeNodeB, btNodeOverlapCallback* nodeCallback) const;
	void updateSubtreeHeaders(int leftChildNodeIndex, int rightChildNodeIndex);

public:
	BT_DECLARE_ALIGNED_ALLOCATOR();

	btQuantizedBvh();

	virtual ~btQuantizedBvh();

	///***************************************** expert/internal use only *************************
	void setQuantizationValues(const btVector3& bvhAabbMin, const btVector3& bvhAabbMax, btScalar quantizationMargin = btScalar(1.0));
	QuantizedNodeArray& getLeafNodeArray() { return m_quantizedLeafNodes; }
	///buildInternal is expert use only: it assumes that setQuantizationValues was called and the LeafNodeArray is initialized
	void buildInternal();
	///***************************************** expert/internal use only *************************

	void reportAabbOverlappingNodex(btNodeOverlapCallback * nodeCallback, const btVector3& aabbMin, const btVector3& aabbMax) const;
	void reportRayOverlappingNodex(btNodeOverlapCallback * nodeCallback, const btVector3& raySource, const btVector3& rayTarget) const;
	void reportBoxCastOverlappingNodex(btNodeOverlapCallback * nodeCallback, const btVector3& raySource, const btVector3& rayTarget, const btVector3& aabbMin, const btVector3& aabbMax) const;

	SIMD_FORCE_INLINE void quantize(unsigned short* out, const btVector3& point, int isMax) const
	{
		btAssert(m_useQuantization);

		btAssert(point.getX() <= m_bvhAabbMax.getX());
		btAssert(point.getY() <= m_bvhAabbMax.getY());
		btAssert(point.getZ() <= m_bvhAabbMax.getZ());

		btAssert(point.getX() >= m_bvhAabbMin.getX());
		btAssert(point.getY() >= m_bvhAabbMin.getY());
		btAssert(point.getZ() >= m_bvhAabbMin.getZ());

		btVector3 v = (point - m_bvhAabbMin) * m_bvhQuantization;
		///Make sure rounding is done in a way that unQuantize(quantizeWithClamp(...)) is conservative
		///end-points always set the first bit, so that they are sorted properly (so that neighbouring AABBs overlap properly)
		///@todo: double-check this
		if (isMax)
		{
			//round up and force the lowest bit to 1: max end-points become odd
			out[0] = (unsigned short)(((unsigned short)(v.getX() + btScalar(1.)) | 1));
			out[1] = (unsigned short)(((unsigned short)(v.getY() + btScalar(1.)) | 1));
			out[2] = (unsigned short)(((unsigned short)(v.getZ() + btScalar(1.)) | 1));
		}
		else
		{
			//round down and force the lowest bit to 0: min end-points become even
			out[0] = (unsigned short)(((unsigned short)(v.getX()) & 0xfffe));
			out[1] = (unsigned short)(((unsigned short)(v.getY()) & 0xfffe));
			out[2] = (unsigned short)(((unsigned short)(v.getZ()) & 0xfffe));
		}

#ifdef DEBUG_CHECK_DEQUANTIZATION
		btVector3 newPoint = unQuantize(out);
		if (isMax)
		{
			if (newPoint.getX() < point.getX())
			{
				printf("unconservative X, diffX = %f, oldX=%f,newX=%f\n", newPoint.getX() - point.getX(), newPoint.getX(), point.getX());
			}
			if (newPoint.getY() < point.getY())
			{
				printf("unconservative Y, diffY = %f, oldY=%f,newY=%f\n", newPoint.getY() - point.getY(), newPoint.getY(), point.getY());
			}
			if (newPoint.getZ() < point.getZ())
			{
				printf("unconservative Z, diffZ = %f, oldZ=%f,newZ=%f\n", newPoint.getZ() - point.getZ(), newPoint.getZ(), point.getZ());
			}
		}
		else
		{
			if (newPoint.getX() > point.getX())
			{
				printf("unconservative X, diffX = %f, oldX=%f,newX=%f\n", newPoint.getX() - point.getX(), newPoint.getX(), point.getX());
			}
			if (newPoint.getY() > point.getY())
			{
				printf("unconservative Y, diffY = %f, oldY=%f,newY=%f\n", newPoint.getY() - point.getY(), newPoint.getY(), point.getY());
			}
			if (newPoint.getZ() > point.getZ())
			{
				printf("unconservative Z, diffZ = %f, oldZ=%f,newZ=%f\n", newPoint.getZ() - point.getZ(), newPoint.getZ(), point.getZ());
			}
		}
#endif  //DEBUG_CHECK_DEQUANTIZATION
	}

	SIMD_FORCE_INLINE void quantizeWithClamp(unsigned short* out, const btVector3& point2, int isMax) const
	{
		btAssert(m_useQuantization);

		btVector3 clampedPoint(point2);
		clampedPoint.setMax(m_bvhAabbMin);
		clampedPoint.setMin(m_bvhAabbMax);

		quantize(out, clampedPoint, isMax);
	}

	SIMD_FORCE_INLINE btVector3 unQuantize(const unsigned short* vecIn) const
	{
		btVector3 vecOut;
		vecOut.setValue(
			(btScalar)(vecIn[0]) / (m_bvhQuantization.getX()),
			(btScalar)(vecIn[1]) / (m_bvhQuantization.getY()),
			(btScalar)(vecIn[2]) / (m_bvhQuantization.getZ()));
		vecOut += m_bvhAabbMin;
		return vecOut;
	}
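
	//A worked example of the conservative rounding above (illustrative numbers,
	//not from the library): with m_bvhAabbMin.x = 0 and m_bvhQuantization.x = 100,
	//a point at x = 1.234 maps to v.x = 123.4; as a min it quantizes to 122
	//(rounded down, forced even), as a max to 125 (rounded up, forced odd).
	//unQuantize() then yields 1.22 <= 1.234 and 1.25 >= 1.234, so the
	//dequantized AABB always contains the original one.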

	///setTraversalMode lets you choose between stackless, recursive or stackless cache-friendly tree traversal. Note this is only implemented for quantized trees.
	void setTraversalMode(btTraversalMode traversalMode)
	{
		m_traversalMode = traversalMode;
	}

	SIMD_FORCE_INLINE QuantizedNodeArray& getQuantizedNodeArray()
	{
		return m_quantizedContiguousNodes;
	}

	SIMD_FORCE_INLINE BvhSubtreeInfoArray& getSubtreeInfoArray()
	{
		return m_SubtreeHeaders;
	}

	////////////////////////////////////////////////////////////////////

	///Calculate space needed to store BVH for serialization
	unsigned calculateSerializeBufferSize() const;

	/// Data buffer MUST be 16-byte aligned
	virtual bool serialize(void* o_alignedDataBuffer, unsigned i_dataBufferSize, bool i_swapEndian) const;

	///deSerializeInPlace loads and initializes a BVH from a buffer in memory 'in place'
	static btQuantizedBvh* deSerializeInPlace(void* i_alignedDataBuffer, unsigned int i_dataBufferSize, bool i_swapEndian);

	static unsigned int getAlignmentSerializationPadding();
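
	//A hedged usage sketch of the in-place serialization path (buffer handling
	//is up to the caller; btAlignedAlloc/btAlignedFree are the LinearMath helpers):
	//
	//   unsigned size = bvh->calculateSerializeBufferSize();
	//   void* buffer = btAlignedAlloc(size, 16);  // MUST be 16-byte aligned
	//   bvh->serialize(buffer, size, false);      // false: keep native endianness
	//   //...write the buffer to disk, reload it later...
	//   btQuantizedBvh* loaded = btQuantizedBvh::deSerializeInPlace(buffer, size, false);
	//   //'loaded' aliases the buffer, so keep the buffer alive while using it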
	//////////////////////////////////////////////////////////////////////

	virtual int calculateSerializeBufferSizeNew() const;

	///fills the dataBuffer and returns the struct name (and 0 on failure)
	virtual const char* serialize(void* dataBuffer, btSerializer* serializer) const;

	virtual void deSerializeFloat(struct btQuantizedBvhFloatData & quantizedBvhFloatData);

	virtual void deSerializeDouble(struct btQuantizedBvhDoubleData & quantizedBvhDoubleData);

	////////////////////////////////////////////////////////////////////

	SIMD_FORCE_INLINE bool isQuantized()
	{
		return m_useQuantization;
	}

private:
	// Special "copy" constructor that allows for in-place deserialization
	// Prevents btVector3's default constructor from being called, but doesn't initialize much else
	// ownsMemory should most likely be false when deserializing; if you are not deserializing, don't call this (the bool parameter also changes the function signature, which we need to distinguish it from the regular copy constructor)
	btQuantizedBvh(btQuantizedBvh & other, bool ownsMemory);
};
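
// A minimal, illustrative usage sketch (the callback name and the AABB
// variables are placeholders, not part of the library): gather all triangles
// whose quantized AABBs overlap a query box.
//
//   struct CollectTriangles : public btNodeOverlapCallback
//   {
//       virtual void processNode(int subPart, int triangleIndex)
//       {
//           printf("hit part %d, triangle %d\n", subPart, triangleIndex);
//       }
//   };
//
//   btQuantizedBvh bvh;
//   bvh.setQuantizationValues(aabbMin, aabbMax);  // must enclose all geometry
//   //...fill bvh.getLeafNodeArray() with quantized leaves, then:
//   bvh.buildInternal();
//
//   CollectTriangles callback;
//   bvh.reportAabbOverlappingNodex(&callback, queryMin, queryMax);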

// clang-format off
// parser needs * with the name
struct btBvhSubtreeInfoData
{
	int m_rootNodeIndex;
	int m_subtreeSize;
	unsigned short m_quantizedAabbMin[3];
	unsigned short m_quantizedAabbMax[3];
};

struct btOptimizedBvhNodeFloatData
{
	btVector3FloatData m_aabbMinOrg;
	btVector3FloatData m_aabbMaxOrg;
	int m_escapeIndex;
	int m_subPart;
	int m_triangleIndex;
	char m_pad[4];
};

struct btOptimizedBvhNodeDoubleData
{
	btVector3DoubleData m_aabbMinOrg;
	btVector3DoubleData m_aabbMaxOrg;
	int m_escapeIndex;
	int m_subPart;
	int m_triangleIndex;
	char m_pad[4];
};

struct btQuantizedBvhNodeData
{
	unsigned short m_quantizedAabbMin[3];
	unsigned short m_quantizedAabbMax[3];
	int	m_escapeIndexOrTriangleIndex;
};

struct	btQuantizedBvhFloatData
{
	btVector3FloatData			m_bvhAabbMin;
	btVector3FloatData			m_bvhAabbMax;
	btVector3FloatData			m_bvhQuantization;
	int					m_curNodeIndex;
	int					m_useQuantization;
	int					m_numContiguousLeafNodes;
	int					m_numQuantizedContiguousNodes;
	btOptimizedBvhNodeFloatData	*m_contiguousNodesPtr;
	btQuantizedBvhNodeData		*m_quantizedContiguousNodesPtr;
	btBvhSubtreeInfoData	*m_subTreeInfoPtr;
	int					m_traversalMode;
	int					m_numSubtreeHeaders;
};

struct	btQuantizedBvhDoubleData
{
	btVector3DoubleData			m_bvhAabbMin;
	btVector3DoubleData			m_bvhAabbMax;
	btVector3DoubleData			m_bvhQuantization;
	int							m_curNodeIndex;
	int							m_useQuantization;
	int							m_numContiguousLeafNodes;
	int							m_numQuantizedContiguousNodes;
	btOptimizedBvhNodeDoubleData	*m_contiguousNodesPtr;
	btQuantizedBvhNodeData			*m_quantizedContiguousNodesPtr;

	int							m_traversalMode;
	int							m_numSubtreeHeaders;
	btBvhSubtreeInfoData		*m_subTreeInfoPtr;
};
// clang-format on

SIMD_FORCE_INLINE int btQuantizedBvh::calculateSerializeBufferSizeNew() const
{
	return sizeof(btQuantizedBvhData);
}

#endif  //BT_QUANTIZED_BVH_H