/*
Bullet Continuous Collision Detection and Physics Library
Copyright (c) 2003-2006 Erwin Coumans  http://continuousphysics.com/Bullet/

This software is provided 'as-is', without any express or implied warranty.
In no event will the authors be held liable for any damages arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it freely,
subject to the following restrictions:

1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
*/

#ifndef QUANTIZED_BVH_H
#define QUANTIZED_BVH_H

class btSerializer;

//#define DEBUG_CHECK_DEQUANTIZATION 1
#ifdef DEBUG_CHECK_DEQUANTIZATION
#ifdef __SPU__
#define printf spu_printf
#endif //__SPU__

#include <stdio.h>
#include <stdlib.h>
#endif //DEBUG_CHECK_DEQUANTIZATION

#include "LinearMath/btVector3.h"
#include "LinearMath/btAlignedAllocator.h"

#ifdef BT_USE_DOUBLE_PRECISION
#define btQuantizedBvhData btQuantizedBvhDoubleData
#define btOptimizedBvhNodeData btOptimizedBvhNodeDoubleData
#define btQuantizedBvhDataName "btQuantizedBvhDoubleData"
#else
#define btQuantizedBvhData btQuantizedBvhFloatData
#define btOptimizedBvhNodeData btOptimizedBvhNodeFloatData
#define btQuantizedBvhDataName "btQuantizedBvhFloatData"
#endif



//http://msdn.microsoft.com/library/default.asp?url=/library/en-us/vclang/html/vclrf__m128.asp


//Note: currently we have 16 bytes per quantized node
#define MAX_SUBTREE_SIZE_IN_BYTES  2048
// 10 gives the potential for 1024 parts, with at most 2^21 - 1 (2097151)
// triangles each (one bit is lost because the sign bit is reserved)
#define MAX_NUM_PARTS_IN_BITS 10
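
// For illustration, a hedged sketch of the leaf encoding this implies
// ('packLeaf' is hypothetical and not part of this header): the part id
// occupies the MAX_NUM_PARTS_IN_BITS bits just below the sign bit, and the
// triangle index fills the remaining low bits.
//
//	static int packLeaf(int partId, int triangleIndex)
//	{
//		// assumes 0 <= partId < (1 << MAX_NUM_PARTS_IN_BITS) and
//		// 0 <= triangleIndex < (1 << (31 - MAX_NUM_PARTS_IN_BITS))
//		return (partId << (31 - MAX_NUM_PARTS_IN_BITS)) | triangleIndex;
//	}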

///btQuantizedBvhNode is a compressed aabb node, 16 bytes.
///A node can be used as a leaf node or an internal node. Leaf nodes can point to a 32-bit triangle index (non-negative range).
ATTRIBUTE_ALIGNED16	(struct) btQuantizedBvhNode
{
	BT_DECLARE_ALIGNED_ALLOCATOR();

	//12 bytes
	unsigned short int	m_quantizedAabbMin[3];
	unsigned short int	m_quantizedAabbMax[3];
	//4 bytes
	int	m_escapeIndexOrTriangleIndex;

	bool isLeafNode() const
	{
		//the escape (skip) index is negative for internal nodes; the triangle index is >= 0 for leaf nodes
		return (m_escapeIndexOrTriangleIndex >= 0);
	}
	int getEscapeIndex() const
	{
		btAssert(!isLeafNode());
		return -m_escapeIndexOrTriangleIndex;
	}
	int	getTriangleIndex() const
	{
		btAssert(isLeafNode());
		// Get only the lower bits where the triangle index is stored
		return (m_escapeIndexOrTriangleIndex&~((~0)<<(31-MAX_NUM_PARTS_IN_BITS)));
	}
	int	getPartId() const
	{
		btAssert(isLeafNode());
		// Get only the highest bits where the part index is stored
		return (m_escapeIndexOrTriangleIndex>>(31-MAX_NUM_PARTS_IN_BITS));
	}
};
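
// Hedged usage sketch ('node' stands for any element of a quantized node array):
//
//	if (node.isLeafNode())
//	{
//		int part = node.getPartId();        // which mesh part the triangle belongs to
//		int tri  = node.getTriangleIndex(); // triangle index within that part
//	}
//	else
//	{
//		int skip = node.getEscapeIndex();   // number of nodes to skip past this subtree
//	}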

/// btOptimizedBvhNode contains both internal and leaf node information.
/// Total node size is 44 bytes per node; use the compressed 16-byte btQuantizedBvhNode where possible.
ATTRIBUTE_ALIGNED16 (struct) btOptimizedBvhNode
{
	BT_DECLARE_ALIGNED_ALLOCATOR();

	//32 bytes
	btVector3	m_aabbMinOrg;
	btVector3	m_aabbMaxOrg;

	//4
	int	m_escapeIndex;

	//8
	//for child nodes
	int	m_subPart;
	int	m_triangleIndex;
	int	m_padding[5];//bad, due to alignment


};


///btBvhSubtreeInfo provides info to gather a subtree of limited size
ATTRIBUTE_ALIGNED16(class) btBvhSubtreeInfo
{
public:
	BT_DECLARE_ALIGNED_ALLOCATOR();

	//12 bytes
	unsigned short int	m_quantizedAabbMin[3];
	unsigned short int	m_quantizedAabbMax[3];
	//4 bytes, points to the root of the subtree
	int			m_rootNodeIndex;
	//4 bytes
	int			m_subtreeSize;
	int			m_padding[3];

	btBvhSubtreeInfo()
	{
		//memset(&m_padding[0], 0, sizeof(m_padding));
	}


	void	setAabbFromQuantizeNode(const btQuantizedBvhNode& quantizedNode)
	{
		m_quantizedAabbMin[0] = quantizedNode.m_quantizedAabbMin[0];
		m_quantizedAabbMin[1] = quantizedNode.m_quantizedAabbMin[1];
		m_quantizedAabbMin[2] = quantizedNode.m_quantizedAabbMin[2];
		m_quantizedAabbMax[0] = quantizedNode.m_quantizedAabbMax[0];
		m_quantizedAabbMax[1] = quantizedNode.m_quantizedAabbMax[1];
		m_quantizedAabbMax[2] = quantizedNode.m_quantizedAabbMax[2];
	}
};


class btNodeOverlapCallback
{
public:
	virtual ~btNodeOverlapCallback() {};

	virtual void processNode(int subPart, int triangleIndex) = 0;
};
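
// A minimal callback sketch ('MyOverlapCallback' is hypothetical): implement
// processNode and pass an instance to the reportAabbOverlappingNodex /
// reportRayOverlappingNodex queries declared on btQuantizedBvh below.
//
//	struct MyOverlapCallback : public btNodeOverlapCallback
//	{
//		virtual void processNode(int subPart, int triangleIndex)
//		{
//			// fetch triangle (subPart, triangleIndex) from your mesh interface here
//		}
//	};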

#include "LinearMath/btAlignedAllocator.h"
#include "LinearMath/btAlignedObjectArray.h"



///for code readability:
typedef btAlignedObjectArray<btOptimizedBvhNode>	NodeArray;
typedef btAlignedObjectArray<btQuantizedBvhNode>	QuantizedNodeArray;
typedef btAlignedObjectArray<btBvhSubtreeInfo>		BvhSubtreeInfoArray;


///The btQuantizedBvh class stores an AABB tree that can be quickly traversed on CPU and Cell SPU.
///It is used by the btBvhTriangleMeshShape as midphase, and by the btMultiSapBroadphase.
///It is recommended to use quantization for better performance and lower memory requirements.
ATTRIBUTE_ALIGNED16(class) btQuantizedBvh
{
public:
	enum btTraversalMode
	{
		TRAVERSAL_STACKLESS = 0,
		TRAVERSAL_STACKLESS_CACHE_FRIENDLY,
		TRAVERSAL_RECURSIVE
	};

protected:


	btVector3			m_bvhAabbMin;
	btVector3			m_bvhAabbMax;
	btVector3			m_bvhQuantization;

	int					m_bulletVersion;	//for serialization versioning. It could also be used to detect endianness.

	int					m_curNodeIndex;
	//quantization data
	bool				m_useQuantization;



	NodeArray			m_leafNodes;
	NodeArray			m_contiguousNodes;
	QuantizedNodeArray	m_quantizedLeafNodes;
	QuantizedNodeArray	m_quantizedContiguousNodes;

	btTraversalMode	m_traversalMode;
	BvhSubtreeInfoArray		m_SubtreeHeaders;

	//This is only used for serialization so we don't have to add serialization directly to btAlignedObjectArray
	mutable int m_subtreeHeaderCount;




	///two versions, one for quantized and one for normal nodes. This allows code-reuse while maintaining readability (no template/macro!)
	///this might be refactored into a virtual; it is usually not calculated at run-time
	void	setInternalNodeAabbMin(int nodeIndex, const btVector3& aabbMin)
	{
		if (m_useQuantization)
		{
			quantize(&m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0],aabbMin,0);
		} else
		{
			m_contiguousNodes[nodeIndex].m_aabbMinOrg = aabbMin;
		}
	}
	void	setInternalNodeAabbMax(int nodeIndex,const btVector3& aabbMax)
	{
		if (m_useQuantization)
		{
			quantize(&m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0],aabbMax,1);
		} else
		{
			m_contiguousNodes[nodeIndex].m_aabbMaxOrg = aabbMax;
		}
	}

	btVector3 getAabbMin(int nodeIndex) const
	{
		if (m_useQuantization)
		{
			return unQuantize(&m_quantizedLeafNodes[nodeIndex].m_quantizedAabbMin[0]);
		}
		//non-quantized
		return m_leafNodes[nodeIndex].m_aabbMinOrg;
	}
	btVector3 getAabbMax(int nodeIndex) const
	{
		if (m_useQuantization)
		{
			return unQuantize(&m_quantizedLeafNodes[nodeIndex].m_quantizedAabbMax[0]);
		}
		//non-quantized
		return m_leafNodes[nodeIndex].m_aabbMaxOrg;
	}


	void	setInternalNodeEscapeIndex(int nodeIndex, int escapeIndex)
	{
		if (m_useQuantization)
		{
			m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex = -escapeIndex;
		}
		else
		{
			m_contiguousNodes[nodeIndex].m_escapeIndex = escapeIndex;
		}
	}

	void mergeInternalNodeAabb(int nodeIndex,const btVector3& newAabbMin,const btVector3& newAabbMax)
	{
		if (m_useQuantization)
		{
			unsigned short int quantizedAabbMin[3];
			unsigned short int quantizedAabbMax[3];
			quantize(quantizedAabbMin,newAabbMin,0);
			quantize(quantizedAabbMax,newAabbMax,1);
			for (int i=0;i<3;i++)
			{
				if (m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[i] > quantizedAabbMin[i])
					m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[i] = quantizedAabbMin[i];

				if (m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[i] < quantizedAabbMax[i])
					m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[i] = quantizedAabbMax[i];
			}
		} else
		{
			//non-quantized
			m_contiguousNodes[nodeIndex].m_aabbMinOrg.setMin(newAabbMin);
			m_contiguousNodes[nodeIndex].m_aabbMaxOrg.setMax(newAabbMax);
		}
	}

	void	swapLeafNodes(int firstIndex,int secondIndex);

	void	assignInternalNodeFromLeafNode(int internalNode,int leafNodeIndex);

protected:


	void	buildTree	(int startIndex,int endIndex);

	int	calcSplittingAxis(int startIndex,int endIndex);

	int	sortAndCalcSplittingIndex(int startIndex,int endIndex,int splitAxis);

	void	walkStacklessTree(btNodeOverlapCallback* nodeCallback,const btVector3& aabbMin,const btVector3& aabbMax) const;

	void	walkStacklessQuantizedTreeAgainstRay(btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget, const btVector3& aabbMin, const btVector3& aabbMax, int startNodeIndex,int endNodeIndex) const;
	void	walkStacklessQuantizedTree(btNodeOverlapCallback* nodeCallback,unsigned short int* quantizedQueryAabbMin,unsigned short int* quantizedQueryAabbMax,int startNodeIndex,int endNodeIndex) const;
	void	walkStacklessTreeAgainstRay(btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget, const btVector3& aabbMin, const btVector3& aabbMax, int startNodeIndex,int endNodeIndex) const;

	///tree traversal designed for small-memory processors like PS3 SPU
	void	walkStacklessQuantizedTreeCacheFriendly(btNodeOverlapCallback* nodeCallback,unsigned short int* quantizedQueryAabbMin,unsigned short int* quantizedQueryAabbMax) const;

	///use the 16-byte stackless 'skipindex' node tree to do a recursive traversal against a query AABB
	void	walkRecursiveQuantizedTreeAgainstQueryAabb(const btQuantizedBvhNode* currentNode,btNodeOverlapCallback* nodeCallback,unsigned short int* quantizedQueryAabbMin,unsigned short int* quantizedQueryAabbMax) const;

	///use the 16-byte stackless 'skipindex' node tree to do a recursive traversal against another tree
	void	walkRecursiveQuantizedTreeAgainstQuantizedTree(const btQuantizedBvhNode* treeNodeA,const btQuantizedBvhNode* treeNodeB,btNodeOverlapCallback* nodeCallback) const;
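
	// The stackless walks above exploit the escape-index encoding: nodes are stored in
	// depth-first order, so on an AABB miss the traversal can jump straight past a whole
	// subtree. A hedged sketch of the core loop (testQuantizedAabbAgainstQuantizedAabb
	// lives in LinearMath/btAabbUtil2.h; the loop itself is illustrative, not the exact
	// implementation):
	//
	//	int curIndex = startNodeIndex;
	//	while (curIndex < endNodeIndex)
	//	{
	//		const btQuantizedBvhNode* node = &m_quantizedContiguousNodes[curIndex];
	//		bool overlap = testQuantizedAabbAgainstQuantizedAabb(quantizedQueryAabbMin,quantizedQueryAabbMax,
	//			node->m_quantizedAabbMin,node->m_quantizedAabbMax) != 0;
	//		if (overlap && node->isLeafNode())
	//			nodeCallback->processNode(node->getPartId(),node->getTriangleIndex());
	//		if (overlap || node->isLeafNode())
	//			curIndex++;							//descend into / advance past this node
	//		else
	//			curIndex += node->getEscapeIndex();	//skip the entire subtree
	//	}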



	void	updateSubtreeHeaders(int leftChildNodexIndex,int rightChildNodexIndex);

public:

	BT_DECLARE_ALIGNED_ALLOCATOR();

	btQuantizedBvh();

	virtual ~btQuantizedBvh();


	///***************************************** expert/internal use only *************************
	void	setQuantizationValues(const btVector3& bvhAabbMin,const btVector3& bvhAabbMax,btScalar quantizationMargin=btScalar(1.0));
	QuantizedNodeArray&	getLeafNodeArray() {			return	m_quantizedLeafNodes;	}
	///buildInternal is for expert use only: it assumes that setQuantizationValues has been called and the leaf node array has been filled
	void	buildInternal();
	///***************************************** expert/internal use only *************************

	void	reportAabbOverlappingNodex(btNodeOverlapCallback* nodeCallback,const btVector3& aabbMin,const btVector3& aabbMax) const;
	void	reportRayOverlappingNodex (btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget) const;
	void	reportBoxCastOverlappingNodex(btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget, const btVector3& aabbMin,const btVector3& aabbMax) const;
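
	// Putting it together (hedged usage sketch; MyOverlapCallback is the hypothetical
	// subclass sketched next to btNodeOverlapCallback above):
	//
	//	MyOverlapCallback callback;
	//	bvh->reportAabbOverlappingNodex(&callback,queryAabbMin,queryAabbMax);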

	SIMD_FORCE_INLINE void quantize(unsigned short* out, const btVector3& point,int isMax) const
	{

		btAssert(m_useQuantization);

		btAssert(point.getX() <= m_bvhAabbMax.getX());
		btAssert(point.getY() <= m_bvhAabbMax.getY());
		btAssert(point.getZ() <= m_bvhAabbMax.getZ());

		btAssert(point.getX() >= m_bvhAabbMin.getX());
		btAssert(point.getY() >= m_bvhAabbMin.getY());
		btAssert(point.getZ() >= m_bvhAabbMin.getZ());

		btVector3 v = (point - m_bvhAabbMin) * m_bvhQuantization;
		///Make sure rounding is done in a way that unQuantize(quantizeWithClamp(...)) is conservative
		///end-points always set the first bit, so that they are sorted properly (so that neighbouring AABBs overlap properly)
		///@todo: double-check this
		if (isMax)
		{
			out[0] = (unsigned short) (((unsigned short)(v.getX()+btScalar(1.)) | 1));
			out[1] = (unsigned short) (((unsigned short)(v.getY()+btScalar(1.)) | 1));
			out[2] = (unsigned short) (((unsigned short)(v.getZ()+btScalar(1.)) | 1));
		} else
		{
			out[0] = (unsigned short) (((unsigned short)(v.getX()) & 0xfffe));
			out[1] = (unsigned short) (((unsigned short)(v.getY()) & 0xfffe));
			out[2] = (unsigned short) (((unsigned short)(v.getZ()) & 0xfffe));
		}


#ifdef DEBUG_CHECK_DEQUANTIZATION
		btVector3 newPoint = unQuantize(out);
		if (isMax)
		{
			if (newPoint.getX() < point.getX())
			{
				printf("unconservative X, diffX = %f, oldX=%f,newX=%f\n",newPoint.getX()-point.getX(), newPoint.getX(),point.getX());
			}
			if (newPoint.getY() < point.getY())
			{
				printf("unconservative Y, diffY = %f, oldY=%f,newY=%f\n",newPoint.getY()-point.getY(), newPoint.getY(),point.getY());
			}
			if (newPoint.getZ() < point.getZ())
			{
				printf("unconservative Z, diffZ = %f, oldZ=%f,newZ=%f\n",newPoint.getZ()-point.getZ(), newPoint.getZ(),point.getZ());
			}
		} else
		{
			if (newPoint.getX() > point.getX())
			{
				printf("unconservative X, diffX = %f, oldX=%f,newX=%f\n",newPoint.getX()-point.getX(), newPoint.getX(),point.getX());
			}
			if (newPoint.getY() > point.getY())
			{
				printf("unconservative Y, diffY = %f, oldY=%f,newY=%f\n",newPoint.getY()-point.getY(), newPoint.getY(),point.getY());
			}
			if (newPoint.getZ() > point.getZ())
			{
				printf("unconservative Z, diffZ = %f, oldZ=%f,newZ=%f\n",newPoint.getZ()-point.getZ(), newPoint.getZ(),point.getZ());
			}
		}
#endif //DEBUG_CHECK_DEQUANTIZATION

	}
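
	// Worked example of the conservative rounding above (a hedged illustration, not from
	// the library docs): if v.getX() == 100.3, a min endpoint stores
	// (unsigned short)(100.3) & 0xfffe == 100 (rounded down, forced even), while a max
	// endpoint stores (unsigned short)(100.3 + 1.0) | 1 == 101 (rounded up, forced odd).
	// Mins are always even and maxs always odd, so unQuantize never shrinks a box, and
	// AABBs that merely touch still quantize to overlapping intervals.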


	SIMD_FORCE_INLINE void quantizeWithClamp(unsigned short* out, const btVector3& point2,int isMax) const
	{

		btAssert(m_useQuantization);

		btVector3 clampedPoint(point2);
		clampedPoint.setMax(m_bvhAabbMin);
		clampedPoint.setMin(m_bvhAabbMax);

		quantize(out,clampedPoint,isMax);

	}

	SIMD_FORCE_INLINE btVector3	unQuantize(const unsigned short* vecIn) const
	{
			btVector3	vecOut;
			vecOut.setValue(
			(btScalar)(vecIn[0]) / (m_bvhQuantization.getX()),
			(btScalar)(vecIn[1]) / (m_bvhQuantization.getY()),
			(btScalar)(vecIn[2]) / (m_bvhQuantization.getZ()));
			vecOut += m_bvhAabbMin;
			return vecOut;
	}

	///setTraversalMode lets you choose between stackless, recursive, or stackless cache-friendly tree traversal. Note this is only implemented for quantized trees.
	void	setTraversalMode(btTraversalMode	traversalMode)
	{
		m_traversalMode = traversalMode;
	}
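
	// Hedged usage sketch:
	//
	//	bvh->setTraversalMode(btQuantizedBvh::TRAVERSAL_STACKLESS_CACHE_FRIENDLY);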


	SIMD_FORCE_INLINE QuantizedNodeArray&	getQuantizedNodeArray()
	{
		return	m_quantizedContiguousNodes;
	}


	SIMD_FORCE_INLINE BvhSubtreeInfoArray&	getSubtreeInfoArray()
	{
		return m_SubtreeHeaders;
	}

////////////////////////////////////////////////////////////////////

	///Calculate the space needed to store the BVH for serialization
	unsigned calculateSerializeBufferSize() const;

	///Data buffer MUST be 16-byte aligned
	virtual bool serialize(void *o_alignedDataBuffer, unsigned i_dataBufferSize, bool i_swapEndian) const;

	///deSerializeInPlace loads and initializes a BVH from a buffer in memory 'in place'
	static btQuantizedBvh *deSerializeInPlace(void *i_alignedDataBuffer, unsigned int i_dataBufferSize, bool i_swapEndian);

	static unsigned int getAlignmentSerializationPadding();
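
	// Hedged sketch of the in-place serialization round trip (buffer handling is
	// illustrative; btAlignedAlloc/btAlignedFree come from LinearMath/btAlignedAllocator.h):
	//
	//	unsigned size = bvh->calculateSerializeBufferSize();
	//	void* buffer = btAlignedAlloc(size,16);	//MUST be 16-byte aligned
	//	bool ok = bvh->serialize(buffer,size,swapEndian);
	//	//...write the buffer out, later load it back into another 16-byte aligned block...
	//	btQuantizedBvh* loaded = btQuantizedBvh::deSerializeInPlace(buffer,size,swapEndian);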
//////////////////////////////////////////////////////////////////////


	virtual	int	calculateSerializeBufferSizeNew() const;

	///fills the dataBuffer and returns the struct name (and 0 on failure)
	virtual	const char*	serialize(void* dataBuffer, btSerializer* serializer) const;

	virtual	void deSerializeFloat(struct btQuantizedBvhFloatData& quantizedBvhFloatData);

	virtual	void deSerializeDouble(struct btQuantizedBvhDoubleData& quantizedBvhDoubleData);


////////////////////////////////////////////////////////////////////

	SIMD_FORCE_INLINE bool isQuantized()
	{
		return m_useQuantization;
	}

private:
	// Special "copy" constructor that allows for in-place deserialization
	// It prevents btVector3's default constructor from being called, but doesn't initialize much else
	// ownsMemory should most likely be false when deserializing; if you are not deserializing, don't call this
	// (the extra parameter also changes the function signature, which we need to distinguish it from a copy constructor)
	btQuantizedBvh(btQuantizedBvh &other, bool ownsMemory);
};


struct	btBvhSubtreeInfoData
{
	int			m_rootNodeIndex;
	int			m_subtreeSize;
	unsigned short m_quantizedAabbMin[3];
	unsigned short m_quantizedAabbMax[3];
};

struct btOptimizedBvhNodeFloatData
{
	btVector3FloatData	m_aabbMinOrg;
	btVector3FloatData	m_aabbMaxOrg;
	int	m_escapeIndex;
	int	m_subPart;
	int	m_triangleIndex;
	char	m_pad[4];
};

struct btOptimizedBvhNodeDoubleData
{
	btVector3DoubleData	m_aabbMinOrg;
	btVector3DoubleData	m_aabbMaxOrg;
	int	m_escapeIndex;
	int	m_subPart;
	int	m_triangleIndex;
	char	m_pad[4];
};


struct btQuantizedBvhNodeData
{
	unsigned short m_quantizedAabbMin[3];
	unsigned short m_quantizedAabbMax[3];
	int	m_escapeIndexOrTriangleIndex;
};

struct	btQuantizedBvhFloatData
{
	btVector3FloatData			m_bvhAabbMin;
	btVector3FloatData			m_bvhAabbMax;
	btVector3FloatData			m_bvhQuantization;
	int					m_curNodeIndex;
	int					m_useQuantization;
	int					m_numContiguousLeafNodes;
	int					m_numQuantizedContiguousNodes;
	btOptimizedBvhNodeFloatData	*m_contiguousNodesPtr;
	btQuantizedBvhNodeData		*m_quantizedContiguousNodesPtr;
	btBvhSubtreeInfoData	*m_subTreeInfoPtr;
	int					m_traversalMode;
	int					m_numSubtreeHeaders;
};

struct	btQuantizedBvhDoubleData
{
	btVector3DoubleData			m_bvhAabbMin;
	btVector3DoubleData			m_bvhAabbMax;
	btVector3DoubleData			m_bvhQuantization;
	int							m_curNodeIndex;
	int							m_useQuantization;
	int							m_numContiguousLeafNodes;
	int							m_numQuantizedContiguousNodes;
	btOptimizedBvhNodeDoubleData	*m_contiguousNodesPtr;
	btQuantizedBvhNodeData			*m_quantizedContiguousNodesPtr;

	int							m_traversalMode;
	int							m_numSubtreeHeaders;
	btBvhSubtreeInfoData		*m_subTreeInfoPtr;
};


SIMD_FORCE_INLINE	int	btQuantizedBvh::calculateSerializeBufferSizeNew() const
{
	return sizeof(btQuantizedBvhData);
}



#endif //QUANTIZED_BVH_H