1 /*
2 Bullet Continuous Collision Detection and Physics Library
3 Copyright (c) 2003-2007 Erwin Coumans http://continuousphysics.com/Bullet/
4
5 This software is provided 'as-is', without any express or implied warranty.
6 In no event will the authors be held liable for any damages arising from the use of this software.
7 Permission is granted to anyone to use this software for any purpose,
8 including commercial applications, and to alter it and redistribute it freely,
9 subject to the following restrictions:
10
11 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
12 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
13 3. This notice may not be removed or altered from any source distribution.
14 */
15 ///btDbvt implementation by Nathanael Presson
16
17 #ifndef BT_DYNAMIC_BOUNDING_VOLUME_TREE_H
18 #define BT_DYNAMIC_BOUNDING_VOLUME_TREE_H
19
20 #include "LinearMath/btAlignedObjectArray.h"
21 #include "LinearMath/btVector3.h"
22 #include "LinearMath/btTransform.h"
23 #include "LinearMath/btAabbUtil2.h"
24 //
25 // Compile time configuration
26 //
27
28 // Implementation profiles
29 #define DBVT_IMPL_GENERIC 0 // Generic implementation
30 #define DBVT_IMPL_SSE 1 // SSE
31
32 // Template implementation of ICollide
33 #ifdef _WIN32
34 #if (defined(_MSC_VER) && _MSC_VER >= 1400)
35 #define DBVT_USE_TEMPLATE 1
36 #else
37 #define DBVT_USE_TEMPLATE 0
38 #endif
39 #else
40 #define DBVT_USE_TEMPLATE 0
41 #endif
42
43 // Use only intrinsics instead of inline asm
44 #define DBVT_USE_INTRINSIC_SSE 1
45
// Using memmove for collideOCL
47 #define DBVT_USE_MEMMOVE 1
48
49 // Enable benchmarking code
50 #define DBVT_ENABLE_BENCHMARK 0
51
52 // Inlining
53 #define DBVT_INLINE SIMD_FORCE_INLINE
54
55 // Specific methods implementation
56
57 //SSE gives errors on a MSVC 7.1
58 #if defined(BT_USE_SSE) //&& defined (_WIN32)
59 #define DBVT_SELECT_IMPL DBVT_IMPL_SSE
60 #define DBVT_MERGE_IMPL DBVT_IMPL_SSE
61 #define DBVT_INT0_IMPL DBVT_IMPL_SSE
62 #else
63 #define DBVT_SELECT_IMPL DBVT_IMPL_GENERIC
64 #define DBVT_MERGE_IMPL DBVT_IMPL_GENERIC
65 #define DBVT_INT0_IMPL DBVT_IMPL_GENERIC
66 #endif
67
68 #if (DBVT_SELECT_IMPL == DBVT_IMPL_SSE) || \
69 (DBVT_MERGE_IMPL == DBVT_IMPL_SSE) || \
70 (DBVT_INT0_IMPL == DBVT_IMPL_SSE)
71 #include <emmintrin.h>
72 #endif
73
74 //
75 // Auto config and checks
76 //
77
78 #if DBVT_USE_TEMPLATE
79 #define DBVT_VIRTUAL
80 #define DBVT_VIRTUAL_DTOR(a)
81 #define DBVT_PREFIX template <typename T>
82 #define DBVT_IPOLICY T& policy
83 #define DBVT_CHECKTYPE \
84 static const ICollide& typechecker = *(T*)1; \
85 (void)typechecker;
86 #else
87 #define DBVT_VIRTUAL_DTOR(a) \
88 virtual ~a() {}
89 #define DBVT_VIRTUAL virtual
90 #define DBVT_PREFIX
91 #define DBVT_IPOLICY ICollide& policy
92 #define DBVT_CHECKTYPE
93 #endif
94
95 #if DBVT_USE_MEMMOVE
96 #if !defined(__CELLOS_LV2__) && !defined(__MWERKS__)
97 #include <memory.h>
98 #endif
99 #include <string.h>
100 #endif
101
102 #ifndef DBVT_USE_TEMPLATE
103 #error "DBVT_USE_TEMPLATE undefined"
104 #endif
105
106 #ifndef DBVT_USE_MEMMOVE
107 #error "DBVT_USE_MEMMOVE undefined"
108 #endif
109
110 #ifndef DBVT_ENABLE_BENCHMARK
111 #error "DBVT_ENABLE_BENCHMARK undefined"
112 #endif
113
114 #ifndef DBVT_SELECT_IMPL
115 #error "DBVT_SELECT_IMPL undefined"
116 #endif
117
118 #ifndef DBVT_MERGE_IMPL
119 #error "DBVT_MERGE_IMPL undefined"
120 #endif
121
122 #ifndef DBVT_INT0_IMPL
123 #error "DBVT_INT0_IMPL undefined"
124 #endif
125
126 //
// Default volumes
128 //
129
/* btDbvtAabbMm : axis-aligned bounding box stored as min (mi) / max (mx) corners */
struct btDbvtAabbMm
{
	DBVT_INLINE btDbvtAabbMm(){}
	// Geometric queries
	DBVT_INLINE btVector3 Center() const { return ((mi + mx) / 2); }   // box midpoint
	DBVT_INLINE btVector3 Lengths() const { return (mx - mi); }        // full extent per axis
	DBVT_INLINE btVector3 Extents() const { return ((mx - mi) / 2); }  // half extent per axis
	DBVT_INLINE const btVector3& Mins() const { return (mi); }
	DBVT_INLINE const btVector3& Maxs() const { return (mx); }
	// Factory helpers
	static inline btDbvtAabbMm FromCE(const btVector3& c, const btVector3& e);    // from center + half extents
	static inline btDbvtAabbMm FromCR(const btVector3& c, btScalar r);            // from center + radius (cube)
	static inline btDbvtAabbMm FromMM(const btVector3& mi, const btVector3& mx);  // from min/max corners
	static inline btDbvtAabbMm FromPoints(const btVector3* pts, int n);           // tight box over points (requires n >= 1)
	static inline btDbvtAabbMm FromPoints(const btVector3** ppts, int n);         // same, array-of-pointers variant
	DBVT_INLINE void Expand(const btVector3& e);        // grow symmetrically by e on both sides
	DBVT_INLINE void SignedExpand(const btVector3& e);  // grow only toward the sign of each component of e
	DBVT_INLINE bool Contain(const btDbvtAabbMm& a) const;                  // true if a lies fully inside this box
	DBVT_INLINE int Classify(const btVector3& n, btScalar o, int s) const;  // plane side test: -1 back, +1 front, 0 spanning
	DBVT_INLINE btScalar ProjectMinimum(const btVector3& v, unsigned signs) const;  // minimal projection of box onto v
	// Free-function operations declared as friends for direct mi/mx access
	DBVT_INLINE friend bool Intersect(const btDbvtAabbMm& a,
									  const btDbvtAabbMm& b);

	DBVT_INLINE friend bool Intersect(const btDbvtAabbMm& a,
									  const btVector3& b);

	DBVT_INLINE friend btScalar Proximity(const btDbvtAabbMm& a,
										  const btDbvtAabbMm& b);
	DBVT_INLINE friend int Select(const btDbvtAabbMm& o,
								  const btDbvtAabbMm& a,
								  const btDbvtAabbMm& b);
	DBVT_INLINE friend void Merge(const btDbvtAabbMm& a,
								  const btDbvtAabbMm& b,
								  btDbvtAabbMm& r);
	DBVT_INLINE friend bool NotEqual(const btDbvtAabbMm& a,
									 const btDbvtAabbMm& b);

	// Mutable corner access (the 't' prefix marks the non-const variants)
	DBVT_INLINE btVector3& tMins() { return (mi); }
	DBVT_INLINE btVector3& tMaxs() { return (mx); }

private:
	// Widens the [smi, smx] projection interval of this box along d; see AddSpan's definition.
	DBVT_INLINE void AddSpan(const btVector3& d, btScalar& smi, btScalar& smx) const;

private:
	btVector3 mi, mx;  // min / max corners; invariant: mi <= mx componentwise
};
175
176 // Types
177 typedef btDbvtAabbMm btDbvtVolume;
178
/* btDbvtNode : one node of the dynamic AABB tree */
struct btDbvtNode
{
	btDbvtVolume volume;  // bounding volume enclosing this whole subtree
	btDbvtNode* parent;   // 0 for the root
	// Leaf discrimination: internal nodes always have two children, so a
	// null childs[1] uniquely marks a leaf (whose storage aliases data).
	DBVT_INLINE bool isleaf() const { return (childs[1] == 0); }
	DBVT_INLINE bool isinternal() const { return (!isleaf()); }
	union {
		btDbvtNode* childs[2];  // internal node: both non-null
		void* data;             // leaf node: user payload
		int dataAsInt;          // leaf node: payload viewed as an int
	};
};
192
193 /* btDbv(normal)tNode */
194 struct btDbvntNode
195 {
196 btDbvtVolume volume;
197 btVector3 normal;
198 btScalar angle;
isleafbtDbvntNode199 DBVT_INLINE bool isleaf() const { return (childs[1] == 0); }
isinternalbtDbvntNode200 DBVT_INLINE bool isinternal() const { return (!isleaf()); }
201 btDbvntNode* childs[2];
202 void* data;
203
btDbvntNodebtDbvntNode204 btDbvntNode(const btDbvtNode* n)
205 : volume(n->volume)
206 , angle(0)
207 , normal(0,0,0)
208 , data(n->data)
209 {
210 childs[0] = 0;
211 childs[1] = 0;
212 }
213
~btDbvntNodebtDbvntNode214 ~btDbvntNode()
215 {
216 if (childs[0])
217 delete childs[0];
218 if (childs[1])
219 delete childs[1];
220 }
221 };
222
223 typedef btAlignedObjectArray<const btDbvtNode*> btNodeStack;
224
///The btDbvt class implements a fast dynamic bounding volume tree based on axis aligned bounding boxes (aabb tree).
///This btDbvt is used for soft body collision detection and for the btDbvtBroadphase. It has a fast insert, remove and update of nodes.
///Unlike the btQuantizedBvh, nodes can be dynamically moved around, which allows for change in topology of the underlying data structure.
struct btDbvt
{
	/* Stack element: a pair of nodes, used by the tree-vs-tree traversals */
	struct sStkNN
	{
		const btDbvtNode* a;
		const btDbvtNode* b;
		sStkNN() {}
		sStkNN(const btDbvtNode* na, const btDbvtNode* nb) : a(na), b(nb) {}
	};
	/* Stack element: node plus a plane-set bitmask (collideKDOP) */
	struct sStkNP
	{
		const btDbvtNode* node;
		int mask;
		sStkNP(const btDbvtNode* n, unsigned m) : node(n), mask(m) {}
	};
	/* Stack element: node, plane mask and a sort value (collideOCL) */
	struct sStkNPS
	{
		const btDbvtNode* node;
		int mask;
		btScalar value;
		sStkNPS() {}
		sStkNPS(const btDbvtNode* n, unsigned m, btScalar v) : node(n), mask(m), value(v) {}
	};
	/* Stack element: source node plus destination parent (clone) */
	struct sStkCLN
	{
		const btDbvtNode* node;
		btDbvtNode* parent;
		sStkCLN(const btDbvtNode* n, btDbvtNode* p) : node(n), parent(p) {}
	};

	/* Stack element: pair of normal-augmented nodes (selfCollideT) */
	struct sStknNN
	{
		const btDbvntNode* a;
		const btDbvntNode* b;
		sStknNN() {}
		sStknNN(const btDbvntNode* na, const btDbvntNode* nb) : a(na), b(nb) {}
	};
	// Policies/Interfaces

	/* ICollide : callback policy passed to the traversal methods.
	   With DBVT_USE_TEMPLATE the methods are resolved statically;
	   otherwise they are virtual. */
	struct ICollide
	{
		DBVT_VIRTUAL_DTOR(ICollide)
		DBVT_VIRTUAL void Process(const btDbvtNode*, const btDbvtNode*) {}
		DBVT_VIRTUAL void Process(const btDbvtNode*) {}
		DBVT_VIRTUAL void Process(const btDbvtNode* n, btScalar) { Process(n); }
		DBVT_VIRTUAL void Process(const btDbvntNode*, const btDbvntNode*) {}
		DBVT_VIRTUAL bool Descent(const btDbvtNode*) { return (true); }
		DBVT_VIRTUAL bool AllLeaves(const btDbvtNode*) { return (true); }
	};
	/* IWriter : serialization sink used by write() */
	struct IWriter
	{
		virtual ~IWriter() {}
		virtual void Prepare(const btDbvtNode* root, int numnodes) = 0;
		virtual void WriteNode(const btDbvtNode*, int index, int parent, int child0, int child1) = 0;
		virtual void WriteLeaf(const btDbvtNode*, int index, int parent) = 0;
	};
	/* IClone : per-leaf hook invoked by clone() */
	struct IClone
	{
		virtual ~IClone() {}
		virtual void CloneLeaf(btDbvtNode*) {}
	};

	// Constants: initial sizes for the traversal stacks
	enum
	{
		SIMPLE_STACKSIZE = 64,
		DOUBLE_STACKSIZE = SIMPLE_STACKSIZE * 2
	};

	// Fields
	btDbvtNode* m_root;    // tree root, 0 when empty
	btDbvtNode* m_free;    // single recycled node kept for reuse
	int m_lkhd;            // lookahead depth used when updating leaves
	int m_leaves;          // number of leaves in the tree
	unsigned m_opath;      // traversal path bits for incremental optimization

	btAlignedObjectArray<sStkNN> m_stkStack;  // persistent stack for collideTTpersistentStack

	// Methods
	btDbvt();
	~btDbvt();
	void clear();
	bool empty() const { return (0 == m_root); }
	void optimizeBottomUp();
	void optimizeTopDown(int bu_treshold = 128);
	void optimizeIncremental(int passes);
	btDbvtNode* insert(const btDbvtVolume& box, void* data);
	void update(btDbvtNode* leaf, int lookahead = -1);
	void update(btDbvtNode* leaf, btDbvtVolume& volume);
	bool update(btDbvtNode* leaf, btDbvtVolume& volume, const btVector3& velocity, btScalar margin);
	bool update(btDbvtNode* leaf, btDbvtVolume& volume, const btVector3& velocity);
	bool update(btDbvtNode* leaf, btDbvtVolume& volume, btScalar margin);
	void remove(btDbvtNode* leaf);
	void write(IWriter* iwriter) const;
	void clone(btDbvt& dest, IClone* iclone = 0) const;
	static int maxdepth(const btDbvtNode* node);
	static int countLeaves(const btDbvtNode* node);
	static void extractLeaves(const btDbvtNode* node, btAlignedObjectArray<const btDbvtNode*>& leaves);
#if DBVT_ENABLE_BENCHMARK
	static void benchmark();
#else
	// Benchmarking disabled: keep a no-op so callers compile unchanged.
	static void benchmark()
	{
	}
#endif
	// DBVT_IPOLICY must support ICollide policy/interface
	DBVT_PREFIX
	static void enumNodes(const btDbvtNode* root,
						  DBVT_IPOLICY);
	DBVT_PREFIX
	static void enumLeaves(const btDbvtNode* root,
						   DBVT_IPOLICY);
	DBVT_PREFIX
	void collideTT(const btDbvtNode* root0,
				   const btDbvtNode* root1,
				   DBVT_IPOLICY);
	DBVT_PREFIX
	void selfCollideT(const btDbvntNode* root,
					  DBVT_IPOLICY);
	DBVT_PREFIX
	void selfCollideTT(const btDbvtNode* root,
					   DBVT_IPOLICY);

	DBVT_PREFIX
	void collideTTpersistentStack(const btDbvtNode* root0,
								  const btDbvtNode* root1,
								  DBVT_IPOLICY);
#if 0
	DBVT_PREFIX
	void		collideTT(	const btDbvtNode* root0,
		const btDbvtNode* root1,
		const btTransform& xform,
		DBVT_IPOLICY);
	DBVT_PREFIX
	void		collideTT(	const btDbvtNode* root0,
		const btTransform& xform0,
		const btDbvtNode* root1,
		const btTransform& xform1,
		DBVT_IPOLICY);
#endif

	DBVT_PREFIX
	void collideTV(const btDbvtNode* root,
				   const btDbvtVolume& volume,
				   DBVT_IPOLICY) const;

	DBVT_PREFIX
	void collideTVNoStackAlloc(const btDbvtNode* root,
							   const btDbvtVolume& volume,
							   btNodeStack& stack,
							   DBVT_IPOLICY) const;

	///rayTest is a re-entrant ray test, and can be called in parallel as long as the btAlignedAlloc is thread-safe (uses locking etc)
	///rayTest is slower than rayTestInternal, because it builds a local stack, using memory allocations, and it recomputes signs/rayDirectionInverses each time
	DBVT_PREFIX
	static void rayTest(const btDbvtNode* root,
						const btVector3& rayFrom,
						const btVector3& rayTo,
						DBVT_IPOLICY);
	///rayTestInternal is faster than rayTest, because it uses a persistent stack (to reduce dynamic memory allocations to a minimum) and it uses precomputed signs/rayInverseDirections
	///rayTestInternal is used by btDbvtBroadphase to accelerate world ray casts
	DBVT_PREFIX
	void rayTestInternal(const btDbvtNode* root,
						 const btVector3& rayFrom,
						 const btVector3& rayTo,
						 const btVector3& rayDirectionInverse,
						 unsigned int signs[3],
						 btScalar lambda_max,
						 const btVector3& aabbMin,
						 const btVector3& aabbMax,
						 btAlignedObjectArray<const btDbvtNode*>& stack,
						 DBVT_IPOLICY) const;

	DBVT_PREFIX
	static void collideKDOP(const btDbvtNode* root,
							const btVector3* normals,
							const btScalar* offsets,
							int count,
							DBVT_IPOLICY);
	DBVT_PREFIX
	static void collideOCL(const btDbvtNode* root,
						   const btVector3* normals,
						   const btScalar* offsets,
						   const btVector3& sortaxis,
						   int count,
						   DBVT_IPOLICY,
						   bool fullsort = true);
	DBVT_PREFIX
	static void collideTU(const btDbvtNode* root,
						  DBVT_IPOLICY);
	// Helpers
	// Binary search over the index array i[l..h) into a, whose referenced
	// values appear in decreasing order; returns the position for v.
	static DBVT_INLINE int nearest(const int* i, const btDbvt::sStkNPS* a, btScalar v, int l, int h)
	{
		int m = 0;
		while (l < h)
		{
			m = (l + h) >> 1;
			if (a[i[m]].value >= v)
				l = m + 1;
			else
				h = m;
		}
		return (h);
	}
	// Stores value in stock, reusing a free slot from ifree when available;
	// returns the slot index.
	static DBVT_INLINE int allocate(btAlignedObjectArray<int>& ifree,
									btAlignedObjectArray<sStkNPS>& stock,
									const sStkNPS& value)
	{
		int i;
		if (ifree.size() > 0)
		{
			// Pop a recycled index and overwrite its slot.
			i = ifree[ifree.size() - 1];
			ifree.pop_back();
			stock[i] = value;
		}
		else
		{
			// No free slot: append at the end.
			i = stock.size();
			stock.push_back(value);
		}
		return (i);
	}
	//
private:
	btDbvt(const btDbvt&) {}  // non-copyable (tree owns its nodes)
};
458
459 //
460 // Inline's
461 //
462
463 //
FromCE(const btVector3 & c,const btVector3 & e)464 inline btDbvtAabbMm btDbvtAabbMm::FromCE(const btVector3& c, const btVector3& e)
465 {
466 btDbvtAabbMm box;
467 box.mi = c - e;
468 box.mx = c + e;
469 return (box);
470 }
471
472 //
FromCR(const btVector3 & c,btScalar r)473 inline btDbvtAabbMm btDbvtAabbMm::FromCR(const btVector3& c, btScalar r)
474 {
475 return (FromCE(c, btVector3(r, r, r)));
476 }
477
478 //
FromMM(const btVector3 & mi,const btVector3 & mx)479 inline btDbvtAabbMm btDbvtAabbMm::FromMM(const btVector3& mi, const btVector3& mx)
480 {
481 btDbvtAabbMm box;
482 box.mi = mi;
483 box.mx = mx;
484 return (box);
485 }
486
487 //
FromPoints(const btVector3 * pts,int n)488 inline btDbvtAabbMm btDbvtAabbMm::FromPoints(const btVector3* pts, int n)
489 {
490 btDbvtAabbMm box;
491 box.mi = box.mx = pts[0];
492 for (int i = 1; i < n; ++i)
493 {
494 box.mi.setMin(pts[i]);
495 box.mx.setMax(pts[i]);
496 }
497 return (box);
498 }
499
500 //
FromPoints(const btVector3 ** ppts,int n)501 inline btDbvtAabbMm btDbvtAabbMm::FromPoints(const btVector3** ppts, int n)
502 {
503 btDbvtAabbMm box;
504 box.mi = box.mx = *ppts[0];
505 for (int i = 1; i < n; ++i)
506 {
507 box.mi.setMin(*ppts[i]);
508 box.mx.setMax(*ppts[i]);
509 }
510 return (box);
511 }
512
513 //
Expand(const btVector3 & e)514 DBVT_INLINE void btDbvtAabbMm::Expand(const btVector3& e)
515 {
516 mi -= e;
517 mx += e;
518 }
519
520 //
SignedExpand(const btVector3 & e)521 DBVT_INLINE void btDbvtAabbMm::SignedExpand(const btVector3& e)
522 {
523 if (e.x() > 0)
524 mx.setX(mx.x() + e[0]);
525 else
526 mi.setX(mi.x() + e[0]);
527 if (e.y() > 0)
528 mx.setY(mx.y() + e[1]);
529 else
530 mi.setY(mi.y() + e[1]);
531 if (e.z() > 0)
532 mx.setZ(mx.z() + e[2]);
533 else
534 mi.setZ(mi.z() + e[2]);
535 }
536
537 //
Contain(const btDbvtAabbMm & a)538 DBVT_INLINE bool btDbvtAabbMm::Contain(const btDbvtAabbMm& a) const
539 {
540 return ((mi.x() <= a.mi.x()) &&
541 (mi.y() <= a.mi.y()) &&
542 (mi.z() <= a.mi.z()) &&
543 (mx.x() >= a.mx.x()) &&
544 (mx.y() >= a.mx.y()) &&
545 (mx.z() >= a.mx.z()));
546 }
547
548 //
Classify(const btVector3 & n,btScalar o,int s)549 DBVT_INLINE int btDbvtAabbMm::Classify(const btVector3& n, btScalar o, int s) const
550 {
551 btVector3 pi, px;
552 switch (s)
553 {
554 case (0 + 0 + 0):
555 px = btVector3(mi.x(), mi.y(), mi.z());
556 pi = btVector3(mx.x(), mx.y(), mx.z());
557 break;
558 case (1 + 0 + 0):
559 px = btVector3(mx.x(), mi.y(), mi.z());
560 pi = btVector3(mi.x(), mx.y(), mx.z());
561 break;
562 case (0 + 2 + 0):
563 px = btVector3(mi.x(), mx.y(), mi.z());
564 pi = btVector3(mx.x(), mi.y(), mx.z());
565 break;
566 case (1 + 2 + 0):
567 px = btVector3(mx.x(), mx.y(), mi.z());
568 pi = btVector3(mi.x(), mi.y(), mx.z());
569 break;
570 case (0 + 0 + 4):
571 px = btVector3(mi.x(), mi.y(), mx.z());
572 pi = btVector3(mx.x(), mx.y(), mi.z());
573 break;
574 case (1 + 0 + 4):
575 px = btVector3(mx.x(), mi.y(), mx.z());
576 pi = btVector3(mi.x(), mx.y(), mi.z());
577 break;
578 case (0 + 2 + 4):
579 px = btVector3(mi.x(), mx.y(), mx.z());
580 pi = btVector3(mx.x(), mi.y(), mi.z());
581 break;
582 case (1 + 2 + 4):
583 px = btVector3(mx.x(), mx.y(), mx.z());
584 pi = btVector3(mi.x(), mi.y(), mi.z());
585 break;
586 }
587 if ((btDot(n, px) + o) < 0) return (-1);
588 if ((btDot(n, pi) + o) >= 0) return (+1);
589 return (0);
590 }
591
592 //
ProjectMinimum(const btVector3 & v,unsigned signs)593 DBVT_INLINE btScalar btDbvtAabbMm::ProjectMinimum(const btVector3& v, unsigned signs) const
594 {
595 const btVector3* b[] = {&mx, &mi};
596 const btVector3 p(b[(signs >> 0) & 1]->x(),
597 b[(signs >> 1) & 1]->y(),
598 b[(signs >> 2) & 1]->z());
599 return (btDot(p, v));
600 }
601
602 //
AddSpan(const btVector3 & d,btScalar & smi,btScalar & smx)603 DBVT_INLINE void btDbvtAabbMm::AddSpan(const btVector3& d, btScalar& smi, btScalar& smx) const
604 {
605 for (int i = 0; i < 3; ++i)
606 {
607 if (d[i] < 0)
608 {
609 smi += mx[i] * d[i];
610 smx += mi[i] * d[i];
611 }
612 else
613 {
614 smi += mi[i] * d[i];
615 smx += mx[i] * d[i];
616 }
617 }
618 }
619
620 //
// Box-vs-box overlap test (inclusive on the boundary).
DBVT_INLINE bool Intersect(const btDbvtAabbMm& a,
						   const btDbvtAabbMm& b)
{
#if DBVT_INT0_IMPL == DBVT_IMPL_SSE
	// SSE path: the boxes are disjoint iff some lane of b.mx < a.mi or of
	// a.mx < b.mi; OR the two comparison masks and check for all-zero.
	const __m128 rt(_mm_or_ps(_mm_cmplt_ps(_mm_load_ps(b.mx), _mm_load_ps(a.mi)),
							  _mm_cmplt_ps(_mm_load_ps(a.mx), _mm_load_ps(b.mi))));
#if defined(_WIN32)
	const __int32* pu((const __int32*)&rt);
#else
	const int* pu((const int*)&rt);
#endif
	// Only the first three lanes carry axis results (the fourth is padding).
	return ((pu[0] | pu[1] | pu[2]) == 0);
#else
	// Generic path: overlap on all three axes.
	return ((a.mi.x() <= b.mx.x()) &&
			(a.mx.x() >= b.mi.x()) &&
			(a.mi.y() <= b.mx.y()) &&
			(a.mx.y() >= b.mi.y()) &&
			(a.mi.z() <= b.mx.z()) &&
			(a.mx.z() >= b.mi.z()));
#endif
}
642
643 //
Intersect(const btDbvtAabbMm & a,const btVector3 & b)644 DBVT_INLINE bool Intersect(const btDbvtAabbMm& a,
645 const btVector3& b)
646 {
647 return ((b.x() >= a.mi.x()) &&
648 (b.y() >= a.mi.y()) &&
649 (b.z() >= a.mi.z()) &&
650 (b.x() <= a.mx.x()) &&
651 (b.y() <= a.mx.y()) &&
652 (b.z() <= a.mx.z()));
653 }
654
655 //////////////////////////////////////
656
657 //
Proximity(const btDbvtAabbMm & a,const btDbvtAabbMm & b)658 DBVT_INLINE btScalar Proximity(const btDbvtAabbMm& a,
659 const btDbvtAabbMm& b)
660 {
661 const btVector3 d = (a.mi + a.mx) - (b.mi + b.mx);
662 return (btFabs(d.x()) + btFabs(d.y()) + btFabs(d.z()));
663 }
664
665 //
// Returns 0 if a is at least as close to o as b is (by the Proximity
// metric), else 1. Used to pick which child to descend into.
DBVT_INLINE int Select(const btDbvtAabbMm& o,
					   const btDbvtAabbMm& a,
					   const btDbvtAabbMm& b)
{
#if DBVT_SELECT_IMPL == DBVT_IMPL_SSE

	// Mask clearing the sign bits, i.e. a vectorized fabs().
#if defined(_WIN32)
	static ATTRIBUTE_ALIGNED16(const unsigned __int32) mask[] = {0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff};
#else
	// Fourth lane zeroed so the padding component never contributes.
	static ATTRIBUTE_ALIGNED16(const unsigned int) mask[] = {0x7fffffff, 0x7fffffff, 0x7fffffff, 0x00000000 /*0x7fffffff*/};
#endif
	///@todo: the intrinsic version is 11% slower
#if DBVT_USE_INTRINSIC_SSE

	union btSSEUnion  ///NOTE: if we use more intrinsics, move btSSEUnion into the LinearMath directory
	{
		__m128 ssereg;
		float floats[4];
		int ints[4];
	};

	// Compute |(a.mi+a.mx) - (o.mi+o.mx)| and the same for b (doubled
	// center deltas with sign stripped by the mask).
	__m128 omi(_mm_load_ps(o.mi));
	omi = _mm_add_ps(omi, _mm_load_ps(o.mx));
	__m128 ami(_mm_load_ps(a.mi));
	ami = _mm_add_ps(ami, _mm_load_ps(a.mx));
	ami = _mm_sub_ps(ami, omi);
	ami = _mm_and_ps(ami, _mm_load_ps((const float*)mask));
	__m128 bmi(_mm_load_ps(b.mi));
	bmi = _mm_add_ps(bmi, _mm_load_ps(b.mx));
	bmi = _mm_sub_ps(bmi, omi);
	bmi = _mm_and_ps(bmi, _mm_load_ps((const float*)mask));
	// Horizontal add of the three axis distances into lane 0.
	__m128 t0(_mm_movehl_ps(ami, ami));
	ami = _mm_add_ps(ami, t0);
	ami = _mm_add_ss(ami, _mm_shuffle_ps(ami, ami, 1));
	__m128 t1(_mm_movehl_ps(bmi, bmi));
	bmi = _mm_add_ps(bmi, t1);
	bmi = _mm_add_ss(bmi, _mm_shuffle_ps(bmi, bmi, 1));

	// cmple sets lane 0 to all-ones when dist(b) <= dist(a); bit 0 of that
	// mask is the 0/1 result.
	btSSEUnion tmp;
	tmp.ssereg = _mm_cmple_ss(bmi, ami);
	return tmp.ints[0] & 1;

#else
	// Legacy MSVC inline-asm variant of the same computation.
	ATTRIBUTE_ALIGNED16(__int32 r[1]);
	__asm
	{
		mov eax,o
		mov ecx,a
		mov edx,b
		movaps xmm0,[eax]
		movaps xmm5,mask
		addps xmm0,[eax+16]
		movaps xmm1,[ecx]
		movaps xmm2,[edx]
		addps xmm1,[ecx+16]
		addps xmm2,[edx+16]
		subps xmm1,xmm0
		subps xmm2,xmm0
		andps xmm1,xmm5
		andps xmm2,xmm5
		movhlps xmm3,xmm1
		movhlps xmm4,xmm2
		addps xmm1,xmm3
		addps xmm2,xmm4
		pshufd xmm3,xmm1,1
		pshufd xmm4,xmm2,1
		addss xmm1,xmm3
		addss xmm2,xmm4
		cmpless xmm2,xmm1
		movss r,xmm2
	}
	return (r[0] & 1);
#endif
#else
	// Generic path.
	return (Proximity(o, a) < Proximity(o, b) ? 0 : 1);
#endif
}
743
744 //
// Computes the union of boxes a and b into r (r may alias a or b).
DBVT_INLINE void Merge(const btDbvtAabbMm& a,
					   const btDbvtAabbMm& b,
					   btDbvtAabbMm& r)
{
#if DBVT_MERGE_IMPL == DBVT_IMPL_SSE
	// SSE path: componentwise min of the min corners, max of the max corners.
	__m128 ami(_mm_load_ps(a.mi));
	__m128 amx(_mm_load_ps(a.mx));
	__m128 bmi(_mm_load_ps(b.mi));
	__m128 bmx(_mm_load_ps(b.mx));
	ami = _mm_min_ps(ami, bmi);
	amx = _mm_max_ps(amx, bmx);
	_mm_store_ps(r.mi, ami);
	_mm_store_ps(r.mx, amx);
#else
	// Generic path: per-axis min/max.
	for (int i = 0; i < 3; ++i)
	{
		if (a.mi[i] < b.mi[i])
			r.mi[i] = a.mi[i];
		else
			r.mi[i] = b.mi[i];
		if (a.mx[i] > b.mx[i])
			r.mx[i] = a.mx[i];
		else
			r.mx[i] = b.mx[i];
	}
#endif
}
772
773 //
NotEqual(const btDbvtAabbMm & a,const btDbvtAabbMm & b)774 DBVT_INLINE bool NotEqual(const btDbvtAabbMm& a,
775 const btDbvtAabbMm& b)
776 {
777 return ((a.mi.x() != b.mi.x()) ||
778 (a.mi.y() != b.mi.y()) ||
779 (a.mi.z() != b.mi.z()) ||
780 (a.mx.x() != b.mx.x()) ||
781 (a.mx.y() != b.mx.y()) ||
782 (a.mx.z() != b.mx.z()));
783 }
784
785 //
786 // Inline's
787 //
788
789 //
790 DBVT_PREFIX
enumNodes(const btDbvtNode * root,DBVT_IPOLICY)791 inline void btDbvt::enumNodes(const btDbvtNode* root,
792 DBVT_IPOLICY)
793 {
794 DBVT_CHECKTYPE
795 policy.Process(root);
796 if (root->isinternal())
797 {
798 enumNodes(root->childs[0], policy);
799 enumNodes(root->childs[1], policy);
800 }
801 }
802
803 //
804 DBVT_PREFIX
enumLeaves(const btDbvtNode * root,DBVT_IPOLICY)805 inline void btDbvt::enumLeaves(const btDbvtNode* root,
806 DBVT_IPOLICY)
807 {
808 DBVT_CHECKTYPE
809 if (root->isinternal())
810 {
811 enumLeaves(root->childs[0], policy);
812 enumLeaves(root->childs[1], policy);
813 }
814 else
815 {
816 policy.Process(root);
817 }
818 }
819
820 //
// Tree-vs-tree collision: reports every intersecting leaf pair to the
// policy. Handles the self-collision case (root0 == root1) by expanding
// identical pairs into their child combinations without an overlap test.
DBVT_PREFIX
inline void btDbvt::collideTT(const btDbvtNode* root0,
							  const btDbvtNode* root1,
							  DBVT_IPOLICY)
{
	DBVT_CHECKTYPE
	if (root0 && root1)
	{
		// Explicit stack instead of recursion; grown when nearly full.
		int depth = 1;
		int treshold = DOUBLE_STACKSIZE - 4;  // leave room for up to 4 pushes
		btAlignedObjectArray<sStkNN> stkStack;
		stkStack.resize(DOUBLE_STACKSIZE);
		stkStack[0] = sStkNN(root0, root1);
		do
		{
			sStkNN p = stkStack[--depth];
			if (depth > treshold)
			{
				// Near capacity: double before this iteration pushes.
				stkStack.resize(stkStack.size() * 2);
				treshold = stkStack.size() - 4;
			}
			if (p.a == p.b)
			{
				// Same subtree: expand into the three distinct child pairs
				// (no overlap test needed — a node always overlaps itself).
				if (p.a->isinternal())
				{
					stkStack[depth++] = sStkNN(p.a->childs[0], p.a->childs[0]);
					stkStack[depth++] = sStkNN(p.a->childs[1], p.a->childs[1]);
					stkStack[depth++] = sStkNN(p.a->childs[0], p.a->childs[1]);
				}
			}
			else if (Intersect(p.a->volume, p.b->volume))
			{
				// Descend into the larger side(s) of the overlapping pair.
				if (p.a->isinternal())
				{
					if (p.b->isinternal())
					{
						stkStack[depth++] = sStkNN(p.a->childs[0], p.b->childs[0]);
						stkStack[depth++] = sStkNN(p.a->childs[1], p.b->childs[0]);
						stkStack[depth++] = sStkNN(p.a->childs[0], p.b->childs[1]);
						stkStack[depth++] = sStkNN(p.a->childs[1], p.b->childs[1]);
					}
					else
					{
						stkStack[depth++] = sStkNN(p.a->childs[0], p.b);
						stkStack[depth++] = sStkNN(p.a->childs[1], p.b);
					}
				}
				else
				{
					if (p.b->isinternal())
					{
						stkStack[depth++] = sStkNN(p.a, p.b->childs[0]);
						stkStack[depth++] = sStkNN(p.a, p.b->childs[1]);
					}
					else
					{
						// Two overlapping leaves: report the pair.
						policy.Process(p.a, p.b);
					}
				}
			}
		} while (depth);
	}
}
884
885 //
// Self-collision over a normal-augmented (btDbvntNode) tree. Identical in
// structure to collideTT's self case, except that a subtree is only tested
// against itself when its normal-cone angle exceeds SIMD_PI (i.e. its faces
// are spread widely enough to possibly touch each other).
DBVT_PREFIX
inline void btDbvt::selfCollideT(const btDbvntNode* root,
								 DBVT_IPOLICY)
{
	DBVT_CHECKTYPE
	if (root)
	{
		// Explicit stack instead of recursion; grown when nearly full.
		int depth = 1;
		int treshold = DOUBLE_STACKSIZE - 4;  // leave room for up to 4 pushes
		btAlignedObjectArray<sStknNN> stkStack;
		stkStack.resize(DOUBLE_STACKSIZE);
		stkStack[0] = sStknNN(root, root);
		do
		{
			sStknNN p = stkStack[--depth];
			if (depth > treshold)
			{
				stkStack.resize(stkStack.size() * 2);
				treshold = stkStack.size() - 4;
			}
			if (p.a == p.b)
			{
				// Same subtree: only expand when the normal cone is wide
				// enough (angle > SIMD_PI) for self-contact to be possible.
				if (p.a->isinternal() && p.a->angle > SIMD_PI)
				{
					stkStack[depth++] = sStknNN(p.a->childs[0], p.a->childs[0]);
					stkStack[depth++] = sStknNN(p.a->childs[1], p.a->childs[1]);
					stkStack[depth++] = sStknNN(p.a->childs[0], p.a->childs[1]);
				}
			}
			else if (Intersect(p.a->volume, p.b->volume))
			{
				if (p.a->isinternal())
				{
					if (p.b->isinternal())
					{
						stkStack[depth++] = sStknNN(p.a->childs[0], p.b->childs[0]);
						stkStack[depth++] = sStknNN(p.a->childs[1], p.b->childs[0]);
						stkStack[depth++] = sStknNN(p.a->childs[0], p.b->childs[1]);
						stkStack[depth++] = sStknNN(p.a->childs[1], p.b->childs[1]);
					}
					else
					{
						stkStack[depth++] = sStknNN(p.a->childs[0], p.b);
						stkStack[depth++] = sStknNN(p.a->childs[1], p.b);
					}
				}
				else
				{
					if (p.b->isinternal())
					{
						stkStack[depth++] = sStknNN(p.a, p.b->childs[0]);
						stkStack[depth++] = sStknNN(p.a, p.b->childs[1]);
					}
					else
					{
						// Two overlapping leaves: report the pair.
						policy.Process(p.a, p.b);
					}
				}
			}
		} while (depth);
	}
}
948
949 //
// Self-collision over a plain btDbvtNode tree: equivalent to
// collideTT(root, root, policy) but with a single root parameter.
DBVT_PREFIX
inline void btDbvt::selfCollideTT(const btDbvtNode* root,
								  DBVT_IPOLICY)
{
	DBVT_CHECKTYPE
	if (root)
	{
		// Explicit stack instead of recursion; grown when nearly full.
		int depth = 1;
		int treshold = DOUBLE_STACKSIZE - 4;  // leave room for up to 4 pushes
		btAlignedObjectArray<sStkNN> stkStack;
		stkStack.resize(DOUBLE_STACKSIZE);
		stkStack[0] = sStkNN(root, root);
		do
		{
			sStkNN p = stkStack[--depth];
			if (depth > treshold)
			{
				stkStack.resize(stkStack.size() * 2);
				treshold = stkStack.size() - 4;
			}
			if (p.a == p.b)
			{
				// Same subtree: expand into the three distinct child pairs.
				if (p.a->isinternal())
				{
					stkStack[depth++] = sStkNN(p.a->childs[0], p.a->childs[0]);
					stkStack[depth++] = sStkNN(p.a->childs[1], p.a->childs[1]);
					stkStack[depth++] = sStkNN(p.a->childs[0], p.a->childs[1]);
				}
			}
			else if (Intersect(p.a->volume, p.b->volume))
			{
				if (p.a->isinternal())
				{
					if (p.b->isinternal())
					{
						stkStack[depth++] = sStkNN(p.a->childs[0], p.b->childs[0]);
						stkStack[depth++] = sStkNN(p.a->childs[1], p.b->childs[0]);
						stkStack[depth++] = sStkNN(p.a->childs[0], p.b->childs[1]);
						stkStack[depth++] = sStkNN(p.a->childs[1], p.b->childs[1]);
					}
					else
					{
						stkStack[depth++] = sStkNN(p.a->childs[0], p.b);
						stkStack[depth++] = sStkNN(p.a->childs[1], p.b);
					}
				}
				else
				{
					if (p.b->isinternal())
					{
						stkStack[depth++] = sStkNN(p.a, p.b->childs[0]);
						stkStack[depth++] = sStkNN(p.a, p.b->childs[1]);
					}
					else
					{
						// Two overlapping leaves: report the pair.
						policy.Process(p.a, p.b);
					}
				}
			}
		} while (depth);
	}
}
1012
1013
// Same traversal as collideTT, but reuses the member stack m_stkStack
// across calls to avoid per-call allocations. NOT re-entrant: concurrent
// calls on the same btDbvt would share m_stkStack.
DBVT_PREFIX
inline void btDbvt::collideTTpersistentStack(const btDbvtNode* root0,
											 const btDbvtNode* root1,
											 DBVT_IPOLICY)
{
	DBVT_CHECKTYPE
	if (root0 && root1)
	{
		int depth = 1;
		int treshold = DOUBLE_STACKSIZE - 4;  // leave room for up to 4 pushes

		m_stkStack.resize(DOUBLE_STACKSIZE);
		m_stkStack[0] = sStkNN(root0, root1);
		do
		{
			sStkNN p = m_stkStack[--depth];
			if (depth > treshold)
			{
				// Near capacity: double before this iteration pushes.
				m_stkStack.resize(m_stkStack.size() * 2);
				treshold = m_stkStack.size() - 4;
			}
			if (p.a == p.b)
			{
				// Same subtree: expand into the three distinct child pairs.
				if (p.a->isinternal())
				{
					m_stkStack[depth++] = sStkNN(p.a->childs[0], p.a->childs[0]);
					m_stkStack[depth++] = sStkNN(p.a->childs[1], p.a->childs[1]);
					m_stkStack[depth++] = sStkNN(p.a->childs[0], p.a->childs[1]);
				}
			}
			else if (Intersect(p.a->volume, p.b->volume))
			{
				if (p.a->isinternal())
				{
					if (p.b->isinternal())
					{
						m_stkStack[depth++] = sStkNN(p.a->childs[0], p.b->childs[0]);
						m_stkStack[depth++] = sStkNN(p.a->childs[1], p.b->childs[0]);
						m_stkStack[depth++] = sStkNN(p.a->childs[0], p.b->childs[1]);
						m_stkStack[depth++] = sStkNN(p.a->childs[1], p.b->childs[1]);
					}
					else
					{
						m_stkStack[depth++] = sStkNN(p.a->childs[0], p.b);
						m_stkStack[depth++] = sStkNN(p.a->childs[1], p.b);
					}
				}
				else
				{
					if (p.b->isinternal())
					{
						m_stkStack[depth++] = sStkNN(p.a, p.b->childs[0]);
						m_stkStack[depth++] = sStkNN(p.a, p.b->childs[1]);
					}
					else
					{
						// Two overlapping leaves: report the pair.
						policy.Process(p.a, p.b);
					}
				}
			}
		} while (depth);
	}
}
1077
#if 0
// NOTE(review): transform-aware collideTT variants, disabled upstream with
// #if 0; kept for reference. They depend on an Intersect(volume, volume,
// xform) overload not visible in this header — confirm before re-enabling.
//
DBVT_PREFIX
inline void		btDbvt::collideTT(	const btDbvtNode* root0,
								  const btDbvtNode* root1,
								  const btTransform& xform,
								  DBVT_IPOLICY)
{
	DBVT_CHECKTYPE
	if(root0&&root1)
	{
		int								depth=1;
		int								treshold=DOUBLE_STACKSIZE-4;
		btAlignedObjectArray<sStkNN>	stkStack;
		stkStack.resize(DOUBLE_STACKSIZE);
		stkStack[0]=sStkNN(root0,root1);
		do	{
			sStkNN	p=stkStack[--depth];
			if(Intersect(p.a->volume,p.b->volume,xform))
			{
				if(depth>treshold)
				{
					stkStack.resize(stkStack.size()*2);
					treshold=stkStack.size()-4;
				}
				if(p.a->isinternal())
				{
					if(p.b->isinternal())
					{
						stkStack[depth++]=sStkNN(p.a->childs[0],p.b->childs[0]);
						stkStack[depth++]=sStkNN(p.a->childs[1],p.b->childs[0]);
						stkStack[depth++]=sStkNN(p.a->childs[0],p.b->childs[1]);
						stkStack[depth++]=sStkNN(p.a->childs[1],p.b->childs[1]);
					}
					else
					{
						stkStack[depth++]=sStkNN(p.a->childs[0],p.b);
						stkStack[depth++]=sStkNN(p.a->childs[1],p.b);
					}
				}
				else
				{
					if(p.b->isinternal())
					{
						stkStack[depth++]=sStkNN(p.a,p.b->childs[0]);
						stkStack[depth++]=sStkNN(p.a,p.b->childs[1]);
					}
					else
					{
						policy.Process(p.a,p.b);
					}
				}
			}
		} while(depth);
	}
}
//
DBVT_PREFIX
inline void		btDbvt::collideTT(	const btDbvtNode* root0,
								  const btTransform& xform0,
								  const btDbvtNode* root1,
								  const btTransform& xform1,
								  DBVT_IPOLICY)
{
	const btTransform	xform=xform0.inverse()*xform1;
	collideTT(root0,root1,xform,policy);
}
#endif
1146
1147 DBVT_PREFIX
collideTV(const btDbvtNode * root,const btDbvtVolume & vol,DBVT_IPOLICY)1148 inline void btDbvt::collideTV(const btDbvtNode* root,
1149 const btDbvtVolume& vol,
1150 DBVT_IPOLICY) const
1151 {
1152 DBVT_CHECKTYPE
1153 if (root)
1154 {
1155 ATTRIBUTE_ALIGNED16(btDbvtVolume)
1156 volume(vol);
1157 btAlignedObjectArray<const btDbvtNode*> stack;
1158 stack.resize(0);
1159 #ifndef BT_DISABLE_STACK_TEMP_MEMORY
1160 char tempmemory[SIMPLE_STACKSIZE * sizeof(const btDbvtNode*)];
1161 stack.initializeFromBuffer(tempmemory, 0, SIMPLE_STACKSIZE);
1162 #else
1163 stack.reserve(SIMPLE_STACKSIZE);
1164 #endif //BT_DISABLE_STACK_TEMP_MEMORY
1165
1166 stack.push_back(root);
1167 do
1168 {
1169 const btDbvtNode* n = stack[stack.size() - 1];
1170 stack.pop_back();
1171 if (Intersect(n->volume, volume))
1172 {
1173 if (n->isinternal())
1174 {
1175 stack.push_back(n->childs[0]);
1176 stack.push_back(n->childs[1]);
1177 }
1178 else
1179 {
1180 policy.Process(n);
1181 }
1182 }
1183 } while (stack.size() > 0);
1184 }
1185 }
1186
1187 //
1188 DBVT_PREFIX
collideTVNoStackAlloc(const btDbvtNode * root,const btDbvtVolume & vol,btNodeStack & stack,DBVT_IPOLICY)1189 inline void btDbvt::collideTVNoStackAlloc(const btDbvtNode* root,
1190 const btDbvtVolume& vol,
1191 btNodeStack& stack,
1192 DBVT_IPOLICY) const
1193 {
1194 DBVT_CHECKTYPE
1195 if (root)
1196 {
1197 ATTRIBUTE_ALIGNED16(btDbvtVolume)
1198 volume(vol);
1199 stack.resize(0);
1200 stack.reserve(SIMPLE_STACKSIZE);
1201 stack.push_back(root);
1202 do
1203 {
1204 const btDbvtNode* n = stack[stack.size() - 1];
1205 stack.pop_back();
1206 if (Intersect(n->volume, volume))
1207 {
1208 if (n->isinternal())
1209 {
1210 stack.push_back(n->childs[0]);
1211 stack.push_back(n->childs[1]);
1212 }
1213 else
1214 {
1215 policy.Process(n);
1216 }
1217 }
1218 } while (stack.size() > 0);
1219 }
1220 }
1221
1222 DBVT_PREFIX
rayTestInternal(const btDbvtNode * root,const btVector3 & rayFrom,const btVector3 & rayTo,const btVector3 & rayDirectionInverse,unsigned int signs[3],btScalar lambda_max,const btVector3 & aabbMin,const btVector3 & aabbMax,btAlignedObjectArray<const btDbvtNode * > & stack,DBVT_IPOLICY)1223 inline void btDbvt::rayTestInternal(const btDbvtNode* root,
1224 const btVector3& rayFrom,
1225 const btVector3& rayTo,
1226 const btVector3& rayDirectionInverse,
1227 unsigned int signs[3],
1228 btScalar lambda_max,
1229 const btVector3& aabbMin,
1230 const btVector3& aabbMax,
1231 btAlignedObjectArray<const btDbvtNode*>& stack,
1232 DBVT_IPOLICY) const
1233 {
1234 (void)rayTo;
1235 DBVT_CHECKTYPE
1236 if (root)
1237 {
1238 btVector3 resultNormal;
1239
1240 int depth = 1;
1241 int treshold = DOUBLE_STACKSIZE - 2;
1242 stack.resize(DOUBLE_STACKSIZE);
1243 stack[0] = root;
1244 btVector3 bounds[2];
1245 do
1246 {
1247 const btDbvtNode* node = stack[--depth];
1248 bounds[0] = node->volume.Mins() - aabbMax;
1249 bounds[1] = node->volume.Maxs() - aabbMin;
1250 btScalar tmin = 1.f, lambda_min = 0.f;
1251 unsigned int result1 = false;
1252 result1 = btRayAabb2(rayFrom, rayDirectionInverse, signs, bounds, tmin, lambda_min, lambda_max);
1253 if (result1)
1254 {
1255 if (node->isinternal())
1256 {
1257 if (depth > treshold)
1258 {
1259 stack.resize(stack.size() * 2);
1260 treshold = stack.size() - 2;
1261 }
1262 stack[depth++] = node->childs[0];
1263 stack[depth++] = node->childs[1];
1264 }
1265 else
1266 {
1267 policy.Process(node);
1268 }
1269 }
1270 } while (depth);
1271 }
1272 }
1273
1274 //
1275 DBVT_PREFIX
rayTest(const btDbvtNode * root,const btVector3 & rayFrom,const btVector3 & rayTo,DBVT_IPOLICY)1276 inline void btDbvt::rayTest(const btDbvtNode* root,
1277 const btVector3& rayFrom,
1278 const btVector3& rayTo,
1279 DBVT_IPOLICY)
1280 {
1281 DBVT_CHECKTYPE
1282 if (root)
1283 {
1284 btVector3 rayDir = (rayTo - rayFrom);
1285 rayDir.normalize();
1286
1287 ///what about division by zero? --> just set rayDirection[i] to INF/BT_LARGE_FLOAT
1288 btVector3 rayDirectionInverse;
1289 rayDirectionInverse[0] = rayDir[0] == btScalar(0.0) ? btScalar(BT_LARGE_FLOAT) : btScalar(1.0) / rayDir[0];
1290 rayDirectionInverse[1] = rayDir[1] == btScalar(0.0) ? btScalar(BT_LARGE_FLOAT) : btScalar(1.0) / rayDir[1];
1291 rayDirectionInverse[2] = rayDir[2] == btScalar(0.0) ? btScalar(BT_LARGE_FLOAT) : btScalar(1.0) / rayDir[2];
1292 unsigned int signs[3] = {rayDirectionInverse[0] < 0.0, rayDirectionInverse[1] < 0.0, rayDirectionInverse[2] < 0.0};
1293
1294 btScalar lambda_max = rayDir.dot(rayTo - rayFrom);
1295
1296 btVector3 resultNormal;
1297
1298 btAlignedObjectArray<const btDbvtNode*> stack;
1299
1300 int depth = 1;
1301 int treshold = DOUBLE_STACKSIZE - 2;
1302
1303 char tempmemory[DOUBLE_STACKSIZE * sizeof(const btDbvtNode*)];
1304 #ifndef BT_DISABLE_STACK_TEMP_MEMORY
1305 stack.initializeFromBuffer(tempmemory, DOUBLE_STACKSIZE, DOUBLE_STACKSIZE);
1306 #else //BT_DISABLE_STACK_TEMP_MEMORY
1307 stack.resize(DOUBLE_STACKSIZE);
1308 #endif //BT_DISABLE_STACK_TEMP_MEMORY
1309 stack[0] = root;
1310 btVector3 bounds[2];
1311 do
1312 {
1313 const btDbvtNode* node = stack[--depth];
1314
1315 bounds[0] = node->volume.Mins();
1316 bounds[1] = node->volume.Maxs();
1317
1318 btScalar tmin = 1.f, lambda_min = 0.f;
1319 unsigned int result1 = btRayAabb2(rayFrom, rayDirectionInverse, signs, bounds, tmin, lambda_min, lambda_max);
1320
1321 #ifdef COMPARE_BTRAY_AABB2
1322 btScalar param = 1.f;
1323 bool result2 = btRayAabb(rayFrom, rayTo, node->volume.Mins(), node->volume.Maxs(), param, resultNormal);
1324 btAssert(result1 == result2);
1325 #endif //TEST_BTRAY_AABB2
1326
1327 if (result1)
1328 {
1329 if (node->isinternal())
1330 {
1331 if (depth > treshold)
1332 {
1333 stack.resize(stack.size() * 2);
1334 treshold = stack.size() - 2;
1335 }
1336 stack[depth++] = node->childs[0];
1337 stack[depth++] = node->childs[1];
1338 }
1339 else
1340 {
1341 policy.Process(node);
1342 }
1343 }
1344 } while (depth);
1345 }
1346 }
1347
//
DBVT_PREFIX
inline void btDbvt::collideKDOP(const btDbvtNode* root,
								const btVector3* normals,
								const btScalar* offsets,
								int count,
								DBVT_IPOLICY)
{
	DBVT_CHECKTYPE
	if (root)
	{
		// One bit per plane; a mask equal to 'inside' means every plane test
		// has already been passed and can be skipped for the whole subtree.
		const int inside = (1 << count) - 1;
		btAlignedObjectArray<sStkNP> stack;
		// Per-plane octant code from the normal's component signs
		// (x>=0 -> 1, y>=0 -> 2, z>=0 -> 4), passed to volume.Classify.
		int signs[sizeof(unsigned) * 8];
		btAssert(count < int(sizeof(signs) / sizeof(signs[0])));
		for (int i = 0; i < count; ++i)
		{
			signs[i] = ((normals[i].x() >= 0) ? 1 : 0) +
					   ((normals[i].y() >= 0) ? 2 : 0) +
					   ((normals[i].z() >= 0) ? 4 : 0);
		}
		stack.reserve(SIMPLE_STACKSIZE);
		stack.push_back(sStkNP(root, 0));
		do
		{
			sStkNP se = stack[stack.size() - 1];
			bool out = false;
			stack.pop_back();
			// Re-test only the planes whose bit is not yet set in the mask
			// inherited from the parent node.
			for (int i = 0, j = 1; (!out) && (i < count); ++i, j <<= 1)
			{
				if (0 == (se.mask & j))
				{
					const int side = se.node->volume.Classify(normals[i], offsets[i], signs[i]);
					switch (side)
					{
						case -1:
							// Classify returned -1: reject this subtree entirely.
							out = true;
							break;
						case +1:
							// Classify returned +1: this plane is settled for the
							// node and all of its children; record it in the mask.
							se.mask |= j;
							break;
					}
					// side == 0: straddling; children must re-test this plane.
				}
			}
			if (!out)
			{
				if ((se.mask != inside) && (se.node->isinternal()))
				{
					// Children inherit the mask of already-passed planes.
					stack.push_back(sStkNP(se.node->childs[0], se.mask));
					stack.push_back(sStkNP(se.node->childs[1], se.mask));
				}
				else
				{
					// Leaf, or subtree fully inside all planes: the policy decides
					// whether to enumerate every leaf below this node.
					if (policy.AllLeaves(se.node)) enumLeaves(se.node, policy);
				}
			}
		} while (stack.size());
	}
}
1407
//
DBVT_PREFIX
inline void btDbvt::collideOCL(const btDbvtNode* root,
							   const btVector3* normals,
							   const btScalar* offsets,
							   const btVector3& sortaxis,
							   int count,
							   DBVT_IPOLICY,
							   bool fsort)
{
	DBVT_CHECKTYPE
	if (root)
	{
		// Octant code of the sort axis (x>=0 -> 1, y>=0 -> 2, z>=0 -> 4),
		// used by ProjectMinimum to pick the projecting AABB corner.
		const unsigned srtsgns = (sortaxis[0] >= 0 ? 1 : 0) +
								 (sortaxis[1] >= 0 ? 2 : 0) +
								 (sortaxis[2] >= 0 ? 4 : 0);
		// Mask value meaning "already passed every plane" (one bit per plane).
		const int inside = (1 << count) - 1;
		// 'stock' stores the actual entries; 'stack' stores indices into
		// stock (kept sorted by projection value when fsort is true);
		// 'ifree' recycles indices of popped entries for reuse by allocate().
		btAlignedObjectArray<sStkNPS> stock;
		btAlignedObjectArray<int> ifree;
		btAlignedObjectArray<int> stack;
		// Per-plane octant codes for volume.Classify, as in collideKDOP.
		int signs[sizeof(unsigned) * 8];
		btAssert(count < int(sizeof(signs) / sizeof(signs[0])));
		for (int i = 0; i < count; ++i)
		{
			signs[i] = ((normals[i].x() >= 0) ? 1 : 0) +
					   ((normals[i].y() >= 0) ? 2 : 0) +
					   ((normals[i].z() >= 0) ? 4 : 0);
		}
		stock.reserve(SIMPLE_STACKSIZE);
		stack.reserve(SIMPLE_STACKSIZE);
		ifree.reserve(SIMPLE_STACKSIZE);
		stack.push_back(allocate(ifree, stock, sStkNPS(root, 0, root->volume.ProjectMinimum(sortaxis, srtsgns))));
		do
		{
			// Pop the top index and release its stock slot for reuse.
			const int id = stack[stack.size() - 1];
			sStkNPS se = stock[id];
			stack.pop_back();
			ifree.push_back(id);
			if (se.mask != inside)
			{
				// Same masked plane-culling scheme as collideKDOP: only test
				// planes whose bit is not yet set.
				bool out = false;
				for (int i = 0, j = 1; (!out) && (i < count); ++i, j <<= 1)
				{
					if (0 == (se.mask & j))
					{
						const int side = se.node->volume.Classify(normals[i], offsets[i], signs[i]);
						switch (side)
						{
							case -1:
								// Rejected by this plane: cull the subtree.
								out = true;
								break;
							case +1:
								// Plane settled for this node and its children.
								se.mask |= j;
								break;
						}
					}
				}
				if (out) continue;
			}
			// Let the policy decide whether to keep descending here.
			if (policy.Descent(se.node))
			{
				if (se.node->isinternal())
				{
					const btDbvtNode* pns[] = {se.node->childs[0], se.node->childs[1]};
					// Pre-project both children onto the sort axis.
					sStkNPS nes[] = {sStkNPS(pns[0], se.mask, pns[0]->volume.ProjectMinimum(sortaxis, srtsgns)),
									 sStkNPS(pns[1], se.mask, pns[1]->volume.ProjectMinimum(sortaxis, srtsgns))};
					// q selects the child with the smaller projection value,
					// so it is inserted first.
					const int q = nes[0].value < nes[1].value ? 1 : 0;
					int j = stack.size();
					if (fsort && (j > 0))
					{
						// Keep the stack sorted by projection value: binary-search
						// the insertion point, then shift the tail up by one.
						/* Insert 0	*/
						j = nearest(&stack[0], &stock[0], nes[q].value, 0, stack.size());
						stack.push_back(0);

						//void * memmove ( void * destination, const void * source, size_t num );

#if DBVT_USE_MEMMOVE
						{
							int num_items_to_move = stack.size() - 1 - j;
							if (num_items_to_move > 0)
								memmove(&stack[j + 1], &stack[j], sizeof(int) * num_items_to_move);
						}
#else
						for (int k = stack.size() - 1; k > j; --k)
						{
							stack[k] = stack[k - 1];
						}
#endif
						stack[j] = allocate(ifree, stock, nes[q]);
						/* Insert 1	*/
						// Second insertion can start searching at the first
						// insertion point, since nes[1-q].value >= nes[q].value.
						j = nearest(&stack[0], &stock[0], nes[1 - q].value, j, stack.size());
						stack.push_back(0);
#if DBVT_USE_MEMMOVE
						{
							int num_items_to_move = stack.size() - 1 - j;
							if (num_items_to_move > 0)
								memmove(&stack[j + 1], &stack[j], sizeof(int) * num_items_to_move);
						}
#else
						for (int k = stack.size() - 1; k > j; --k)
						{
							stack[k] = stack[k - 1];
						}
#endif
						stack[j] = allocate(ifree, stock, nes[1 - q]);
					}
					else
					{
						// Unsorted mode: plain LIFO push, nearer child on top.
						stack.push_back(allocate(ifree, stock, nes[q]));
						stack.push_back(allocate(ifree, stock, nes[1 - q]));
					}
				}
				else
				{
					// Leaf: report it together with its projection value.
					policy.Process(se.node, se.value);
				}
			}
		} while (stack.size());
	}
}
1528
1529 //
1530 DBVT_PREFIX
collideTU(const btDbvtNode * root,DBVT_IPOLICY)1531 inline void btDbvt::collideTU(const btDbvtNode* root,
1532 DBVT_IPOLICY)
1533 {
1534 DBVT_CHECKTYPE
1535 if (root)
1536 {
1537 btAlignedObjectArray<const btDbvtNode*> stack;
1538 stack.reserve(SIMPLE_STACKSIZE);
1539 stack.push_back(root);
1540 do
1541 {
1542 const btDbvtNode* n = stack[stack.size() - 1];
1543 stack.pop_back();
1544 if (policy.Descent(n))
1545 {
1546 if (n->isinternal())
1547 {
1548 stack.push_back(n->childs[0]);
1549 stack.push_back(n->childs[1]);
1550 }
1551 else
1552 {
1553 policy.Process(n);
1554 }
1555 }
1556 } while (stack.size() > 0);
1557 }
1558 }
1559
1560 //
1561 // PP Cleanup
1562 //
1563
1564 #undef DBVT_USE_MEMMOVE
1565 #undef DBVT_USE_TEMPLATE
1566 #undef DBVT_VIRTUAL_DTOR
1567 #undef DBVT_VIRTUAL
1568 #undef DBVT_PREFIX
1569 #undef DBVT_IPOLICY
1570 #undef DBVT_CHECKTYPE
1571 #undef DBVT_IMPL_GENERIC
1572 #undef DBVT_IMPL_SSE
1573 #undef DBVT_USE_INTRINSIC_SSE
1574 #undef DBVT_SELECT_IMPL
1575 #undef DBVT_MERGE_IMPL
1576 #undef DBVT_INT0_IMPL
1577
1578 #endif
1579