Bullet Collision Detection & Physics Library
btQuantizedBvh.cpp
/*
Bullet Continuous Collision Detection and Physics Library
Copyright (c) 2003-2006 Erwin Coumans  https://bulletphysics.org

This software is provided 'as-is', without any express or implied warranty.
In no event will the authors be held liable for any damages arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it freely,
subject to the following restrictions:

1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
*/

#include "btQuantizedBvh.h"

#include "LinearMath/btAabbUtil2.h"
#include "LinearMath/btIDebugDraw.h"
#include "LinearMath/btSerializer.h"

#define RAYAABB2

btQuantizedBvh::btQuantizedBvh() : m_bulletVersion(BT_BULLET_VERSION),
								   m_useQuantization(false),
								   //m_traversalMode(TRAVERSAL_STACKLESS_CACHE_FRIENDLY)
								   m_traversalMode(TRAVERSAL_STACKLESS)
								   //m_traversalMode(TRAVERSAL_RECURSIVE)
								   ,
								   m_subtreeHeaderCount(0)  //PCK: add this line
{
	m_bvhAabbMin.setValue(-SIMD_INFINITY, -SIMD_INFINITY, -SIMD_INFINITY);
	m_bvhAabbMax.setValue(SIMD_INFINITY, SIMD_INFINITY, SIMD_INFINITY);
}

void btQuantizedBvh::buildInternal()
{
	///assumes that caller filled in the m_quantizedLeafNodes
	m_useQuantization = true;
	int numLeafNodes = 0;

	if (m_useQuantization)
	{
		//now we have an array of leafnodes in m_leafNodes
		numLeafNodes = m_quantizedLeafNodes.size();

		m_quantizedContiguousNodes.resize(2 * numLeafNodes);
	}

	m_curNodeIndex = 0;

	buildTree(0, numLeafNodes);

	///if the entire tree is smaller than the subtree size, we need to create a header info for the tree
	if (m_useQuantization && !m_SubtreeHeaders.size())
	{
		btBvhSubtreeInfo& subtree = m_SubtreeHeaders.expand();
		subtree.setAabbFromQuantizeNode(m_quantizedContiguousNodes[0]);
		subtree.m_rootNodeIndex = 0;
		subtree.m_subtreeSize = m_quantizedContiguousNodes[0].isLeafNode() ? 1 : m_quantizedContiguousNodes[0].getEscapeIndex();
	}

	//PCK: update the copy of the size
	m_subtreeHeaderCount = m_SubtreeHeaders.size();

	//PCK: clear m_quantizedLeafNodes and m_leafNodes, they are temporary
	m_quantizedLeafNodes.clear();
	m_leafNodes.clear();
}
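
// A sketch of the expected call sequence (an editor's illustration, assuming the
// expert-use API declared in btQuantizedBvh.h; most users never call
// buildInternal() directly and instead let btBvhTriangleMeshShape/btOptimizedBvh
// build the tree from a mesh):
//
//   btQuantizedBvh bvh;
//   bvh.setQuantizationValues(aabbMin, aabbMax);  // must precede leaf setup
//   // fill bvh.getLeafNodeArray() with one quantized leaf per primitive,
//   // quantizing each primitive AABB with bvh.quantize(...)
//   bvh.buildInternal();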

///just for debugging, to visualize the individual patches/subtrees
#ifdef DEBUG_PATCH_COLORS
btVector3 color[4] =
	{
		btVector3(1, 0, 0),
		btVector3(0, 1, 0),
		btVector3(0, 0, 1),
		btVector3(0, 1, 1)};
#endif  //DEBUG_PATCH_COLORS

void btQuantizedBvh::setQuantizationValues(const btVector3& bvhAabbMin, const btVector3& bvhAabbMax, btScalar quantizationMargin)
{
	//enlarge the AABB to avoid division by zero when initializing the quantization values
	btVector3 clampValue(quantizationMargin, quantizationMargin, quantizationMargin);
	m_bvhAabbMin = bvhAabbMin - clampValue;
	m_bvhAabbMax = bvhAabbMax + clampValue;
	btVector3 aabbSize = m_bvhAabbMax - m_bvhAabbMin;
	m_bvhQuantization = btVector3(btScalar(65533.0), btScalar(65533.0), btScalar(65533.0)) / aabbSize;

	m_useQuantization = true;

	{
		unsigned short vecIn[3];
		btVector3 v;
		{
			quantize(vecIn, m_bvhAabbMin, false);
			v = unQuantize(vecIn);
			m_bvhAabbMin.setMin(v - clampValue);
		}
		aabbSize = m_bvhAabbMax - m_bvhAabbMin;
		m_bvhQuantization = btVector3(btScalar(65533.0), btScalar(65533.0), btScalar(65533.0)) / aabbSize;
		{
			quantize(vecIn, m_bvhAabbMax, true);
			v = unQuantize(vecIn);
			m_bvhAabbMax.setMax(v + clampValue);
		}
		aabbSize = m_bvhAabbMax - m_bvhAabbMin;
		m_bvhQuantization = btVector3(btScalar(65533.0), btScalar(65533.0), btScalar(65533.0)) / aabbSize;
	}
}
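
// Quantization math, summarized (editor's note restating the code above and the
// quantize() helper in the header): a world-space point p maps to 16-bit grid
// coordinates
//
//   q[i] = (unsigned short)((p[i] - m_bvhAabbMin[i]) * m_bvhQuantization[i])
//
// with m_bvhQuantization = 65533 / (m_bvhAabbMax - m_bvhAabbMin). Using 65533
// rather than 65535 leaves headroom, since quantize() rounds max endpoints up by
// one cell and sets their lowest bit (min endpoints round down and clear it),
// keeping every quantized AABB conservative. The two quantize/unQuantize
// round-trips above re-expand the stored bounds so this rounding can never
// shrink them below the caller's AABB.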

btQuantizedBvh::~btQuantizedBvh()
{
}

#ifdef DEBUG_TREE_BUILDING
int gStackDepth = 0;
int gMaxStackDepth = 0;
#endif  //DEBUG_TREE_BUILDING

void btQuantizedBvh::buildTree(int startIndex, int endIndex)
{
#ifdef DEBUG_TREE_BUILDING
	gStackDepth++;
	if (gStackDepth > gMaxStackDepth)
		gMaxStackDepth = gStackDepth;
#endif  //DEBUG_TREE_BUILDING

	int splitAxis, splitIndex, i;
	int numIndices = endIndex - startIndex;
	int curIndex = m_curNodeIndex;

	btAssert(numIndices > 0);

	if (numIndices == 1)
	{
#ifdef DEBUG_TREE_BUILDING
		gStackDepth--;
#endif  //DEBUG_TREE_BUILDING

		assignInternalNodeFromLeafNode(m_curNodeIndex, startIndex);

		m_curNodeIndex++;
		return;
	}
	//calculate the best splitting axis and where to split it. Sort the incoming 'leafNodes' array within range 'startIndex/endIndex'.

	splitAxis = calcSplittingAxis(startIndex, endIndex);

	splitIndex = sortAndCalcSplittingIndex(startIndex, endIndex, splitAxis);

	int internalNodeIndex = m_curNodeIndex;

	//set the min aabb to 'inf' or a max value, and set the max aabb to a -inf/minimum value.
	//the aabb will be expanded during buildTree/mergeInternalNodeAabb with actual node values
	setInternalNodeAabbMin(m_curNodeIndex, m_bvhAabbMax);  //can't use btVector3(SIMD_INFINITY,SIMD_INFINITY,SIMD_INFINITY) because of quantization
	setInternalNodeAabbMax(m_curNodeIndex, m_bvhAabbMin);  //can't use btVector3(-SIMD_INFINITY,-SIMD_INFINITY,-SIMD_INFINITY) because of quantization

	for (i = startIndex; i < endIndex; i++)
	{
		mergeInternalNodeAabb(m_curNodeIndex, getAabbMin(i), getAabbMax(i));
	}

	m_curNodeIndex++;

	//internalNode->m_escapeIndex;

	int leftChildNodexIndex = m_curNodeIndex;

	//build left child tree
	buildTree(startIndex, splitIndex);

	int rightChildNodexIndex = m_curNodeIndex;
	//build right child tree
	buildTree(splitIndex, endIndex);

#ifdef DEBUG_TREE_BUILDING
	gStackDepth--;
#endif  //DEBUG_TREE_BUILDING

	int escapeIndex = m_curNodeIndex - curIndex;

	if (m_useQuantization)
	{
		//escapeIndex is the number of nodes of this subtree
		const int sizeQuantizedNode = sizeof(btQuantizedBvhNode);
		const int treeSizeInBytes = escapeIndex * sizeQuantizedNode;
		if (treeSizeInBytes > MAX_SUBTREE_SIZE_IN_BYTES)
		{
			updateSubtreeHeaders(leftChildNodexIndex, rightChildNodexIndex);
		}
	}
	else
	{
	}

	setInternalNodeEscapeIndex(internalNodeIndex, escapeIndex);
}

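// Layout note: nodes are stored depth-first in one contiguous array, so a
// node's left child is always the next array element. The escape index of an
// internal node is the size of its whole subtree (itself included), i.e. the
// offset from the node to its right sibling. For example:
//
//   index:  0    1    2     3     4
//   node:  [A]  [B]  [b0]  [b1]  [C]      B = A's left child, escape(B) = 3
//
// so a traversal that rejects B jumps directly from index 1 to C at index 4.
// Quantized leaf nodes reuse the same field to store the part id and triangle
// index; the sign of m_escapeIndexOrTriangleIndex tells the two cases apart.
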
void btQuantizedBvh::updateSubtreeHeaders(int leftChildNodexIndex, int rightChildNodexIndex)
{
	btAssert(m_useQuantization);

	btQuantizedBvhNode& leftChildNode = m_quantizedContiguousNodes[leftChildNodexIndex];
	int leftSubTreeSize = leftChildNode.isLeafNode() ? 1 : leftChildNode.getEscapeIndex();
	int leftSubTreeSizeInBytes = leftSubTreeSize * static_cast<int>(sizeof(btQuantizedBvhNode));

	btQuantizedBvhNode& rightChildNode = m_quantizedContiguousNodes[rightChildNodexIndex];
	int rightSubTreeSize = rightChildNode.isLeafNode() ? 1 : rightChildNode.getEscapeIndex();
	int rightSubTreeSizeInBytes = rightSubTreeSize * static_cast<int>(sizeof(btQuantizedBvhNode));

	if (leftSubTreeSizeInBytes <= MAX_SUBTREE_SIZE_IN_BYTES)
	{
		btBvhSubtreeInfo& subtree = m_SubtreeHeaders.expand();
		subtree.setAabbFromQuantizeNode(leftChildNode);
		subtree.m_rootNodeIndex = leftChildNodexIndex;
		subtree.m_subtreeSize = leftSubTreeSize;
	}

	if (rightSubTreeSizeInBytes <= MAX_SUBTREE_SIZE_IN_BYTES)
	{
		btBvhSubtreeInfo& subtree = m_SubtreeHeaders.expand();
		subtree.setAabbFromQuantizeNode(rightChildNode);
		subtree.m_rootNodeIndex = rightChildNodexIndex;
		subtree.m_subtreeSize = rightSubTreeSize;
	}

	//PCK: update the copy of the size
	m_subtreeHeaderCount = m_SubtreeHeaders.size();
}

int btQuantizedBvh::sortAndCalcSplittingIndex(int startIndex, int endIndex, int splitAxis)
{
	int i;
	int splitIndex = startIndex;
	int numIndices = endIndex - startIndex;
	btScalar splitValue;

	btVector3 means(btScalar(0.), btScalar(0.), btScalar(0.));
	for (i = startIndex; i < endIndex; i++)
	{
		btVector3 center = btScalar(0.5) * (getAabbMax(i) + getAabbMin(i));
		means += center;
	}
	means *= (btScalar(1.) / (btScalar)numIndices);

	splitValue = means[splitAxis];

	//sort leafNodes so all values larger than splitValue come first, and smaller values start from 'splitIndex'.
	for (i = startIndex; i < endIndex; i++)
	{
		btVector3 center = btScalar(0.5) * (getAabbMax(i) + getAabbMin(i));
		if (center[splitAxis] > splitValue)
		{
			//swap
			swapLeafNodes(i, splitIndex);
			splitIndex++;
		}
	}

	//if the splitIndex causes unbalanced trees, fix this by using the center in between startIndex and endIndex
	//otherwise the tree-building might fail due to stack-overflows in certain cases.
	//unbalanced1 is unsafe: it can cause stack overflows
	//bool unbalanced1 = ((splitIndex==startIndex) || (splitIndex == (endIndex-1)));

	//unbalanced2 should work too: always use center (perfectly balanced trees)
	//bool unbalanced2 = true;

	//this should be safe too:
	int rangeBalancedIndices = numIndices / 3;
	bool unbalanced = ((splitIndex <= (startIndex + rangeBalancedIndices)) || (splitIndex >= (endIndex - 1 - rangeBalancedIndices)));

	if (unbalanced)
	{
		splitIndex = startIndex + (numIndices >> 1);
	}

	bool unbal = (splitIndex == startIndex) || (splitIndex == (endIndex));
	(void)unbal;
	btAssert(!unbal);

	return splitIndex;
}

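// The split point is the mean centroid on the split axis. If the resulting
// partition falls outside the middle third of the range, the code above falls
// back to the median element, so both children strictly shrink and the
// recursion depth stays near log2(n) instead of degenerating on skewed input.
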
int btQuantizedBvh::calcSplittingAxis(int startIndex, int endIndex)
{
	int i;

	btVector3 means(btScalar(0.), btScalar(0.), btScalar(0.));
	btVector3 variance(btScalar(0.), btScalar(0.), btScalar(0.));
	int numIndices = endIndex - startIndex;

	for (i = startIndex; i < endIndex; i++)
	{
		btVector3 center = btScalar(0.5) * (getAabbMax(i) + getAabbMin(i));
		means += center;
	}
	means *= (btScalar(1.) / (btScalar)numIndices);

	for (i = startIndex; i < endIndex; i++)
	{
		btVector3 center = btScalar(0.5) * (getAabbMax(i) + getAabbMin(i));
		btVector3 diff2 = center - means;
		diff2 = diff2 * diff2;
		variance += diff2;
	}
	variance *= (btScalar(1.) / ((btScalar)numIndices - 1));

	return variance.maxAxis();
}

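// The axis with the largest centroid variance is the most spread out and
// therefore the most useful to split. For instance, if centroids span x
// uniformly in [0,100] but y and z stay within [0,1], the variance vector is
// roughly (833, <1, <1), so maxAxis() returns 0 (the x axis).
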
void btQuantizedBvh::reportAabbOverlappingNodex(btNodeOverlapCallback* nodeCallback, const btVector3& aabbMin, const btVector3& aabbMax) const
{
	//either choose recursive traversal (walkTree) or stackless (walkStacklessTree)

	if (m_useQuantization)
	{
		///quantize query AABB
		unsigned short int quantizedQueryAabbMin[3];
		unsigned short int quantizedQueryAabbMax[3];
		quantizeWithClamp(quantizedQueryAabbMin, aabbMin, 0);
		quantizeWithClamp(quantizedQueryAabbMax, aabbMax, 1);

		switch (m_traversalMode)
		{
			case TRAVERSAL_STACKLESS:
				walkStacklessQuantizedTree(nodeCallback, quantizedQueryAabbMin, quantizedQueryAabbMax, 0, m_curNodeIndex);
				break;
			case TRAVERSAL_STACKLESS_CACHE_FRIENDLY:
				walkStacklessQuantizedTreeCacheFriendly(nodeCallback, quantizedQueryAabbMin, quantizedQueryAabbMax);
				break;
			case TRAVERSAL_RECURSIVE:
			{
				const btQuantizedBvhNode* rootNode = &m_quantizedContiguousNodes[0];
				walkRecursiveQuantizedTreeAgainstQueryAabb(rootNode, nodeCallback, quantizedQueryAabbMin, quantizedQueryAabbMax);
			}
			break;
			default:
				//unsupported
				btAssert(0);
		}
	}
	else
	{
		walkStacklessTree(nodeCallback, aabbMin, aabbMax);
	}
}

void btQuantizedBvh::walkStacklessTree(btNodeOverlapCallback* nodeCallback, const btVector3& aabbMin, const btVector3& aabbMax) const
{
	btAssert(!m_useQuantization);

	const btOptimizedBvhNode* rootNode = &m_contiguousNodes[0];
	int escapeIndex, curIndex = 0;
	int walkIterations = 0;
	bool isLeafNode;
	//PCK: unsigned instead of bool
	unsigned aabbOverlap;

	while (curIndex < m_curNodeIndex)
	{
		//catch bugs in tree data
		btAssert(walkIterations < m_curNodeIndex);

		walkIterations++;
		aabbOverlap = TestAabbAgainstAabb2(aabbMin, aabbMax, rootNode->m_aabbMinOrg, rootNode->m_aabbMaxOrg);
		isLeafNode = rootNode->m_escapeIndex == -1;

		//PCK: unsigned instead of bool
		if (isLeafNode && (aabbOverlap != 0))
		{
			nodeCallback->processNode(rootNode->m_subPart, rootNode->m_triangleIndex);
		}

		//PCK: unsigned instead of bool
		if ((aabbOverlap != 0) || isLeafNode)
		{
			rootNode++;
			curIndex++;
		}
		else
		{
			escapeIndex = rootNode->m_escapeIndex;
			rootNode += escapeIndex;
			curIndex += escapeIndex;
		}
	}
}

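// Why this loop needs no stack: descending into an overlapping node is just
// rootNode++ (the left child is adjacent in the array), and a missed internal
// node is skipped wholesale via its escape index. Leaves always advance by
// one whether or not they overlap, so every node is visited at most once and
// the walk is a single linear scan with occasional forward jumps.
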
/*
///this was the original recursive traversal, before we optimized for stackless walking
void btQuantizedBvh::walkTree(btOptimizedBvhNode* rootNode,btNodeOverlapCallback* nodeCallback,const btVector3& aabbMin,const btVector3& aabbMax) const
{
	bool isLeafNode, aabbOverlap = TestAabbAgainstAabb2(aabbMin,aabbMax,rootNode->m_aabbMin,rootNode->m_aabbMax);
	if (aabbOverlap)
	{
		isLeafNode = (!rootNode->m_leftChild && !rootNode->m_rightChild);
		if (isLeafNode)
		{
			nodeCallback->processNode(rootNode);
		} else
		{
			walkTree(rootNode->m_leftChild,nodeCallback,aabbMin,aabbMax);
			walkTree(rootNode->m_rightChild,nodeCallback,aabbMin,aabbMax);
		}
	}

}
*/

void btQuantizedBvh::walkRecursiveQuantizedTreeAgainstQueryAabb(const btQuantizedBvhNode* currentNode, btNodeOverlapCallback* nodeCallback, unsigned short int* quantizedQueryAabbMin, unsigned short int* quantizedQueryAabbMax) const
{
	btAssert(m_useQuantization);

	bool isLeafNode;
	//PCK: unsigned instead of bool
	unsigned aabbOverlap;

	//PCK: unsigned instead of bool
	aabbOverlap = testQuantizedAabbAgainstQuantizedAabb(quantizedQueryAabbMin, quantizedQueryAabbMax, currentNode->m_quantizedAabbMin, currentNode->m_quantizedAabbMax);
	isLeafNode = currentNode->isLeafNode();

	//PCK: unsigned instead of bool
	if (aabbOverlap != 0)
	{
		if (isLeafNode)
		{
			nodeCallback->processNode(currentNode->getPartId(), currentNode->getTriangleIndex());
		}
		else
		{
			//process left and right children
			const btQuantizedBvhNode* leftChildNode = currentNode + 1;
			walkRecursiveQuantizedTreeAgainstQueryAabb(leftChildNode, nodeCallback, quantizedQueryAabbMin, quantizedQueryAabbMax);

			const btQuantizedBvhNode* rightChildNode = leftChildNode->isLeafNode() ? leftChildNode + 1 : leftChildNode + leftChildNode->getEscapeIndex();
			walkRecursiveQuantizedTreeAgainstQueryAabb(rightChildNode, nodeCallback, quantizedQueryAabbMin, quantizedQueryAabbMax);
		}
	}
}

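// Even in this recursive variant the children are addressed implicitly: the
// left child is the next node in the array, and the right child begins just
// past the left child's subtree (one node for a leaf, otherwise its escape
// index), exactly as laid out by buildTree.
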
void btQuantizedBvh::walkStacklessTreeAgainstRay(btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget, const btVector3& aabbMin, const btVector3& aabbMax, int startNodeIndex, int endNodeIndex) const
{
	btAssert(!m_useQuantization);

	const btOptimizedBvhNode* rootNode = &m_contiguousNodes[0];
	int escapeIndex, curIndex = 0;
	int walkIterations = 0;
	bool isLeafNode;
	//PCK: unsigned instead of bool
	unsigned aabbOverlap = 0;
	unsigned rayBoxOverlap = 0;
	btScalar lambda_max = 1.0;

	/* Quick pruning by quantized box */
	btVector3 rayAabbMin = raySource;
	btVector3 rayAabbMax = raySource;
	rayAabbMin.setMin(rayTarget);
	rayAabbMax.setMax(rayTarget);

	/* Add box cast extents to bounding box */
	rayAabbMin += aabbMin;
	rayAabbMax += aabbMax;

#ifdef RAYAABB2
	btVector3 rayDir = (rayTarget - raySource);
	rayDir.safeNormalize();  // stephengold changed normalize to safeNormalize 2020-02-17
	lambda_max = rayDir.dot(rayTarget - raySource);
	///what about division by zero? --> just set rayDirection[i] to INF/BT_LARGE_FLOAT
	btVector3 rayDirectionInverse;
	rayDirectionInverse[0] = rayDir[0] == btScalar(0.0) ? btScalar(BT_LARGE_FLOAT) : btScalar(1.0) / rayDir[0];
	rayDirectionInverse[1] = rayDir[1] == btScalar(0.0) ? btScalar(BT_LARGE_FLOAT) : btScalar(1.0) / rayDir[1];
	rayDirectionInverse[2] = rayDir[2] == btScalar(0.0) ? btScalar(BT_LARGE_FLOAT) : btScalar(1.0) / rayDir[2];
	unsigned int sign[3] = {rayDirectionInverse[0] < 0.0, rayDirectionInverse[1] < 0.0, rayDirectionInverse[2] < 0.0};
#endif

	btVector3 bounds[2];

	while (curIndex < m_curNodeIndex)
	{
		btScalar param = 1.0;
		//catch bugs in tree data
		btAssert(walkIterations < m_curNodeIndex);

		walkIterations++;

		bounds[0] = rootNode->m_aabbMinOrg;
		bounds[1] = rootNode->m_aabbMaxOrg;
		/* Add box cast extents */
		bounds[0] -= aabbMax;
		bounds[1] -= aabbMin;

		aabbOverlap = TestAabbAgainstAabb2(rayAabbMin, rayAabbMax, rootNode->m_aabbMinOrg, rootNode->m_aabbMaxOrg);
		//perhaps profile if it is worth doing the aabbOverlap test first

#ifdef RAYAABB2
		///careful with this check: need to check division by zero (above) and fix the unQuantize method
		///thanks Joerg/hiker for the reproduction case!
		///http://www.bulletphysics.com/Bullet/phpBB3/viewtopic.php?f=9&t=1858
		rayBoxOverlap = aabbOverlap ? btRayAabb2(raySource, rayDirectionInverse, sign, bounds, param, 0.0f, lambda_max) : false;

#else
		btVector3 normal;
		rayBoxOverlap = btRayAabb(raySource, rayTarget, bounds[0], bounds[1], param, normal);
#endif

		isLeafNode = rootNode->m_escapeIndex == -1;

		//PCK: unsigned instead of bool
		if (isLeafNode && (rayBoxOverlap != 0))
		{
			nodeCallback->processNode(rootNode->m_subPart, rootNode->m_triangleIndex);
		}

		//PCK: unsigned instead of bool
		if ((rayBoxOverlap != 0) || isLeafNode)
		{
			rootNode++;
			curIndex++;
		}
		else
		{
			escapeIndex = rootNode->m_escapeIndex;
			rootNode += escapeIndex;
			curIndex += escapeIndex;
		}
	}
}

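// With RAYAABB2 defined, btRayAabb2 runs the branchless "slab" ray/AABB test.
// It consumes the precomputed reciprocal direction (zero components clamped to
// BT_LARGE_FLOAT above, so the arithmetic stays finite) plus per-axis sign
// flags, and accepts a hit only when the ray parameter lies in [0, lambda_max],
// i.e. within the swept segment from raySource to rayTarget.
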
void btQuantizedBvh::walkStacklessQuantizedTreeAgainstRay(btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget, const btVector3& aabbMin, const btVector3& aabbMax, int startNodeIndex, int endNodeIndex) const
{
	btAssert(m_useQuantization);

	int curIndex = startNodeIndex;
	int walkIterations = 0;
	int subTreeSize = endNodeIndex - startNodeIndex;
	(void)subTreeSize;

	const btQuantizedBvhNode* rootNode = &m_quantizedContiguousNodes[startNodeIndex];
	int escapeIndex;

	bool isLeafNode;
	//PCK: unsigned instead of bool
	unsigned boxBoxOverlap = 0;
	unsigned rayBoxOverlap = 0;

	btScalar lambda_max = 1.0;

#ifdef RAYAABB2
	btVector3 rayDirection = (rayTarget - raySource);
	rayDirection.safeNormalize();  // stephengold changed normalize to safeNormalize 2020-02-17
	lambda_max = rayDirection.dot(rayTarget - raySource);
	///what about division by zero? --> just set rayDirection[i] to INF/BT_LARGE_FLOAT
	rayDirection[0] = rayDirection[0] == btScalar(0.0) ? btScalar(BT_LARGE_FLOAT) : btScalar(1.0) / rayDirection[0];
	rayDirection[1] = rayDirection[1] == btScalar(0.0) ? btScalar(BT_LARGE_FLOAT) : btScalar(1.0) / rayDirection[1];
	rayDirection[2] = rayDirection[2] == btScalar(0.0) ? btScalar(BT_LARGE_FLOAT) : btScalar(1.0) / rayDirection[2];
	unsigned int sign[3] = {rayDirection[0] < 0.0, rayDirection[1] < 0.0, rayDirection[2] < 0.0};
#endif

	/* Quick pruning by quantized box */
	btVector3 rayAabbMin = raySource;
	btVector3 rayAabbMax = raySource;
	rayAabbMin.setMin(rayTarget);
	rayAabbMax.setMax(rayTarget);

	/* Add box cast extents to bounding box */
	rayAabbMin += aabbMin;
	rayAabbMax += aabbMax;

	unsigned short int quantizedQueryAabbMin[3];
	unsigned short int quantizedQueryAabbMax[3];
	quantizeWithClamp(quantizedQueryAabbMin, rayAabbMin, 0);
	quantizeWithClamp(quantizedQueryAabbMax, rayAabbMax, 1);

	while (curIndex < endNodeIndex)
	{
//#define VISUALLY_ANALYZE_BVH 1
#ifdef VISUALLY_ANALYZE_BVH
		//some code snippet to debugDraw aabb, to visually analyze bvh structure
		static int drawPatch = 0;
		//need some global access to a debugDrawer
		extern btIDebugDraw* debugDrawerPtr;
		if (curIndex == drawPatch)
		{
			btVector3 aabbMin, aabbMax;
			aabbMin = unQuantize(rootNode->m_quantizedAabbMin);
			aabbMax = unQuantize(rootNode->m_quantizedAabbMax);
			btVector3 color(1, 0, 0);
			debugDrawerPtr->drawAabb(aabbMin, aabbMax, color);
		}
#endif  //VISUALLY_ANALYZE_BVH

		//catch bugs in tree data
		btAssert(walkIterations < subTreeSize);

		walkIterations++;
		//PCK: unsigned instead of bool
		// only interested if this is closer than any previous hit
		btScalar param = 1.0;
		rayBoxOverlap = 0;
		boxBoxOverlap = testQuantizedAabbAgainstQuantizedAabb(quantizedQueryAabbMin, quantizedQueryAabbMax, rootNode->m_quantizedAabbMin, rootNode->m_quantizedAabbMax);
		isLeafNode = rootNode->isLeafNode();
		if (boxBoxOverlap)
		{
			btVector3 bounds[2];
			bounds[0] = unQuantize(rootNode->m_quantizedAabbMin);
			bounds[1] = unQuantize(rootNode->m_quantizedAabbMax);
			/* Add box cast extents */
			bounds[0] -= aabbMax;
			bounds[1] -= aabbMin;
			btVector3 normal;
#if 0
			bool ra2 = btRayAabb2 (raySource, rayDirection, sign, bounds, param, 0.0, lambda_max);
			bool ra = btRayAabb (raySource, rayTarget, bounds[0], bounds[1], param, normal);
			if (ra2 != ra)
			{
				printf("functions don't match\n");
			}
#endif
#ifdef RAYAABB2
			///careful with this check: need to check division by zero (above) and fix the unQuantize method
			///thanks Joerg/hiker for the reproduction case!
			///http://www.bulletphysics.com/Bullet/phpBB3/viewtopic.php?f=9&t=1858

			//BT_PROFILE("btRayAabb2");
			rayBoxOverlap = btRayAabb2(raySource, rayDirection, sign, bounds, param, 0.0f, lambda_max);

#else
			rayBoxOverlap = true;  //btRayAabb(raySource, rayTarget, bounds[0], bounds[1], param, normal);
#endif
		}

		if (isLeafNode && rayBoxOverlap)
		{
			nodeCallback->processNode(rootNode->getPartId(), rootNode->getTriangleIndex());
		}

		//PCK: unsigned instead of bool
		if ((rayBoxOverlap != 0) || isLeafNode)
		{
			rootNode++;
			curIndex++;
		}
		else
		{
			escapeIndex = rootNode->getEscapeIndex();
			rootNode += escapeIndex;
			curIndex += escapeIndex;
		}
	}
}

void btQuantizedBvh::walkStacklessQuantizedTree(btNodeOverlapCallback* nodeCallback, unsigned short int* quantizedQueryAabbMin, unsigned short int* quantizedQueryAabbMax, int startNodeIndex, int endNodeIndex) const
{
	btAssert(m_useQuantization);

	int curIndex = startNodeIndex;
	int walkIterations = 0;
	int subTreeSize = endNodeIndex - startNodeIndex;
	(void)subTreeSize;

	const btQuantizedBvhNode* rootNode = &m_quantizedContiguousNodes[startNodeIndex];
	int escapeIndex;

	bool isLeafNode;
	//PCK: unsigned instead of bool
	unsigned aabbOverlap;

	while (curIndex < endNodeIndex)
	{
//#define VISUALLY_ANALYZE_BVH 1
#ifdef VISUALLY_ANALYZE_BVH
		//some code snippet to debugDraw aabb, to visually analyze bvh structure
		static int drawPatch = 0;
		//need some global access to a debugDrawer
		extern btIDebugDraw* debugDrawerPtr;
		if (curIndex == drawPatch)
		{
			btVector3 aabbMin, aabbMax;
			aabbMin = unQuantize(rootNode->m_quantizedAabbMin);
			aabbMax = unQuantize(rootNode->m_quantizedAabbMax);
			btVector3 color(1, 0, 0);
			debugDrawerPtr->drawAabb(aabbMin, aabbMax, color);
		}
#endif  //VISUALLY_ANALYZE_BVH

		//catch bugs in tree data
		btAssert(walkIterations < subTreeSize);

		walkIterations++;
		//PCK: unsigned instead of bool
		aabbOverlap = testQuantizedAabbAgainstQuantizedAabb(quantizedQueryAabbMin, quantizedQueryAabbMax, rootNode->m_quantizedAabbMin, rootNode->m_quantizedAabbMax);
		isLeafNode = rootNode->isLeafNode();

		if (isLeafNode && aabbOverlap)
		{
			nodeCallback->processNode(rootNode->getPartId(), rootNode->getTriangleIndex());
		}

		//PCK: unsigned instead of bool
		if ((aabbOverlap != 0) || isLeafNode)
		{
			rootNode++;
			curIndex++;
		}
		else
		{
			escapeIndex = rootNode->getEscapeIndex();
			rootNode += escapeIndex;
			curIndex += escapeIndex;
		}
	}
}

//This traversal can be called from Playstation 3 SPU
void btQuantizedBvh::walkStacklessQuantizedTreeCacheFriendly(btNodeOverlapCallback* nodeCallback, unsigned short int* quantizedQueryAabbMin, unsigned short int* quantizedQueryAabbMax) const
{
	btAssert(m_useQuantization);

	int i;

	for (i = 0; i < this->m_SubtreeHeaders.size(); i++)
	{
		const btBvhSubtreeInfo& subtree = m_SubtreeHeaders[i];

		//PCK: unsigned instead of bool
		unsigned overlap = testQuantizedAabbAgainstQuantizedAabb(quantizedQueryAabbMin, quantizedQueryAabbMax, subtree.m_quantizedAabbMin, subtree.m_quantizedAabbMax);
		if (overlap != 0)
		{
			walkStacklessQuantizedTree(nodeCallback, quantizedQueryAabbMin, quantizedQueryAabbMax,
									   subtree.m_rootNodeIndex,
									   subtree.m_rootNodeIndex + subtree.m_subtreeSize);
		}
	}
}

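// This variant first culls whole subtrees against the query using only the
// btBvhSubtreeInfo headers, then walks each surviving subtree linearly.
// Because updateSubtreeHeaders caps every recorded subtree at
// MAX_SUBTREE_SIZE_IN_BYTES, a subtree can be streamed into a small local
// buffer, which is what made this traversal suitable for the PS3's SPUs.
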
void btQuantizedBvh::reportRayOverlappingNodex(btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget) const
{
	reportBoxCastOverlappingNodex(nodeCallback, raySource, rayTarget, btVector3(0, 0, 0), btVector3(0, 0, 0));
}

void btQuantizedBvh::reportBoxCastOverlappingNodex(btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget, const btVector3& aabbMin, const btVector3& aabbMax) const
{
	//always use stackless

	if (m_useQuantization)
	{
		walkStacklessQuantizedTreeAgainstRay(nodeCallback, raySource, rayTarget, aabbMin, aabbMax, 0, m_curNodeIndex);
	}
	else
	{
		walkStacklessTreeAgainstRay(nodeCallback, raySource, rayTarget, aabbMin, aabbMax, 0, m_curNodeIndex);
	}
	/*
	{
		//recursive traversal
		btVector3 qaabbMin = raySource;
		btVector3 qaabbMax = raySource;
		qaabbMin.setMin(rayTarget);
		qaabbMax.setMax(rayTarget);
		qaabbMin += aabbMin;
		qaabbMax += aabbMax;
		reportAabbOverlappingNodex(nodeCallback,qaabbMin,qaabbMax);
	}
	*/
}

void btQuantizedBvh::swapLeafNodes(int i, int splitIndex)
{
	if (m_useQuantization)
	{
		btQuantizedBvhNode tmp = m_quantizedLeafNodes[i];
		m_quantizedLeafNodes[i] = m_quantizedLeafNodes[splitIndex];
		m_quantizedLeafNodes[splitIndex] = tmp;
	}
	else
	{
		btOptimizedBvhNode tmp = m_leafNodes[i];
		m_leafNodes[i] = m_leafNodes[splitIndex];
		m_leafNodes[splitIndex] = tmp;
	}
}

void btQuantizedBvh::assignInternalNodeFromLeafNode(int internalNode, int leafNodeIndex)
{
	if (m_useQuantization)
	{
		m_quantizedContiguousNodes[internalNode] = m_quantizedLeafNodes[leafNodeIndex];
	}
	else
	{
		m_contiguousNodes[internalNode] = m_leafNodes[leafNodeIndex];
	}
}

//PCK: include
#include <new>

#if 0
//PCK: consts
static const unsigned BVH_ALIGNMENT = 16;
static const unsigned BVH_ALIGNMENT_MASK = BVH_ALIGNMENT-1;

static const unsigned BVH_ALIGNMENT_BLOCKS = 2;
#endif

unsigned int btQuantizedBvh::getAlignmentSerializationPadding()
{
	// I changed this to 0 since the extra padding is not needed or used.
	return 0;  //BVH_ALIGNMENT_BLOCKS * BVH_ALIGNMENT;
}

unsigned btQuantizedBvh::calculateSerializeBufferSize() const
{
	unsigned baseSize = sizeof(btQuantizedBvh) + getAlignmentSerializationPadding();
	baseSize += sizeof(btBvhSubtreeInfo) * m_subtreeHeaderCount;
	if (m_useQuantization)
	{
		return baseSize + m_curNodeIndex * sizeof(btQuantizedBvhNode);
	}
	return baseSize + m_curNodeIndex * sizeof(btOptimizedBvhNode);
}

bool btQuantizedBvh::serialize(void* o_alignedDataBuffer, unsigned /*i_dataBufferSize */, bool i_swapEndian) const
{
	btAssert(m_subtreeHeaderCount == m_SubtreeHeaders.size());
	m_subtreeHeaderCount = m_SubtreeHeaders.size();

	/*	if (i_dataBufferSize < calculateSerializeBufferSize() || o_alignedDataBuffer == NULL || (((unsigned)o_alignedDataBuffer & BVH_ALIGNMENT_MASK) != 0))
	{
		///check alignment for buffer?
		btAssert(0);
		return false;
	}
*/

	btQuantizedBvh* targetBvh = (btQuantizedBvh*)o_alignedDataBuffer;

	// construct the class so the virtual function table, etc will be set up
	// Also, m_leafNodes and m_quantizedLeafNodes will be initialized to default values by the constructor
	new (targetBvh) btQuantizedBvh;

	if (i_swapEndian)
	{
		targetBvh->m_curNodeIndex = static_cast<int>(btSwapEndian(m_curNodeIndex));

		btSwapVector3Endian(m_bvhAabbMin, targetBvh->m_bvhAabbMin);
		btSwapVector3Endian(m_bvhAabbMax, targetBvh->m_bvhAabbMax);
		btSwapVector3Endian(m_bvhQuantization, targetBvh->m_bvhQuantization);

		targetBvh->m_traversalMode = (btTraversalMode)btSwapEndian(m_traversalMode);
		targetBvh->m_subtreeHeaderCount = static_cast<int>(btSwapEndian(m_subtreeHeaderCount));
	}
	else
	{
		targetBvh->m_curNodeIndex = m_curNodeIndex;
		targetBvh->m_bvhAabbMin = m_bvhAabbMin;
		targetBvh->m_bvhAabbMax = m_bvhAabbMax;
		targetBvh->m_bvhQuantization = m_bvhQuantization;
		targetBvh->m_traversalMode = m_traversalMode;
		targetBvh->m_subtreeHeaderCount = m_subtreeHeaderCount;
	}

	targetBvh->m_useQuantization = m_useQuantization;

	unsigned char* nodeData = (unsigned char*)targetBvh;
	nodeData += sizeof(btQuantizedBvh);

	unsigned sizeToAdd = 0;  //(BVH_ALIGNMENT-((unsigned)nodeData & BVH_ALIGNMENT_MASK))&BVH_ALIGNMENT_MASK;
	nodeData += sizeToAdd;

	int nodeCount = m_curNodeIndex;

	if (m_useQuantization)
	{
		targetBvh->m_quantizedContiguousNodes.initializeFromBuffer(nodeData, nodeCount, nodeCount);

		if (i_swapEndian)
		{
			for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
			{
				targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0] = btSwapEndian(m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0]);
				targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1] = btSwapEndian(m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1]);
				targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2] = btSwapEndian(m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2]);

				targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0] = btSwapEndian(m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0]);
				targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1] = btSwapEndian(m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1]);
				targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2] = btSwapEndian(m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2]);

				targetBvh->m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex = static_cast<int>(btSwapEndian(m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex));
			}
		}
		else
		{
			for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
			{
				targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0];
				targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1];
				targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2];

				targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0];
				targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1];
				targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2];

				targetBvh->m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex = m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex;
			}
		}
		nodeData += sizeof(btQuantizedBvhNode) * nodeCount;

		// this clears the pointer in the member variable; it doesn't really do anything to the data.
		// it does call the destructor on the contained objects, but they are all classes with no destructor defined,
		// so the memory (which is not freed) is left alone
		targetBvh->m_quantizedContiguousNodes.initializeFromBuffer(NULL, 0, 0);
	}
	else
	{
		targetBvh->m_contiguousNodes.initializeFromBuffer(nodeData, nodeCount, nodeCount);

		if (i_swapEndian)
		{
			for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
			{
				btSwapVector3Endian(m_contiguousNodes[nodeIndex].m_aabbMinOrg, targetBvh->m_contiguousNodes[nodeIndex].m_aabbMinOrg);
				btSwapVector3Endian(m_contiguousNodes[nodeIndex].m_aabbMaxOrg, targetBvh->m_contiguousNodes[nodeIndex].m_aabbMaxOrg);

				targetBvh->m_contiguousNodes[nodeIndex].m_escapeIndex = static_cast<int>(btSwapEndian(m_contiguousNodes[nodeIndex].m_escapeIndex));
				targetBvh->m_contiguousNodes[nodeIndex].m_subPart = static_cast<int>(btSwapEndian(m_contiguousNodes[nodeIndex].m_subPart));
				targetBvh->m_contiguousNodes[nodeIndex].m_triangleIndex = static_cast<int>(btSwapEndian(m_contiguousNodes[nodeIndex].m_triangleIndex));
			}
		}
		else
		{
			for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
			{
				targetBvh->m_contiguousNodes[nodeIndex].m_aabbMinOrg = m_contiguousNodes[nodeIndex].m_aabbMinOrg;
				targetBvh->m_contiguousNodes[nodeIndex].m_aabbMaxOrg = m_contiguousNodes[nodeIndex].m_aabbMaxOrg;

				targetBvh->m_contiguousNodes[nodeIndex].m_escapeIndex = m_contiguousNodes[nodeIndex].m_escapeIndex;
				targetBvh->m_contiguousNodes[nodeIndex].m_subPart = m_contiguousNodes[nodeIndex].m_subPart;
				targetBvh->m_contiguousNodes[nodeIndex].m_triangleIndex = m_contiguousNodes[nodeIndex].m_triangleIndex;
			}
		}
		nodeData += sizeof(btOptimizedBvhNode) * nodeCount;

		// this clears the pointer in the member variable; it doesn't really do anything to the data.
		// it does call the destructor on the contained objects, but they are all classes with no destructor defined,
		// so the memory (which is not freed) is left alone
		targetBvh->m_contiguousNodes.initializeFromBuffer(NULL, 0, 0);
	}

	sizeToAdd = 0;  //(BVH_ALIGNMENT-((unsigned)nodeData & BVH_ALIGNMENT_MASK))&BVH_ALIGNMENT_MASK;
	nodeData += sizeToAdd;

	// Now serialize the subtree headers
	targetBvh->m_SubtreeHeaders.initializeFromBuffer(nodeData, m_subtreeHeaderCount, m_subtreeHeaderCount);
	if (i_swapEndian)
	{
		for (int i = 0; i < m_subtreeHeaderCount; i++)
		{
			targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMin[0] = btSwapEndian(m_SubtreeHeaders[i].m_quantizedAabbMin[0]);
			targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMin[1] = btSwapEndian(m_SubtreeHeaders[i].m_quantizedAabbMin[1]);
			targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMin[2] = btSwapEndian(m_SubtreeHeaders[i].m_quantizedAabbMin[2]);

			targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMax[0] = btSwapEndian(m_SubtreeHeaders[i].m_quantizedAabbMax[0]);
			targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMax[1] = btSwapEndian(m_SubtreeHeaders[i].m_quantizedAabbMax[1]);
			targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMax[2] = btSwapEndian(m_SubtreeHeaders[i].m_quantizedAabbMax[2]);

			targetBvh->m_SubtreeHeaders[i].m_rootNodeIndex = static_cast<int>(btSwapEndian(m_SubtreeHeaders[i].m_rootNodeIndex));
			targetBvh->m_SubtreeHeaders[i].m_subtreeSize = static_cast<int>(btSwapEndian(m_SubtreeHeaders[i].m_subtreeSize));
		}
	}
	else
	{
		for (int i = 0; i < m_subtreeHeaderCount; i++)
		{
			targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMin[0] = (m_SubtreeHeaders[i].m_quantizedAabbMin[0]);
			targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMin[1] = (m_SubtreeHeaders[i].m_quantizedAabbMin[1]);
			targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMin[2] = (m_SubtreeHeaders[i].m_quantizedAabbMin[2]);

			targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMax[0] = (m_SubtreeHeaders[i].m_quantizedAabbMax[0]);
			targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMax[1] = (m_SubtreeHeaders[i].m_quantizedAabbMax[1]);
			targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMax[2] = (m_SubtreeHeaders[i].m_quantizedAabbMax[2]);

			targetBvh->m_SubtreeHeaders[i].m_rootNodeIndex = (m_SubtreeHeaders[i].m_rootNodeIndex);
			targetBvh->m_SubtreeHeaders[i].m_subtreeSize = (m_SubtreeHeaders[i].m_subtreeSize);

			// need to clear padding in destination buffer
			targetBvh->m_SubtreeHeaders[i].m_padding[0] = 0;
			targetBvh->m_SubtreeHeaders[i].m_padding[1] = 0;
			targetBvh->m_SubtreeHeaders[i].m_padding[2] = 0;
		}
	}
	nodeData += sizeof(btBvhSubtreeInfo) * m_subtreeHeaderCount;

	// this clears the pointer in the member variable; it doesn't really do anything to the data.
	// it does call the destructor on the contained objects, but they are all classes with no destructor defined,
	// so the memory (which is not freed) is left alone
	targetBvh->m_SubtreeHeaders.initializeFromBuffer(NULL, 0, 0);

	// this wipes the virtual function table pointer at the start of the buffer for the class
	*((void**)o_alignedDataBuffer) = NULL;

	return true;
}

btQuantizedBvh* btQuantizedBvh::deSerializeInPlace(void* i_alignedDataBuffer, unsigned int i_dataBufferSize, bool i_swapEndian)
{
	if (i_alignedDataBuffer == NULL)  // || (((unsigned)i_alignedDataBuffer & BVH_ALIGNMENT_MASK) != 0))
	{
		return NULL;
	}
	btQuantizedBvh* bvh = (btQuantizedBvh*)i_alignedDataBuffer;

	if (i_swapEndian)
	{
		bvh->m_curNodeIndex = static_cast<int>(btSwapEndian(bvh->m_curNodeIndex));

		btUnSwapVector3Endian(bvh->m_bvhAabbMin);
		btUnSwapVector3Endian(bvh->m_bvhAabbMax);
		btUnSwapVector3Endian(bvh->m_bvhQuantization);

		bvh->m_traversalMode = (btTraversalMode)btSwapEndian(bvh->m_traversalMode);
		bvh->m_subtreeHeaderCount = static_cast<int>(btSwapEndian(bvh->m_subtreeHeaderCount));
	}

	unsigned int calculatedBufSize = bvh->calculateSerializeBufferSize();
	btAssert(calculatedBufSize <= i_dataBufferSize);

	if (calculatedBufSize > i_dataBufferSize)
	{
		return NULL;
	}

	unsigned char* nodeData = (unsigned char*)bvh;
	nodeData += sizeof(btQuantizedBvh);

	unsigned sizeToAdd = 0;  //(BVH_ALIGNMENT-((unsigned)nodeData & BVH_ALIGNMENT_MASK))&BVH_ALIGNMENT_MASK;
	nodeData += sizeToAdd;

	int nodeCount = bvh->m_curNodeIndex;

	// Must call placement new to fill in virtual function table, etc, but we don't want to overwrite most data, so call a special version of the constructor
	// Also, m_leafNodes and m_quantizedLeafNodes will be initialized to default values by the constructor
	new (bvh) btQuantizedBvh(*bvh, false);

	if (bvh->m_useQuantization)
	{
		bvh->m_quantizedContiguousNodes.initializeFromBuffer(nodeData, nodeCount, nodeCount);

		if (i_swapEndian)
		{
			for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
			{
				bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0] = btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0]);
				bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1] = btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1]);
				bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2] = btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2]);

				bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0] = btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0]);
				bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1] = btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1]);
				bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2] = btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2]);

				bvh->m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex = static_cast<int>(btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex));
			}
		}
		nodeData += sizeof(btQuantizedBvhNode) * nodeCount;
	}
	else
	{
		bvh->m_contiguousNodes.initializeFromBuffer(nodeData, nodeCount, nodeCount);

		if (i_swapEndian)
		{
			for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
			{
				btUnSwapVector3Endian(bvh->m_contiguousNodes[nodeIndex].m_aabbMinOrg);
				btUnSwapVector3Endian(bvh->m_contiguousNodes[nodeIndex].m_aabbMaxOrg);

				bvh->m_contiguousNodes[nodeIndex].m_escapeIndex = static_cast<int>(btSwapEndian(bvh->m_contiguousNodes[nodeIndex].m_escapeIndex));
				bvh->m_contiguousNodes[nodeIndex].m_subPart = static_cast<int>(btSwapEndian(bvh->m_contiguousNodes[nodeIndex].m_subPart));
				bvh->m_contiguousNodes[nodeIndex].m_triangleIndex = static_cast<int>(btSwapEndian(bvh->m_contiguousNodes[nodeIndex].m_triangleIndex));
			}
		}
		nodeData += sizeof(btOptimizedBvhNode) * nodeCount;
	}

	sizeToAdd = 0;  //(BVH_ALIGNMENT-((unsigned)nodeData & BVH_ALIGNMENT_MASK))&BVH_ALIGNMENT_MASK;
	nodeData += sizeToAdd;

	// Now deserialize the subtree headers
	bvh->m_SubtreeHeaders.initializeFromBuffer(nodeData, bvh->m_subtreeHeaderCount, bvh->m_subtreeHeaderCount);
	if (i_swapEndian)
	{
		for (int i = 0; i < bvh->m_subtreeHeaderCount; i++)
		{
			bvh->m_SubtreeHeaders[i].m_quantizedAabbMin[0] = btSwapEndian(bvh->m_SubtreeHeaders[i].m_quantizedAabbMin[0]);
			bvh->m_SubtreeHeaders[i].m_quantizedAabbMin[1] = btSwapEndian(bvh->m_SubtreeHeaders[i].m_quantizedAabbMin[1]);
			bvh->m_SubtreeHeaders[i].m_quantizedAabbMin[2] = btSwapEndian(bvh->m_SubtreeHeaders[i].m_quantizedAabbMin[2]);

			bvh->m_SubtreeHeaders[i].m_quantizedAabbMax[0] = btSwapEndian(bvh->m_SubtreeHeaders[i].m_quantizedAabbMax[0]);
			bvh->m_SubtreeHeaders[i].m_quantizedAabbMax[1] = btSwapEndian(bvh->m_SubtreeHeaders[i].m_quantizedAabbMax[1]);
			bvh->m_SubtreeHeaders[i].m_quantizedAabbMax[2] = btSwapEndian(bvh->m_SubtreeHeaders[i].m_quantizedAabbMax[2]);

			bvh->m_SubtreeHeaders[i].m_rootNodeIndex = static_cast<int>(btSwapEndian(bvh->m_SubtreeHeaders[i].m_rootNodeIndex));
			bvh->m_SubtreeHeaders[i].m_subtreeSize = static_cast<int>(btSwapEndian(bvh->m_SubtreeHeaders[i].m_subtreeSize));
		}
	}

	return bvh;
}

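// Round-trip sketch (an editor's illustration of a hypothetical caller, not
// part of this file; assumes a 16-byte-aligned allocation via LinearMath's
// btAlignedAlloc):
//
//   unsigned size = bvh->calculateSerializeBufferSize();
//   void* buffer = btAlignedAlloc(size, 16);
//   bvh->serialize(buffer, size, swapEndian);  // buffer is now self-contained
//   // ...store or transmit the buffer, then on the target machine:
//   btQuantizedBvh* loaded = btQuantizedBvh::deSerializeInPlace(buffer, size, swapEndian);
//
// serialize() NULLs the vtable pointer at the start of the buffer;
// deSerializeInPlace() restores it via placement new with the special
// constructor below, then repoints the node arrays into the buffer itself.
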
// Constructor that prevents btVector3's default constructor from being called
btQuantizedBvh::btQuantizedBvh(btQuantizedBvh& self, bool /* ownsMemory */) : m_bvhAabbMin(self.m_bvhAabbMin),
																			  m_bvhAabbMax(self.m_bvhAabbMax),
																			  m_bvhQuantization(self.m_bvhQuantization),
																			  m_bulletVersion(BT_BULLET_VERSION)
{
}

void btQuantizedBvh::deSerializeFloat(struct btQuantizedBvhFloatData& quantizedBvhFloatData)
{
	m_bvhAabbMax.deSerializeFloat(quantizedBvhFloatData.m_bvhAabbMax);
	m_bvhAabbMin.deSerializeFloat(quantizedBvhFloatData.m_bvhAabbMin);
	m_bvhQuantization.deSerializeFloat(quantizedBvhFloatData.m_bvhQuantization);

	m_curNodeIndex = quantizedBvhFloatData.m_curNodeIndex;
	m_useQuantization = quantizedBvhFloatData.m_useQuantization != 0;

	{
		int numElem = quantizedBvhFloatData.m_numContiguousLeafNodes;
		m_contiguousNodes.resize(numElem);

		if (numElem)
		{
			btOptimizedBvhNodeFloatData* memPtr = quantizedBvhFloatData.m_contiguousNodesPtr;

			for (int i = 0; i < numElem; i++, memPtr++)
			{
				m_contiguousNodes[i].m_aabbMaxOrg.deSerializeFloat(memPtr->m_aabbMaxOrg);
				m_contiguousNodes[i].m_aabbMinOrg.deSerializeFloat(memPtr->m_aabbMinOrg);
				m_contiguousNodes[i].m_escapeIndex = memPtr->m_escapeIndex;
				m_contiguousNodes[i].m_subPart = memPtr->m_subPart;
				m_contiguousNodes[i].m_triangleIndex = memPtr->m_triangleIndex;
			}
		}
	}

	{
		int numElem = quantizedBvhFloatData.m_numQuantizedContiguousNodes;
		m_quantizedContiguousNodes.resize(numElem);

		if (numElem)
		{
			btQuantizedBvhNodeData* memPtr = quantizedBvhFloatData.m_quantizedContiguousNodesPtr;
			for (int i = 0; i < numElem; i++, memPtr++)
			{
				m_quantizedContiguousNodes[i].m_escapeIndexOrTriangleIndex = memPtr->m_escapeIndexOrTriangleIndex;
				m_quantizedContiguousNodes[i].m_quantizedAabbMax[0] = memPtr->m_quantizedAabbMax[0];
				m_quantizedContiguousNodes[i].m_quantizedAabbMax[1] = memPtr->m_quantizedAabbMax[1];
				m_quantizedContiguousNodes[i].m_quantizedAabbMax[2] = memPtr->m_quantizedAabbMax[2];
				m_quantizedContiguousNodes[i].m_quantizedAabbMin[0] = memPtr->m_quantizedAabbMin[0];
				m_quantizedContiguousNodes[i].m_quantizedAabbMin[1] = memPtr->m_quantizedAabbMin[1];
				m_quantizedContiguousNodes[i].m_quantizedAabbMin[2] = memPtr->m_quantizedAabbMin[2];
			}
		}
	}

	m_traversalMode = btTraversalMode(quantizedBvhFloatData.m_traversalMode);

	{
		int numElem = quantizedBvhFloatData.m_numSubtreeHeaders;
		m_SubtreeHeaders.resize(numElem);
		if (numElem)
		{
			btBvhSubtreeInfoData* memPtr = quantizedBvhFloatData.m_subTreeInfoPtr;
			for (int i = 0; i < numElem; i++, memPtr++)
			{
				m_SubtreeHeaders[i].m_quantizedAabbMax[0] = memPtr->m_quantizedAabbMax[0];
				m_SubtreeHeaders[i].m_quantizedAabbMax[1] = memPtr->m_quantizedAabbMax[1];
				m_SubtreeHeaders[i].m_quantizedAabbMax[2] = memPtr->m_quantizedAabbMax[2];
				m_SubtreeHeaders[i].m_quantizedAabbMin[0] = memPtr->m_quantizedAabbMin[0];
				m_SubtreeHeaders[i].m_quantizedAabbMin[1] = memPtr->m_quantizedAabbMin[1];
				m_SubtreeHeaders[i].m_quantizedAabbMin[2] = memPtr->m_quantizedAabbMin[2];
				m_SubtreeHeaders[i].m_rootNodeIndex = memPtr->m_rootNodeIndex;
				m_SubtreeHeaders[i].m_subtreeSize = memPtr->m_subtreeSize;
			}
		}
	}
}

void btQuantizedBvh::deSerializeDouble(struct btQuantizedBvhDoubleData& quantizedBvhDoubleData)
{
	m_bvhAabbMax.deSerializeDouble(quantizedBvhDoubleData.m_bvhAabbMax);
	m_bvhAabbMin.deSerializeDouble(quantizedBvhDoubleData.m_bvhAabbMin);
	m_bvhQuantization.deSerializeDouble(quantizedBvhDoubleData.m_bvhQuantization);

	m_curNodeIndex = quantizedBvhDoubleData.m_curNodeIndex;
	m_useQuantization = quantizedBvhDoubleData.m_useQuantization != 0;

	{
		int numElem = quantizedBvhDoubleData.m_numContiguousLeafNodes;
		m_contiguousNodes.resize(numElem);

		if (numElem)
		{
			btOptimizedBvhNodeDoubleData* memPtr = quantizedBvhDoubleData.m_contiguousNodesPtr;

			for (int i = 0; i < numElem; i++, memPtr++)
			{
				m_contiguousNodes[i].m_aabbMaxOrg.deSerializeDouble(memPtr->m_aabbMaxOrg);
				m_contiguousNodes[i].m_aabbMinOrg.deSerializeDouble(memPtr->m_aabbMinOrg);
				m_contiguousNodes[i].m_escapeIndex = memPtr->m_escapeIndex;
				m_contiguousNodes[i].m_subPart = memPtr->m_subPart;
				m_contiguousNodes[i].m_triangleIndex = memPtr->m_triangleIndex;
			}
		}
	}

	{
		int numElem = quantizedBvhDoubleData.m_numQuantizedContiguousNodes;
		m_quantizedContiguousNodes.resize(numElem);

		if (numElem)
		{
			btQuantizedBvhNodeData* memPtr = quantizedBvhDoubleData.m_quantizedContiguousNodesPtr;
			for (int i = 0; i < numElem; i++, memPtr++)
			{
				m_quantizedContiguousNodes[i].m_escapeIndexOrTriangleIndex = memPtr->m_escapeIndexOrTriangleIndex;
				m_quantizedContiguousNodes[i].m_quantizedAabbMax[0] = memPtr->m_quantizedAabbMax[0];
				m_quantizedContiguousNodes[i].m_quantizedAabbMax[1] = memPtr->m_quantizedAabbMax[1];
				m_quantizedContiguousNodes[i].m_quantizedAabbMax[2] = memPtr->m_quantizedAabbMax[2];
				m_quantizedContiguousNodes[i].m_quantizedAabbMin[0] = memPtr->m_quantizedAabbMin[0];
				m_quantizedContiguousNodes[i].m_quantizedAabbMin[1] = memPtr->m_quantizedAabbMin[1];
				m_quantizedContiguousNodes[i].m_quantizedAabbMin[2] = memPtr->m_quantizedAabbMin[2];
			}
		}
	}

	m_traversalMode = btTraversalMode(quantizedBvhDoubleData.m_traversalMode);

	{
		int numElem = quantizedBvhDoubleData.m_numSubtreeHeaders;
		m_SubtreeHeaders.resize(numElem);
		if (numElem)
		{
			btBvhSubtreeInfoData* memPtr = quantizedBvhDoubleData.m_subTreeInfoPtr;
			for (int i = 0; i < numElem; i++, memPtr++)
			{
				m_SubtreeHeaders[i].m_quantizedAabbMax[0] = memPtr->m_quantizedAabbMax[0];
				m_SubtreeHeaders[i].m_quantizedAabbMax[1] = memPtr->m_quantizedAabbMax[1];
				m_SubtreeHeaders[i].m_quantizedAabbMax[2] = memPtr->m_quantizedAabbMax[2];
				m_SubtreeHeaders[i].m_quantizedAabbMin[0] = memPtr->m_quantizedAabbMin[0];
				m_SubtreeHeaders[i].m_quantizedAabbMin[1] = memPtr->m_quantizedAabbMin[1];
				m_SubtreeHeaders[i].m_quantizedAabbMin[2] = memPtr->m_quantizedAabbMin[2];
				m_SubtreeHeaders[i].m_rootNodeIndex = memPtr->m_rootNodeIndex;
				m_SubtreeHeaders[i].m_subtreeSize = memPtr->m_subtreeSize;
			}
		}
	}
}

///fills the dataBuffer and returns the struct name (and 0 on failure)
const char* btQuantizedBvh::serialize(void* dataBuffer, btSerializer* serializer) const
{
	btQuantizedBvhData* quantizedData = (btQuantizedBvhData*)dataBuffer;

	m_bvhAabbMax.serialize(quantizedData->m_bvhAabbMax);
	m_bvhAabbMin.serialize(quantizedData->m_bvhAabbMin);
	m_bvhQuantization.serialize(quantizedData->m_bvhQuantization);

	quantizedData->m_curNodeIndex = m_curNodeIndex;
	quantizedData->m_useQuantization = m_useQuantization;

	quantizedData->m_numContiguousLeafNodes = m_contiguousNodes.size();
	quantizedData->m_contiguousNodesPtr = (btOptimizedBvhNodeData*)(m_contiguousNodes.size() ? serializer->getUniquePointer((void*)&m_contiguousNodes[0]) : 0);
	if (quantizedData->m_contiguousNodesPtr)
	{
		int sz = sizeof(btOptimizedBvhNodeData);
		int numElem = m_contiguousNodes.size();
		btChunk* chunk = serializer->allocate(sz, numElem);
		btOptimizedBvhNodeData* memPtr = (btOptimizedBvhNodeData*)chunk->m_oldPtr;
		for (int i = 0; i < numElem; i++, memPtr++)
		{
			m_contiguousNodes[i].m_aabbMaxOrg.serialize(memPtr->m_aabbMaxOrg);
			m_contiguousNodes[i].m_aabbMinOrg.serialize(memPtr->m_aabbMinOrg);
			memPtr->m_escapeIndex = m_contiguousNodes[i].m_escapeIndex;
			memPtr->m_subPart = m_contiguousNodes[i].m_subPart;
			memPtr->m_triangleIndex = m_contiguousNodes[i].m_triangleIndex;
			// Fill padding with zeros to appease msan.
			memset(memPtr->m_pad, 0, sizeof(memPtr->m_pad));
		}
		serializer->finalizeChunk(chunk, "btOptimizedBvhNodeData", BT_ARRAY_CODE, (void*)&m_contiguousNodes[0]);
	}

	quantizedData->m_numQuantizedContiguousNodes = m_quantizedContiguousNodes.size();
	//	printf("quantizedData->m_numQuantizedContiguousNodes=%d\n",quantizedData->m_numQuantizedContiguousNodes);
	quantizedData->m_quantizedContiguousNodesPtr = (btQuantizedBvhNodeData*)(m_quantizedContiguousNodes.size() ? serializer->getUniquePointer((void*)&m_quantizedContiguousNodes[0]) : 0);
	if (quantizedData->m_quantizedContiguousNodesPtr)
	{
		int sz = sizeof(btQuantizedBvhNodeData);
		int numElem = m_quantizedContiguousNodes.size();
		btChunk* chunk = serializer->allocate(sz, numElem);
		btQuantizedBvhNodeData* memPtr = (btQuantizedBvhNodeData*)chunk->m_oldPtr;
		for (int i = 0; i < numElem; i++, memPtr++)
		{
			memPtr->m_escapeIndexOrTriangleIndex = m_quantizedContiguousNodes[i].m_escapeIndexOrTriangleIndex;
			memPtr->m_quantizedAabbMax[0] = m_quantizedContiguousNodes[i].m_quantizedAabbMax[0];
			memPtr->m_quantizedAabbMax[1] = m_quantizedContiguousNodes[i].m_quantizedAabbMax[1];
			memPtr->m_quantizedAabbMax[2] = m_quantizedContiguousNodes[i].m_quantizedAabbMax[2];
			memPtr->m_quantizedAabbMin[0] = m_quantizedContiguousNodes[i].m_quantizedAabbMin[0];
			memPtr->m_quantizedAabbMin[1] = m_quantizedContiguousNodes[i].m_quantizedAabbMin[1];
			memPtr->m_quantizedAabbMin[2] = m_quantizedContiguousNodes[i].m_quantizedAabbMin[2];
		}
		serializer->finalizeChunk(chunk, "btQuantizedBvhNodeData", BT_ARRAY_CODE, (void*)&m_quantizedContiguousNodes[0]);
	}

	quantizedData->m_traversalMode = int(m_traversalMode);
	quantizedData->m_numSubtreeHeaders = m_SubtreeHeaders.size();

	quantizedData->m_subTreeInfoPtr = (btBvhSubtreeInfoData*)(m_SubtreeHeaders.size() ? serializer->getUniquePointer((void*)&m_SubtreeHeaders[0]) : 0);
	if (quantizedData->m_subTreeInfoPtr)
	{
		int sz = sizeof(btBvhSubtreeInfoData);
		int numElem = m_SubtreeHeaders.size();
		btChunk* chunk = serializer->allocate(sz, numElem);
		btBvhSubtreeInfoData* memPtr = (btBvhSubtreeInfoData*)chunk->m_oldPtr;
		for (int i = 0; i < numElem; i++, memPtr++)
		{
			memPtr->m_quantizedAabbMax[0] = m_SubtreeHeaders[i].m_quantizedAabbMax[0];
			memPtr->m_quantizedAabbMax[1] = m_SubtreeHeaders[i].m_quantizedAabbMax[1];
			memPtr->m_quantizedAabbMax[2] = m_SubtreeHeaders[i].m_quantizedAabbMax[2];
			memPtr->m_quantizedAabbMin[0] = m_SubtreeHeaders[i].m_quantizedAabbMin[0];
			memPtr->m_quantizedAabbMin[1] = m_SubtreeHeaders[i].m_quantizedAabbMin[1];
			memPtr->m_quantizedAabbMin[2] = m_SubtreeHeaders[i].m_quantizedAabbMin[2];

			memPtr->m_rootNodeIndex = m_SubtreeHeaders[i].m_rootNodeIndex;
			memPtr->m_subtreeSize = m_SubtreeHeaders[i].m_subtreeSize;
		}
		serializer->finalizeChunk(chunk, "btBvhSubtreeInfoData", BT_ARRAY_CODE, (void*)&m_SubtreeHeaders[0]);
	}
	return btQuantizedBvhDataName;
}
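
// In the btSerializer path above, each node array becomes its own chunk:
// getUniquePointer() derives a stable id from the array's address, allocate()
// reserves the chunk, and finalizeChunk() tags it with its struct name and
// BT_ARRAY_CODE so a loader can relink the pointers later. The returned
// btQuantizedBvhDataName identifies the top-level struct.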