1 // Copyright Contributors to the OpenVDB Project
2 // SPDX-License-Identifier: MPL-2.0
3
4 /// @file tools/GridOperators.h
5 ///
6 /// @brief Apply an operator to an input grid to produce an output grid
7 /// with the same active voxel topology but a potentially different value type.
8
9 #ifndef OPENVDB_TOOLS_GRID_OPERATORS_HAS_BEEN_INCLUDED
10 #define OPENVDB_TOOLS_GRID_OPERATORS_HAS_BEEN_INCLUDED
11
12 #include "openvdb/Grid.h"
13 #include "openvdb/math/Operators.h"
14 #include "openvdb/util/NullInterrupter.h"
15 #include "openvdb/thread/Threading.h"
16 #include "openvdb/tree/LeafManager.h"
17 #include "openvdb/tree/ValueAccessor.h"
18 #include "ValueTransformer.h" // for tools::foreach()
19 #include <openvdb/openvdb.h>
20
21 #include <tbb/parallel_for.h>
22
23 namespace openvdb {
24 OPENVDB_USE_VERSION_NAMESPACE
25 namespace OPENVDB_VERSION_NAME {
26 namespace tools {
27
28 /// @brief VectorToScalarConverter<VectorGridType>::Type is the type of a grid
29 /// having the same tree configuration as VectorGridType but a scalar value type, T,
30 /// where T is the type of the original vector components.
31 /// @details For example, VectorToScalarConverter<Vec3DGrid>::Type is equivalent to DoubleGrid.
/// @brief VectorToScalarConverter<VectorGridType>::Type is the type of a grid
/// having the same tree configuration as VectorGridType but a scalar value type, T,
/// where T is the type of the original vector components.
/// @details For example, VectorToScalarConverter<Vec3DGrid>::Type is equivalent to DoubleGrid.
template<typename VectorGridType> struct VectorToScalarConverter {
    using VecComponentValueT = typename VectorGridType::ValueType::value_type;
    using Type = typename VectorGridType::template ValueConverter<VecComponentValueT>::Type;
};
36
37 /// @brief ScalarToVectorConverter<ScalarGridType>::Type is the type of a grid
38 /// having the same tree configuration as ScalarGridType but value type Vec3<T>
39 /// where T is ScalarGridType::ValueType.
40 /// @details For example, ScalarToVectorConverter<DoubleGrid>::Type is equivalent to Vec3DGrid.
41 template<typename ScalarGridType> struct ScalarToVectorConverter {
42 typedef math::Vec3<typename ScalarGridType::ValueType> VectorValueT;
43 typedef typename ScalarGridType::template ValueConverter<VectorValueT>::Type Type;
44 };
45
46
47 /// @brief Compute the Closest-Point Transform (CPT) from a distance field.
48 /// @return a new vector-valued grid with the same numerical precision as the input grid
49 /// (for example, if the input grid is a DoubleGrid, the output grid will be a Vec3DGrid)
50 /// @details When a mask grid is specified, the solution is calculated only in
51 /// the intersection of the mask active topology and the input active topology
52 /// independent of the transforms associated with either grid.
/// @brief Interruptible signature: the caller supplies an interrupter (may be null).
template<typename GridType, typename InterruptT>
typename ScalarToVectorConverter<GridType>::Type::Ptr
cpt(const GridType& grid, bool threaded, InterruptT* interrupt);

/// @brief Masked, interruptible signature: the solution is restricted to the
/// intersection of @a mask's active topology with the input's active topology.
template<typename GridType, typename MaskT, typename InterruptT>
typename ScalarToVectorConverter<GridType>::Type::Ptr
cpt(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt);

/// @brief Convenience overload that runs without an interrupter.
template<typename GridType>
typename ScalarToVectorConverter<GridType>::Type::Ptr
cpt(const GridType& grid, bool threaded = true)
{
    // Forward to the interruptible overload with a null interrupter.
    return cpt<GridType, util::NullInterrupter>(grid, threaded, nullptr);
}

/// @brief Convenience overload: masked, without an interrupter.
template<typename GridType, typename MaskT>
typename ScalarToVectorConverter<GridType>::Type::Ptr
cpt(const GridType& grid, const MaskT& mask, bool threaded = true)
{
    return cpt<GridType, MaskT, util::NullInterrupter>(grid, mask, threaded, nullptr);
}
74
75
76 /// @brief Compute the curl of the given vector-valued grid.
77 /// @return a new vector-valued grid
78 /// @details When a mask grid is specified, the solution is calculated only in
79 /// the intersection of the mask active topology and the input active topology
80 /// independent of the transforms associated with either grid.
/// @brief Interruptible signature: the caller supplies an interrupter (may be null).
template<typename GridType, typename InterruptT>
typename GridType::Ptr
curl(const GridType& grid, bool threaded, InterruptT* interrupt);

/// @brief Masked, interruptible signature: the solution is restricted to the
/// intersection of @a mask's active topology with the input's active topology.
template<typename GridType, typename MaskT, typename InterruptT>
typename GridType::Ptr
curl(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt);

/// @brief Convenience overload that runs without an interrupter.
template<typename GridType>
typename GridType::Ptr
curl(const GridType& grid, bool threaded = true)
{
    // Forward to the interruptible overload with a null interrupter.
    return curl<GridType, util::NullInterrupter>(grid, threaded, nullptr);
}

/// @brief Convenience overload: masked, without an interrupter.
template<typename GridType, typename MaskT>
typename GridType::Ptr
curl(const GridType& grid, const MaskT& mask, bool threaded = true)
{
    return curl<GridType, MaskT, util::NullInterrupter>(grid, mask, threaded, nullptr);
}
102
103
104 /// @brief Compute the divergence of the given vector-valued grid.
105 /// @return a new scalar-valued grid with the same numerical precision as the input grid
106 /// (for example, if the input grid is a Vec3DGrid, the output grid will be a DoubleGrid)
107 /// @details When a mask grid is specified, the solution is calculated only in
108 /// the intersection of the mask active topology and the input active topology
109 /// independent of the transforms associated with either grid.
/// @brief Interruptible signature: the caller supplies an interrupter (may be null).
template<typename GridType, typename InterruptT>
typename VectorToScalarConverter<GridType>::Type::Ptr
divergence(const GridType& grid, bool threaded, InterruptT* interrupt);

/// @brief Masked, interruptible signature: the solution is restricted to the
/// intersection of @a mask's active topology with the input's active topology.
template<typename GridType, typename MaskT, typename InterruptT>
typename VectorToScalarConverter<GridType>::Type::Ptr
divergence(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt);

/// @brief Convenience overload that runs without an interrupter.
template<typename GridType>
typename VectorToScalarConverter<GridType>::Type::Ptr
divergence(const GridType& grid, bool threaded = true)
{
    // Forward to the interruptible overload with a null interrupter.
    return divergence<GridType, util::NullInterrupter>(grid, threaded, nullptr);
}

/// @brief Convenience overload: masked, without an interrupter.
template<typename GridType, typename MaskT>
typename VectorToScalarConverter<GridType>::Type::Ptr
divergence(const GridType& grid, const MaskT& mask, bool threaded = true)
{
    return divergence<GridType, MaskT, util::NullInterrupter>(grid, mask, threaded, nullptr);
}
131
132
133 /// @brief Compute the gradient of the given scalar grid.
134 /// @return a new vector-valued grid with the same numerical precision as the input grid
135 /// (for example, if the input grid is a DoubleGrid, the output grid will be a Vec3DGrid)
136 /// @details When a mask grid is specified, the solution is calculated only in
137 /// the intersection of the mask active topology and the input active topology
138 /// independent of the transforms associated with either grid.
/// @brief Interruptible signature: the caller supplies an interrupter (may be null).
template<typename GridType, typename InterruptT>
typename ScalarToVectorConverter<GridType>::Type::Ptr
gradient(const GridType& grid, bool threaded, InterruptT* interrupt);

/// @brief Masked, interruptible signature: the solution is restricted to the
/// intersection of @a mask's active topology with the input's active topology.
template<typename GridType, typename MaskT, typename InterruptT>
typename ScalarToVectorConverter<GridType>::Type::Ptr
gradient(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt);

/// @brief Convenience overload that runs without an interrupter.
template<typename GridType>
typename ScalarToVectorConverter<GridType>::Type::Ptr
gradient(const GridType& grid, bool threaded = true)
{
    // Forward to the interruptible overload with a null interrupter.
    return gradient<GridType, util::NullInterrupter>(grid, threaded, nullptr);
}

/// @brief Convenience overload: masked, without an interrupter.
template<typename GridType, typename MaskT>
typename ScalarToVectorConverter<GridType>::Type::Ptr
gradient(const GridType& grid, const MaskT& mask, bool threaded = true)
{
    return gradient<GridType, MaskT, util::NullInterrupter>(grid, mask, threaded, nullptr);
}
160
161
162 /// @brief Compute the Laplacian of the given scalar grid.
163 /// @return a new scalar grid
164 /// @details When a mask grid is specified, the solution is calculated only in
165 /// the intersection of the mask active topology and the input active topology
166 /// independent of the transforms associated with either grid.
/// @brief Interruptible signature: the caller supplies an interrupter (may be null).
template<typename GridType, typename InterruptT>
typename GridType::Ptr
laplacian(const GridType& grid, bool threaded, InterruptT* interrupt);

/// @brief Masked, interruptible signature: the solution is restricted to the
/// intersection of @a mask's active topology with the input's active topology.
template<typename GridType, typename MaskT, typename InterruptT>
typename GridType::Ptr
laplacian(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt);

/// @brief Convenience overload that runs without an interrupter.
template<typename GridType>
typename GridType::Ptr
laplacian(const GridType& grid, bool threaded = true)
{
    // Forward to the interruptible overload with a null interrupter.
    return laplacian<GridType, util::NullInterrupter>(grid, threaded, nullptr);
}

/// @brief Convenience overload: masked, without an interrupter.
template<typename GridType, typename MaskT>
typename GridType::Ptr
laplacian(const GridType& grid, const MaskT& mask, bool threaded = true)
{
    return laplacian<GridType, MaskT, util::NullInterrupter>(grid, mask, threaded, nullptr);
}
188
189
190 /// @brief Compute the mean curvature of the given grid.
191 /// @return a new grid
192 /// @details When a mask grid is specified, the solution is calculated only in
193 /// the intersection of the mask active topology and the input active topology
194 /// independent of the transforms associated with either grid.
/// @brief Interruptible signature: the caller supplies an interrupter (may be null).
template<typename GridType, typename InterruptT>
typename GridType::Ptr
meanCurvature(const GridType& grid, bool threaded, InterruptT* interrupt);

/// @brief Masked, interruptible signature: the solution is restricted to the
/// intersection of @a mask's active topology with the input's active topology.
template<typename GridType, typename MaskT, typename InterruptT>
typename GridType::Ptr
meanCurvature(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt);

/// @brief Convenience overload that runs without an interrupter.
template<typename GridType>
typename GridType::Ptr
meanCurvature(const GridType& grid, bool threaded = true)
{
    // Forward to the interruptible overload with a null interrupter.
    return meanCurvature<GridType, util::NullInterrupter>(grid, threaded, nullptr);
}

/// @brief Convenience overload: masked, without an interrupter.
template<typename GridType, typename MaskT>
typename GridType::Ptr
meanCurvature(const GridType& grid, const MaskT& mask, bool threaded = true)
{
    return meanCurvature<GridType, MaskT, util::NullInterrupter>(grid, mask, threaded, nullptr);
}
216
217
218 /// @brief Compute the magnitudes of the vectors of the given vector-valued grid.
219 /// @return a new scalar-valued grid with the same numerical precision as the input grid
220 /// (for example, if the input grid is a Vec3DGrid, the output grid will be a DoubleGrid)
221 /// @details When a mask grid is specified, the solution is calculated only in
222 /// the intersection of the mask active topology and the input active topology
223 /// independent of the transforms associated with either grid.
/// @brief Interruptible signature: the caller supplies an interrupter (may be null).
template<typename GridType, typename InterruptT>
typename VectorToScalarConverter<GridType>::Type::Ptr
magnitude(const GridType& grid, bool threaded, InterruptT* interrupt);

/// @brief Masked, interruptible signature: the solution is restricted to the
/// intersection of @a mask's active topology with the input's active topology.
template<typename GridType, typename MaskT, typename InterruptT>
typename VectorToScalarConverter<GridType>::Type::Ptr
magnitude(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt);

/// @brief Convenience overload that runs without an interrupter.
template<typename GridType>
typename VectorToScalarConverter<GridType>::Type::Ptr
magnitude(const GridType& grid, bool threaded = true)
{
    // Forward to the interruptible overload with a null interrupter.
    return magnitude<GridType, util::NullInterrupter>(grid, threaded, nullptr);
}

/// @brief Convenience overload: masked, without an interrupter.
template<typename GridType, typename MaskT>
typename VectorToScalarConverter<GridType>::Type::Ptr
magnitude(const GridType& grid, const MaskT& mask, bool threaded = true)
{
    return magnitude<GridType, MaskT, util::NullInterrupter>(grid, mask, threaded, nullptr);
}
245
246
247 /// @brief Normalize the vectors of the given vector-valued grid.
248 /// @return a new vector-valued grid
249 /// @details When a mask grid is specified, the solution is calculated only in
250 /// the intersection of the mask active topology and the input active topology
251 /// independent of the transforms associated with either grid.
/// @brief Interruptible signature: the caller supplies an interrupter (may be null).
template<typename GridType, typename InterruptT>
typename GridType::Ptr
normalize(const GridType& grid, bool threaded, InterruptT* interrupt);

/// @brief Masked, interruptible signature: the solution is restricted to the
/// intersection of @a mask's active topology with the input's active topology.
template<typename GridType, typename MaskT, typename InterruptT>
typename GridType::Ptr
normalize(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt);

/// @brief Convenience overload that runs without an interrupter.
template<typename GridType>
typename GridType::Ptr
normalize(const GridType& grid, bool threaded = true)
{
    // Forward to the interruptible overload with a null interrupter.
    return normalize<GridType, util::NullInterrupter>(grid, threaded, nullptr);
}

/// @brief Convenience overload: masked, without an interrupter.
template<typename GridType, typename MaskT>
typename GridType::Ptr
normalize(const GridType& grid, const MaskT& mask, bool threaded = true)
{
    return normalize<GridType, MaskT, util::NullInterrupter>(grid, mask, threaded, nullptr);
}
273
274
275 ////////////////////////////////////////
276
277
278 namespace gridop {
279
280 /// @brief ToMaskGrid<T>::Type is the type of a grid having the same
281 /// tree hierarchy as grid type T but a value equal to its active state.
282 /// @details For example, ToMaskGrid<FloatGrid>::Type is equivalent to MaskGrid.
283 template<typename GridType>
284 struct ToMaskGrid {
285 typedef Grid<typename GridType::TreeType::template ValueConverter<ValueMask>::Type> Type;
286 };
287
288
289 /// @brief Apply an operator to an input grid to produce an output grid
290 /// with the same active voxel topology but a potentially different value type.
291 /// @details To facilitate inlining, this class is also templated on a Map type.
292 ///
293 /// @note This is a helper class and should never be used directly.
template<
    typename InGridT,
    typename MaskGridType,
    typename OutGridT,
    typename MapT,
    typename OperatorT,
    typename InterruptT = util::NullInterrupter>
class GridOperator
{
public:
    typedef typename OutGridT::TreeType OutTreeT;
    typedef typename OutTreeT::LeafNodeType OutLeafT;
    typedef typename tree::LeafManager<OutTreeT> LeafManagerT;

    /// @param grid      input grid to which the operator is applied
    /// @param mask      optional mask; if non-null, the output topology is
    ///                  intersected with the mask's active topology
    /// @param map       map (transform) in which the operator is evaluated
    /// @param interrupt optional interrupter, polled while processing
    /// @param densify   if true, voxelize the output tree's active tiles before processing
    GridOperator(const InGridT& grid, const MaskGridType* mask, const MapT& map,
        InterruptT* interrupt = nullptr, bool densify = true)
        : mAcc(grid.getConstAccessor())
        , mMap(map)
        , mInterrupt(interrupt)
        , mMask(mask)
        , mDensify(densify) ///< @todo consider adding a "NeedsDensification" operator trait
    {
    }
    GridOperator(const GridOperator&) = default;
    GridOperator& operator=(const GridOperator&) = default;
    virtual ~GridOperator() = default;

    /// @brief Apply the operator and return the resulting output grid.
    /// @param threaded  if true, leaf nodes are processed with tbb::parallel_for
    typename OutGridT::Ptr process(bool threaded = true)
    {
        if (mInterrupt) mInterrupt->start("Processing grid");

        // Derive background value of the output grid by applying the operator
        // to an empty tree that has the input tree's background value.
        typename InGridT::TreeType tmp(mAcc.tree().background());
        typename OutGridT::ValueType backg = OperatorT::result(mMap, tmp, math::Coord(0));

        // The output tree is a topology copy, optionally densified, of the input tree.
        // (Densification is necessary for some operators because applying the operator to
        // a constant tile produces distinct output values, particularly along tile borders.)
        /// @todo Can tiles be handled correctly without densification, or by densifying
        /// only to the width of the operator stencil?
        typename OutTreeT::Ptr tree(new OutTreeT(mAcc.tree(), backg, TopologyCopy()));
        if (mDensify) tree->voxelizeActiveTiles();

        // create grid with output tree and unit transform
        typename OutGridT::Ptr result(new OutGridT(tree));

        // Modify the solution area if a mask was supplied.
        if (mMask) {
            result->topologyIntersection(*mMask);
        }

        // transform of output grid = transform of input grid
        result->setTransform(math::Transform::Ptr(new math::Transform( mMap.copy() )));

        LeafManagerT leafManager(*tree);

        // Process all leaf voxels, in parallel if requested.
        if (threaded) {
            tbb::parallel_for(leafManager.leafRange(), *this);
        } else {
            (*this)(leafManager.leafRange());
        }

        // If the tree wasn't densified, it might have active tiles that need to be processed.
        if (!mDensify) {
            using TileIter = typename OutTreeT::ValueOnIter;

            TileIter tileIter = tree->beginValueOn();
            tileIter.setMaxDepth(tileIter.getLeafDepth() - 1); // skip leaf values (i.e., voxels)

            AccessorT inAcc = mAcc; // each thread needs its own accessor, captured by value
            auto tileOp = [this, inAcc](const TileIter& it) {
                // Apply the operator to the input grid's tile value at the iterator's
                // current coordinates, and set the output tile's value to the result.
                it.setValue(OperatorT::result(this->mMap, inAcc, it.getCoord()));
            };

            // Apply the operator to tile values, optionally in parallel.
            // (But don't share the functor; each thread needs its own accessor.)
            tools::foreach(tileIter, tileOp, threaded, /*shareFunctor=*/false);
        }

        // Densification may have produced constant regions; collapse them back into tiles.
        if (mDensify) tree->prune();

        if (mInterrupt) mInterrupt->end();
        return result;
    }

    /// @brief Iterate sequentially over LeafNodes and voxels in the output
    /// grid and apply the operator using a value accessor for the input grid.
    ///
    /// @note Never call this public method directly - it is called by
    /// TBB threads only!
    void operator()(const typename LeafManagerT::LeafRange& range) const
    {
        // Cooperatively cancel all TBB tasks if an interrupt was requested.
        if (util::wasInterrupted(mInterrupt)) {
            thread::cancelGroupExecution();
        }

        for (typename LeafManagerT::LeafRange::Iterator leaf=range.begin(); leaf; ++leaf) {
            for (typename OutLeafT::ValueOnIter value=leaf->beginValueOn(); value; ++value) {
                value.setValue(OperatorT::result(mMap, mAcc, value.getCoord()));
            }
        }
    }

protected:
    typedef typename InGridT::ConstAccessor AccessorT;
    // mutable because operator() is const but the accessor caches node pointers
    mutable AccessorT mAcc;
    const MapT& mMap;
    InterruptT* mInterrupt;
    const MaskGridType* mMask;
    const bool mDensify;
}; // end of GridOperator class
407
408 } // namespace gridop
409
410
411 ////////////////////////////////////////
412
413
414 /// @brief Compute the closest-point transform of a scalar grid.
415 template<
416 typename InGridT,
417 typename MaskGridType = typename gridop::ToMaskGrid<InGridT>::Type,
418 typename InterruptT = util::NullInterrupter>
419 class Cpt
420 {
421 public:
422 typedef InGridT InGridType;
423 typedef typename ScalarToVectorConverter<InGridT>::Type OutGridType;
424
425 Cpt(const InGridType& grid, InterruptT* interrupt = nullptr):
mInputGrid(grid)426 mInputGrid(grid), mInterrupt(interrupt), mMask(nullptr)
427 {
428 }
429
430 Cpt(const InGridType& grid, const MaskGridType& mask, InterruptT* interrupt = nullptr):
mInputGrid(grid)431 mInputGrid(grid), mInterrupt(interrupt), mMask(&mask)
432 {
433 }
434
435 typename OutGridType::Ptr process(bool threaded = true, bool useWorldTransform = true)
436 {
437 Functor functor(mInputGrid, mMask, threaded, useWorldTransform, mInterrupt);
438 processTypedMap(mInputGrid.transform(), functor);
439 if (functor.mOutputGrid) functor.mOutputGrid->setVectorType(VEC_CONTRAVARIANT_ABSOLUTE);
440 return functor.mOutputGrid;
441 }
442
443 private:
444 struct IsOpT
445 {
446 template<typename MapT, typename AccT>
447 static typename OutGridType::ValueType
resultIsOpT448 result(const MapT& map, const AccT& acc, const Coord& xyz)
449 {
450 return math::CPT<MapT, math::CD_2ND>::result(map, acc, xyz);
451 }
452 };
453 struct WsOpT
454 {
455 template<typename MapT, typename AccT>
456 static typename OutGridType::ValueType
resultWsOpT457 result(const MapT& map, const AccT& acc, const Coord& xyz)
458 {
459 return math::CPT_RANGE<MapT, math::CD_2ND>::result(map, acc, xyz);
460 }
461 };
462 struct Functor
463 {
FunctorFunctor464 Functor(const InGridType& grid, const MaskGridType* mask,
465 bool threaded, bool worldspace, InterruptT* interrupt)
466 : mThreaded(threaded)
467 , mWorldSpace(worldspace)
468 , mInputGrid(grid)
469 , mInterrupt(interrupt)
470 , mMask(mask)
471 {}
472
473 template<typename MapT>
operatorFunctor474 void operator()(const MapT& map)
475 {
476 if (mWorldSpace) {
477 gridop::GridOperator<InGridType, MaskGridType, OutGridType, MapT, WsOpT, InterruptT>
478 op(mInputGrid, mMask, map, mInterrupt, /*densify=*/false);
479 mOutputGrid = op.process(mThreaded); // cache the result
480 } else {
481 gridop::GridOperator<InGridType, MaskGridType, OutGridType, MapT, IsOpT, InterruptT>
482 op(mInputGrid, mMask, map, mInterrupt, /*densify=*/false);
483 mOutputGrid = op.process(mThreaded); // cache the result
484 }
485 }
486 const bool mThreaded;
487 const bool mWorldSpace;
488 const InGridType& mInputGrid;
489 typename OutGridType::Ptr mOutputGrid;
490 InterruptT* mInterrupt;
491 const MaskGridType* mMask;
492 };
493 const InGridType& mInputGrid;
494 InterruptT* mInterrupt;
495 const MaskGridType* mMask;
496 }; // end of Cpt class
497
498
499 ////////////////////////////////////////
500
501
502 /// @brief Compute the curl of a vector grid.
503 template<
504 typename GridT,
505 typename MaskGridType = typename gridop::ToMaskGrid<GridT>::Type,
506 typename InterruptT = util::NullInterrupter>
507 class Curl
508 {
509 public:
510 typedef GridT InGridType;
511 typedef GridT OutGridType;
512
513 Curl(const GridT& grid, InterruptT* interrupt = nullptr):
mInputGrid(grid)514 mInputGrid(grid), mInterrupt(interrupt), mMask(nullptr)
515 {
516 }
517
518 Curl(const GridT& grid, const MaskGridType& mask, InterruptT* interrupt = nullptr):
mInputGrid(grid)519 mInputGrid(grid), mInterrupt(interrupt), mMask(&mask)
520 {
521 }
522
523 typename GridT::Ptr process(bool threaded = true)
524 {
525 Functor functor(mInputGrid, mMask, threaded, mInterrupt);
526 processTypedMap(mInputGrid.transform(), functor);
527 if (functor.mOutputGrid) functor.mOutputGrid->setVectorType(VEC_COVARIANT);
528 return functor.mOutputGrid;
529 }
530
531 private:
532 struct Functor
533 {
FunctorFunctor534 Functor(const GridT& grid, const MaskGridType* mask,
535 bool threaded, InterruptT* interrupt):
536 mThreaded(threaded), mInputGrid(grid), mInterrupt(interrupt), mMask(mask) {}
537
538 template<typename MapT>
operatorFunctor539 void operator()(const MapT& map)
540 {
541 typedef math::Curl<MapT, math::CD_2ND> OpT;
542 gridop::GridOperator<GridT, MaskGridType, GridT, MapT, OpT, InterruptT>
543 op(mInputGrid, mMask, map, mInterrupt);
544 mOutputGrid = op.process(mThreaded); // cache the result
545 }
546
547 const bool mThreaded;
548 const GridT& mInputGrid;
549 typename GridT::Ptr mOutputGrid;
550 InterruptT* mInterrupt;
551 const MaskGridType* mMask;
552 }; // Private Functor
553
554 const GridT& mInputGrid;
555 InterruptT* mInterrupt;
556 const MaskGridType* mMask;
557 }; // end of Curl class
558
559
560 ////////////////////////////////////////
561
562
563 /// @brief Compute the divergence of a vector grid.
564 template<
565 typename InGridT,
566 typename MaskGridType = typename gridop::ToMaskGrid<InGridT>::Type,
567 typename InterruptT = util::NullInterrupter>
568 class Divergence
569 {
570 public:
571 typedef InGridT InGridType;
572 typedef typename VectorToScalarConverter<InGridT>::Type OutGridType;
573
574 Divergence(const InGridT& grid, InterruptT* interrupt = nullptr):
mInputGrid(grid)575 mInputGrid(grid), mInterrupt(interrupt), mMask(nullptr)
576 {
577 }
578
579 Divergence(const InGridT& grid, const MaskGridType& mask, InterruptT* interrupt = nullptr):
mInputGrid(grid)580 mInputGrid(grid), mInterrupt(interrupt), mMask(&mask)
581 {
582 }
583
584 typename OutGridType::Ptr process(bool threaded = true)
585 {
586 if (mInputGrid.getGridClass() == GRID_STAGGERED) {
587 Functor<math::FD_1ST> functor(mInputGrid, mMask, threaded, mInterrupt);
588 processTypedMap(mInputGrid.transform(), functor);
589 return functor.mOutputGrid;
590 } else {
591 Functor<math::CD_2ND> functor(mInputGrid, mMask, threaded, mInterrupt);
592 processTypedMap(mInputGrid.transform(), functor);
593 return functor.mOutputGrid;
594 }
595 }
596
597 protected:
598 template<math::DScheme DiffScheme>
599 struct Functor
600 {
FunctorFunctor601 Functor(const InGridT& grid, const MaskGridType* mask,
602 bool threaded, InterruptT* interrupt):
603 mThreaded(threaded), mInputGrid(grid), mInterrupt(interrupt), mMask(mask) {}
604
605 template<typename MapT>
operatorFunctor606 void operator()(const MapT& map)
607 {
608 typedef math::Divergence<MapT, DiffScheme> OpT;
609 gridop::GridOperator<InGridType, MaskGridType, OutGridType, MapT, OpT, InterruptT>
610 op(mInputGrid, mMask, map, mInterrupt);
611 mOutputGrid = op.process(mThreaded); // cache the result
612 }
613
614 const bool mThreaded;
615 const InGridType& mInputGrid;
616 typename OutGridType::Ptr mOutputGrid;
617 InterruptT* mInterrupt;
618 const MaskGridType* mMask;
619 }; // Private Functor
620
621 const InGridType& mInputGrid;
622 InterruptT* mInterrupt;
623 const MaskGridType* mMask;
624 }; // end of Divergence class
625
626
627 ////////////////////////////////////////
628
629
630 /// @brief Compute the gradient of a scalar grid.
631 template<
632 typename InGridT,
633 typename MaskGridType = typename gridop::ToMaskGrid<InGridT>::Type,
634 typename InterruptT = util::NullInterrupter>
635 class Gradient
636 {
637 public:
638 typedef InGridT InGridType;
639 typedef typename ScalarToVectorConverter<InGridT>::Type OutGridType;
640
641 Gradient(const InGridT& grid, InterruptT* interrupt = nullptr):
mInputGrid(grid)642 mInputGrid(grid), mInterrupt(interrupt), mMask(nullptr)
643 {
644 }
645
646 Gradient(const InGridT& grid, const MaskGridType& mask, InterruptT* interrupt = nullptr):
mInputGrid(grid)647 mInputGrid(grid), mInterrupt(interrupt), mMask(&mask)
648 {
649 }
650
651 typename OutGridType::Ptr process(bool threaded = true)
652 {
653 Functor functor(mInputGrid, mMask, threaded, mInterrupt);
654 processTypedMap(mInputGrid.transform(), functor);
655 if (functor.mOutputGrid) functor.mOutputGrid->setVectorType(VEC_COVARIANT);
656 return functor.mOutputGrid;
657 }
658
659 protected:
660 struct Functor
661 {
FunctorFunctor662 Functor(const InGridT& grid, const MaskGridType* mask,
663 bool threaded, InterruptT* interrupt):
664 mThreaded(threaded), mInputGrid(grid), mInterrupt(interrupt), mMask(mask) {}
665
666 template<typename MapT>
operatorFunctor667 void operator()(const MapT& map)
668 {
669 typedef math::Gradient<MapT, math::CD_2ND> OpT;
670 gridop::GridOperator<InGridType, MaskGridType, OutGridType, MapT, OpT, InterruptT>
671 op(mInputGrid, mMask, map, mInterrupt);
672 mOutputGrid = op.process(mThreaded); // cache the result
673 }
674
675 const bool mThreaded;
676 const InGridT& mInputGrid;
677 typename OutGridType::Ptr mOutputGrid;
678 InterruptT* mInterrupt;
679 const MaskGridType* mMask;
680 }; // Private Functor
681
682 const InGridT& mInputGrid;
683 InterruptT* mInterrupt;
684 const MaskGridType* mMask;
685 }; // end of Gradient class
686
687
688 ////////////////////////////////////////
689
690
691 template<
692 typename GridT,
693 typename MaskGridType = typename gridop::ToMaskGrid<GridT>::Type,
694 typename InterruptT = util::NullInterrupter>
695 class Laplacian
696 {
697 public:
698 typedef GridT InGridType;
699 typedef GridT OutGridType;
700
701 Laplacian(const GridT& grid, InterruptT* interrupt = nullptr):
mInputGrid(grid)702 mInputGrid(grid), mInterrupt(interrupt), mMask(nullptr)
703 {
704 }
705
706 Laplacian(const GridT& grid, const MaskGridType& mask, InterruptT* interrupt = nullptr):
mInputGrid(grid)707 mInputGrid(grid), mInterrupt(interrupt), mMask(&mask)
708 {
709 }
710
711 typename GridT::Ptr process(bool threaded = true)
712 {
713 Functor functor(mInputGrid, mMask, threaded, mInterrupt);
714 processTypedMap(mInputGrid.transform(), functor);
715 if (functor.mOutputGrid) functor.mOutputGrid->setVectorType(VEC_COVARIANT);
716 return functor.mOutputGrid;
717 }
718
719 protected:
720 struct Functor
721 {
FunctorFunctor722 Functor(const GridT& grid, const MaskGridType* mask, bool threaded, InterruptT* interrupt):
723 mThreaded(threaded), mInputGrid(grid), mInterrupt(interrupt), mMask(mask) {}
724
725 template<typename MapT>
operatorFunctor726 void operator()(const MapT& map)
727 {
728 typedef math::Laplacian<MapT, math::CD_SECOND> OpT;
729 gridop::GridOperator<GridT, MaskGridType, GridT, MapT, OpT, InterruptT>
730 op(mInputGrid, mMask, map, mInterrupt);
731 mOutputGrid = op.process(mThreaded); // cache the result
732 }
733
734 const bool mThreaded;
735 const GridT& mInputGrid;
736 typename GridT::Ptr mOutputGrid;
737 InterruptT* mInterrupt;
738 const MaskGridType* mMask;
739 }; // Private Functor
740
741 const GridT& mInputGrid;
742 InterruptT* mInterrupt;
743 const MaskGridType* mMask;
744 }; // end of Laplacian class
745
746
747 ////////////////////////////////////////
748
749
750 template<
751 typename GridT,
752 typename MaskGridType = typename gridop::ToMaskGrid<GridT>::Type,
753 typename InterruptT = util::NullInterrupter>
754 class MeanCurvature
755 {
756 public:
757 typedef GridT InGridType;
758 typedef GridT OutGridType;
759
760 MeanCurvature(const GridT& grid, InterruptT* interrupt = nullptr):
mInputGrid(grid)761 mInputGrid(grid), mInterrupt(interrupt), mMask(nullptr)
762 {
763 }
764
765 MeanCurvature(const GridT& grid, const MaskGridType& mask, InterruptT* interrupt = nullptr):
mInputGrid(grid)766 mInputGrid(grid), mInterrupt(interrupt), mMask(&mask)
767 {
768 }
769
770 typename GridT::Ptr process(bool threaded = true)
771 {
772 Functor functor(mInputGrid, mMask, threaded, mInterrupt);
773 processTypedMap(mInputGrid.transform(), functor);
774 if (functor.mOutputGrid) functor.mOutputGrid->setVectorType(VEC_COVARIANT);
775 return functor.mOutputGrid;
776 }
777
778 protected:
779 struct Functor
780 {
FunctorFunctor781 Functor(const GridT& grid, const MaskGridType* mask, bool threaded, InterruptT* interrupt):
782 mThreaded(threaded), mInputGrid(grid), mInterrupt(interrupt), mMask(mask) {}
783
784 template<typename MapT>
operatorFunctor785 void operator()(const MapT& map)
786 {
787 typedef math::MeanCurvature<MapT, math::CD_SECOND, math::CD_2ND> OpT;
788 gridop::GridOperator<GridT, MaskGridType, GridT, MapT, OpT, InterruptT>
789 op(mInputGrid, mMask, map, mInterrupt);
790 mOutputGrid = op.process(mThreaded); // cache the result
791 }
792
793 const bool mThreaded;
794 const GridT& mInputGrid;
795 typename GridT::Ptr mOutputGrid;
796 InterruptT* mInterrupt;
797 const MaskGridType* mMask;
798 }; // Private Functor
799
800 const GridT& mInputGrid;
801 InterruptT* mInterrupt;
802 const MaskGridType* mMask;
803 }; // end of MeanCurvature class
804
805
806 ////////////////////////////////////////
807
808
809 template<
810 typename InGridT,
811 typename MaskGridType = typename gridop::ToMaskGrid<InGridT>::Type,
812 typename InterruptT = util::NullInterrupter>
813 class Magnitude
814 {
815 public:
816 typedef InGridT InGridType;
817 typedef typename VectorToScalarConverter<InGridT>::Type OutGridType;
818
819 Magnitude(const InGridType& grid, InterruptT* interrupt = nullptr):
mInputGrid(grid)820 mInputGrid(grid), mInterrupt(interrupt), mMask(nullptr)
821 {
822 }
823
824 Magnitude(const InGridType& grid, const MaskGridType& mask, InterruptT* interrupt = nullptr):
mInputGrid(grid)825 mInputGrid(grid), mInterrupt(interrupt), mMask(&mask)
826 {
827 }
828
829 typename OutGridType::Ptr process(bool threaded = true)
830 {
831 Functor functor(mInputGrid, mMask, threaded, mInterrupt);
832 processTypedMap(mInputGrid.transform(), functor);
833 return functor.mOutputGrid;
834 }
835
836 protected:
837 struct OpT
838 {
839 template<typename MapT, typename AccT>
840 static typename OutGridType::ValueType
resultOpT841 result(const MapT&, const AccT& acc, const Coord& xyz) { return acc.getValue(xyz).length();}
842 };
843 struct Functor
844 {
FunctorFunctor845 Functor(const InGridT& grid, const MaskGridType* mask,
846 bool threaded, InterruptT* interrupt):
847 mThreaded(threaded), mInputGrid(grid), mInterrupt(interrupt), mMask(mask) {}
848
849 template<typename MapT>
operatorFunctor850 void operator()(const MapT& map)
851 {
852 gridop::GridOperator<InGridType, MaskGridType, OutGridType, MapT, OpT, InterruptT>
853 op(mInputGrid, mMask, map, mInterrupt, /*densify=*/false);
854 mOutputGrid = op.process(mThreaded); // cache the result
855 }
856
857 const bool mThreaded;
858 const InGridType& mInputGrid;
859 typename OutGridType::Ptr mOutputGrid;
860 InterruptT* mInterrupt;
861 const MaskGridType* mMask;
862 }; // Private Functor
863
864 const InGridType& mInputGrid;
865 InterruptT* mInterrupt;
866 const MaskGridType* mMask;
867 }; // end of Magnitude class
868
869
870 ////////////////////////////////////////
871
872
873 template<
874 typename GridT,
875 typename MaskGridType = typename gridop::ToMaskGrid<GridT>::Type,
876 typename InterruptT = util::NullInterrupter>
877 class Normalize
878 {
879 public:
880 typedef GridT InGridType;
881 typedef GridT OutGridType;
882
883 Normalize(const GridT& grid, InterruptT* interrupt = nullptr):
mInputGrid(grid)884 mInputGrid(grid), mInterrupt(interrupt), mMask(nullptr)
885 {
886 }
887
888 Normalize(const GridT& grid, const MaskGridType& mask, InterruptT* interrupt = nullptr):
mInputGrid(grid)889 mInputGrid(grid), mInterrupt(interrupt), mMask(&mask)
890 {
891 }
892
893 typename GridT::Ptr process(bool threaded = true)
894 {
895 Functor functor(mInputGrid, mMask, threaded, mInterrupt);
896 processTypedMap(mInputGrid.transform(), functor);
897 if (typename GridT::Ptr outGrid = functor.mOutputGrid) {
898 const VecType vecType = mInputGrid.getVectorType();
899 if (vecType == VEC_COVARIANT) {
900 outGrid->setVectorType(VEC_COVARIANT_NORMALIZE);
901 } else {
902 outGrid->setVectorType(vecType);
903 }
904 }
905 return functor.mOutputGrid;
906 }
907
908 protected:
909 struct OpT
910 {
911 template<typename MapT, typename AccT>
912 static typename OutGridType::ValueType
resultOpT913 result(const MapT&, const AccT& acc, const Coord& xyz)
914 {
915 typename OutGridType::ValueType vec = acc.getValue(xyz);
916 if ( !vec.normalize() ) vec.setZero();
917 return vec;
918 }
919 };
920 struct Functor
921 {
FunctorFunctor922 Functor(const GridT& grid, const MaskGridType* mask, bool threaded, InterruptT* interrupt):
923 mThreaded(threaded), mInputGrid(grid), mInterrupt(interrupt), mMask(mask) {}
924
925 template<typename MapT>
operatorFunctor926 void operator()(const MapT& map)
927 {
928 gridop::GridOperator<GridT, MaskGridType, GridT, MapT, OpT, InterruptT>
929 op(mInputGrid, mMask, map, mInterrupt, /*densify=*/false);
930 mOutputGrid = op.process(mThreaded); // cache the result
931 }
932
933 const bool mThreaded;
934 const GridT& mInputGrid;
935 typename GridT::Ptr mOutputGrid;
936 InterruptT* mInterrupt;
937 const MaskGridType* mMask;
938 }; // Private Functor
939
940 const GridT& mInputGrid;
941 InterruptT* mInterrupt;
942 const MaskGridType* mMask;
943 }; // end of Normalize class
944
945
946 ////////////////////////////////////////
947
948
949 template<typename GridType, typename InterruptT>
950 typename ScalarToVectorConverter<GridType>::Type::Ptr
cpt(const GridType & grid,bool threaded,InterruptT * interrupt)951 cpt(const GridType& grid, bool threaded, InterruptT* interrupt)
952 {
953 Cpt<GridType, typename gridop::ToMaskGrid<GridType>::Type, InterruptT> op(grid, interrupt);
954 return op.process(threaded);
955 }
956
957 template<typename GridType, typename MaskT, typename InterruptT>
958 typename ScalarToVectorConverter<GridType>::Type::Ptr
cpt(const GridType & grid,const MaskT & mask,bool threaded,InterruptT * interrupt)959 cpt(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt)
960 {
961 Cpt<GridType, MaskT, InterruptT> op(grid, mask, interrupt);
962 return op.process(threaded);
963 }
964
965 template<typename GridType, typename InterruptT>
966 typename GridType::Ptr
curl(const GridType & grid,bool threaded,InterruptT * interrupt)967 curl(const GridType& grid, bool threaded, InterruptT* interrupt)
968 {
969 Curl<GridType, typename gridop::ToMaskGrid<GridType>::Type, InterruptT> op(grid, interrupt);
970 return op.process(threaded);
971 }
972
973 template<typename GridType, typename MaskT, typename InterruptT>
974 typename GridType::Ptr
curl(const GridType & grid,const MaskT & mask,bool threaded,InterruptT * interrupt)975 curl(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt)
976 {
977 Curl<GridType, MaskT, InterruptT> op(grid, mask, interrupt);
978 return op.process(threaded);
979 }
980
981 template<typename GridType, typename InterruptT>
982 typename VectorToScalarConverter<GridType>::Type::Ptr
divergence(const GridType & grid,bool threaded,InterruptT * interrupt)983 divergence(const GridType& grid, bool threaded, InterruptT* interrupt)
984 {
985 Divergence<GridType, typename gridop::ToMaskGrid<GridType>::Type, InterruptT>
986 op(grid, interrupt);
987 return op.process(threaded);
988 }
989
990 template<typename GridType, typename MaskT, typename InterruptT>
991 typename VectorToScalarConverter<GridType>::Type::Ptr
divergence(const GridType & grid,const MaskT & mask,bool threaded,InterruptT * interrupt)992 divergence(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt)
993 {
994 Divergence<GridType, MaskT, InterruptT> op(grid, mask, interrupt);
995 return op.process(threaded);
996 }
997
998 template<typename GridType, typename InterruptT>
999 typename ScalarToVectorConverter<GridType>::Type::Ptr
gradient(const GridType & grid,bool threaded,InterruptT * interrupt)1000 gradient(const GridType& grid, bool threaded, InterruptT* interrupt)
1001 {
1002 Gradient<GridType, typename gridop::ToMaskGrid<GridType>::Type, InterruptT>
1003 op(grid, interrupt);
1004 return op.process(threaded);
1005 }
1006
1007 template<typename GridType, typename MaskT, typename InterruptT>
1008 typename ScalarToVectorConverter<GridType>::Type::Ptr
gradient(const GridType & grid,const MaskT & mask,bool threaded,InterruptT * interrupt)1009 gradient(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt)
1010 {
1011 Gradient<GridType, MaskT, InterruptT> op(grid, mask, interrupt);
1012 return op.process(threaded);
1013 }
1014
1015 template<typename GridType, typename InterruptT>
1016 typename GridType::Ptr
laplacian(const GridType & grid,bool threaded,InterruptT * interrupt)1017 laplacian(const GridType& grid, bool threaded, InterruptT* interrupt)
1018 {
1019 Laplacian<GridType, typename gridop::ToMaskGrid<GridType>::Type, InterruptT>
1020 op(grid, interrupt);
1021 return op.process(threaded);
1022 }
1023
1024 template<typename GridType, typename MaskT, typename InterruptT>
1025 typename GridType::Ptr
laplacian(const GridType & grid,const MaskT & mask,bool threaded,InterruptT * interrupt)1026 laplacian(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt)
1027 {
1028 Laplacian<GridType, MaskT, InterruptT> op(grid, mask, interrupt);
1029 return op.process(threaded);
1030 }
1031
1032 template<typename GridType, typename InterruptT>
1033 typename GridType::Ptr
meanCurvature(const GridType & grid,bool threaded,InterruptT * interrupt)1034 meanCurvature(const GridType& grid, bool threaded, InterruptT* interrupt)
1035 {
1036 MeanCurvature<GridType, typename gridop::ToMaskGrid<GridType>::Type, InterruptT>
1037 op(grid, interrupt);
1038 return op.process(threaded);
1039 }
1040
1041 template<typename GridType, typename MaskT, typename InterruptT>
1042 typename GridType::Ptr
meanCurvature(const GridType & grid,const MaskT & mask,bool threaded,InterruptT * interrupt)1043 meanCurvature(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt)
1044 {
1045 MeanCurvature<GridType, MaskT, InterruptT> op(grid, mask, interrupt);
1046 return op.process(threaded);
1047 }
1048
1049 template<typename GridType, typename InterruptT>
1050 typename VectorToScalarConverter<GridType>::Type::Ptr
magnitude(const GridType & grid,bool threaded,InterruptT * interrupt)1051 magnitude(const GridType& grid, bool threaded, InterruptT* interrupt)
1052 {
1053 Magnitude<GridType, typename gridop::ToMaskGrid<GridType>::Type, InterruptT>
1054 op(grid, interrupt);
1055 return op.process(threaded);
1056 }
1057
1058 template<typename GridType, typename MaskT, typename InterruptT>
1059 typename VectorToScalarConverter<GridType>::Type::Ptr
magnitude(const GridType & grid,const MaskT & mask,bool threaded,InterruptT * interrupt)1060 magnitude(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt)
1061 {
1062 Magnitude<GridType, MaskT, InterruptT> op(grid, mask, interrupt);
1063 return op.process(threaded);
1064 }
1065
1066 template<typename GridType, typename InterruptT>
1067 typename GridType::Ptr
normalize(const GridType & grid,bool threaded,InterruptT * interrupt)1068 normalize(const GridType& grid, bool threaded, InterruptT* interrupt)
1069 {
1070 Normalize<GridType, typename gridop::ToMaskGrid<GridType>::Type, InterruptT>
1071 op(grid, interrupt);
1072 return op.process(threaded);
1073 }
1074
1075 template<typename GridType, typename MaskT, typename InterruptT>
1076 typename GridType::Ptr
normalize(const GridType & grid,const MaskT & mask,bool threaded,InterruptT * interrupt)1077 normalize(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt)
1078 {
1079 Normalize<GridType, MaskT, InterruptT> op(grid, mask, interrupt);
1080 return op.process(threaded);
1081 }
1082
1083 ////////////////////////////////////////
1084
1085
// Explicit Template Instantiation
//
// When OPENVDB_USE_EXPLICIT_INSTANTIATION is defined, each _FUNCTION macro
// below declares (or, in the one translation unit that also defines
// OPENVDB_INSTANTIATE_GRIDOPERATORS, defines) an explicit instantiation of a
// grid-operator free function for every tree type covered by the chosen
// OPENVDB_*_TREE_INSTANTIATE macro (real-, Vec3-, or numeric-valued trees).

#ifdef OPENVDB_USE_EXPLICIT_INSTANTIATION

#ifdef OPENVDB_INSTANTIATE_GRIDOPERATORS
#include <openvdb/util/ExplicitInstantiation.h>
#endif

#define _FUNCTION(TreeT) \
    ScalarToVectorConverter<Grid<TreeT>>::Type::Ptr cpt(const Grid<TreeT>&, bool, util::NullInterrupter*)
OPENVDB_REAL_TREE_INSTANTIATE(_FUNCTION)
#undef _FUNCTION

#define _FUNCTION(TreeT) \
    ScalarToVectorConverter<Grid<TreeT>>::Type::Ptr cpt(const Grid<TreeT>&, const BoolGrid&, bool, util::NullInterrupter*)
OPENVDB_REAL_TREE_INSTANTIATE(_FUNCTION)
#undef _FUNCTION

#define _FUNCTION(TreeT) \
    Grid<TreeT>::Ptr curl(const Grid<TreeT>&, bool, util::NullInterrupter*)
OPENVDB_VEC3_TREE_INSTANTIATE(_FUNCTION)
#undef _FUNCTION

#define _FUNCTION(TreeT) \
    Grid<TreeT>::Ptr curl(const Grid<TreeT>&, const BoolGrid&, bool, util::NullInterrupter*)
OPENVDB_VEC3_TREE_INSTANTIATE(_FUNCTION)
#undef _FUNCTION

#define _FUNCTION(TreeT) \
    VectorToScalarConverter<Grid<TreeT>>::Type::Ptr divergence(const Grid<TreeT>&, bool, util::NullInterrupter*)
OPENVDB_VEC3_TREE_INSTANTIATE(_FUNCTION)
#undef _FUNCTION

#define _FUNCTION(TreeT) \
    VectorToScalarConverter<Grid<TreeT>>::Type::Ptr divergence(const Grid<TreeT>&, const BoolGrid&, bool, util::NullInterrupter*)
OPENVDB_VEC3_TREE_INSTANTIATE(_FUNCTION)
#undef _FUNCTION

#define _FUNCTION(TreeT) \
    ScalarToVectorConverter<Grid<TreeT>>::Type::Ptr gradient(const Grid<TreeT>&, bool, util::NullInterrupter*)
OPENVDB_REAL_TREE_INSTANTIATE(_FUNCTION)
#undef _FUNCTION

#define _FUNCTION(TreeT) \
    ScalarToVectorConverter<Grid<TreeT>>::Type::Ptr gradient(const Grid<TreeT>&, const BoolGrid&, bool, util::NullInterrupter*)
OPENVDB_REAL_TREE_INSTANTIATE(_FUNCTION)
#undef _FUNCTION

#define _FUNCTION(TreeT) \
    Grid<TreeT>::Ptr laplacian(const Grid<TreeT>&, bool, util::NullInterrupter*)
OPENVDB_NUMERIC_TREE_INSTANTIATE(_FUNCTION)
#undef _FUNCTION

#define _FUNCTION(TreeT) \
    Grid<TreeT>::Ptr laplacian(const Grid<TreeT>&, const BoolGrid&, bool, util::NullInterrupter*)
OPENVDB_NUMERIC_TREE_INSTANTIATE(_FUNCTION)
#undef _FUNCTION

#define _FUNCTION(TreeT) \
    Grid<TreeT>::Ptr meanCurvature(const Grid<TreeT>&, bool, util::NullInterrupter*)
OPENVDB_NUMERIC_TREE_INSTANTIATE(_FUNCTION)
#undef _FUNCTION

#define _FUNCTION(TreeT) \
    Grid<TreeT>::Ptr meanCurvature(const Grid<TreeT>&, const BoolGrid&, bool, util::NullInterrupter*)
OPENVDB_NUMERIC_TREE_INSTANTIATE(_FUNCTION)
#undef _FUNCTION

#define _FUNCTION(TreeT) \
    VectorToScalarConverter<Grid<TreeT>>::Type::Ptr magnitude(const Grid<TreeT>&, bool, util::NullInterrupter*)
OPENVDB_VEC3_TREE_INSTANTIATE(_FUNCTION)
#undef _FUNCTION

#define _FUNCTION(TreeT) \
    VectorToScalarConverter<Grid<TreeT>>::Type::Ptr magnitude(const Grid<TreeT>&, const BoolGrid&, bool, util::NullInterrupter*)
OPENVDB_VEC3_TREE_INSTANTIATE(_FUNCTION)
#undef _FUNCTION

#define _FUNCTION(TreeT) \
    Grid<TreeT>::Ptr normalize(const Grid<TreeT>&, bool, util::NullInterrupter*)
OPENVDB_VEC3_TREE_INSTANTIATE(_FUNCTION)
#undef _FUNCTION

#define _FUNCTION(TreeT) \
    Grid<TreeT>::Ptr normalize(const Grid<TreeT>&, const BoolGrid&, bool, util::NullInterrupter*)
OPENVDB_VEC3_TREE_INSTANTIATE(_FUNCTION)
#undef _FUNCTION

#endif // OPENVDB_USE_EXPLICIT_INSTANTIATION
1175
1176
1177 } // namespace tools
1178 } // namespace OPENVDB_VERSION_NAME
1179 } // namespace openvdb
1180
1181 #endif // OPENVDB_TOOLS_GRID_OPERATORS_HAS_BEEN_INCLUDED
1182