//
// Copyright 2016 Pixar
//
// Licensed under the Apache License, Version 2.0 (the "Apache License")
// with the following modification; you may not use this file except in
// compliance with the Apache License and the following modification to it:
// Section 6. Trademarks. is deleted and replaced with:
//
// 6. Trademarks. This License does not grant permission to use the trade
//    names, trademarks, service marks, or product names of the Licensor
//    and its affiliates, except as required to comply with Section 4(c) of
//    the License and to reproduce the content of the NOTICE file.
//
// You may obtain a copy of the Apache License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the Apache License with the above modification is
// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the Apache License for the specific
// language governing permissions and limitations under the Apache License.
//

#include "pxr/pxr.h"
#include "pxr/usd/pcp/primIndex_Graph.h"
#include "pxr/usd/pcp/arc.h"
#include "pxr/usd/pcp/diagnostic.h"
#include "pxr/usd/pcp/errors.h"
#include "pxr/usd/pcp/node_Iterator.h"
#include "pxr/usd/pcp/strengthOrdering.h"
#include "pxr/usd/pcp/types.h"

#include "pxr/base/trace/trace.h"
#include "pxr/base/tf/mallocTag.h"

PXR_NAMESPACE_OPEN_SCOPE

const size_t PcpPrimIndex_Graph::_Node::_invalidNodeIndex;

////////////////////////////////////////////////////////////

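// Comparator that orders two sibling node indexes from strongest to
// weakest, as determined by PcpCompareSiblingNodeStrength. Siblings should
// never compare as equal strength; if they do, the prim index contains
// redundant nodes and we fall back to comparing the nodes directly.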
struct PcpPrimIndex_Graph::_ArcStrengthOrder {
    _ArcStrengthOrder(PcpPrimIndex_Graph* graph) : _graph(graph) { }

    bool operator()(size_t aIdx, size_t bIdx) const
    {
        const PcpNodeRef a(_graph, aIdx);
        const PcpNodeRef b(_graph, bIdx);

        const int result = PcpCompareSiblingNodeStrength(a, b);
        if (!TF_VERIFY(result != 0,
                "Redundant nodes in prim index for <%s>",
                _graph->GetRootNode().GetPath().GetString().c_str())) {

            // This should never happen. It means we have multiple nodes
            // with the same strength information.
            //
            // If this fails, one reason might be that we're processing
            // the same node multiple times, adding redundant arcs.
            // Such arcs will have identical strength, causing us to
            // get into here. PCP_DIAGNOSTIC_VALIDATION provides
            // a way to detect this.
#ifdef PCP_DIAGNOSTIC_VALIDATION
            printf("\n------------------\n");
            printf("\nEntire graph was:\n");
            PcpDump(a.GetRootNode());
            PcpDumpDotGraph(a.GetRootNode(), "test.dot", true, true);
            printf("\nNode A:\n");
            PcpDump(a, /* recurse */ false);
            printf("\nNode B:\n");
            PcpDump(b, /* recurse */ false);
#endif // PCP_DIAGNOSTIC_VALIDATION

            return a < b;
        }

        return result == -1;
    }

private:
    PcpPrimIndex_Graph* _graph;
};

////////////////////////////////////////////////////////////

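// Stores the given arc's information on this node: the arc type and the
// other small integer fields are packed into the node's bitfields, and the
// node's mapToParent/mapToRoot expressions are set up by composing the
// arc's mapToParent with the parent node's mapToRoot.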
void
PcpPrimIndex_Graph::_Node::SetArc(const PcpArc& arc)
{
    TF_VERIFY(static_cast<size_t>(arc.siblingNumAtOrigin) <=
              ((1lu << _childrenSize) - 1));
    TF_VERIFY(static_cast<size_t>(arc.namespaceDepth) <=
              ((1lu << _depthSize) - 1));
    // Add one because -1 is specifically allowed to mean invalid.
    TF_VERIFY(arc.parent._GetNodeIndex() + 1 <= _invalidNodeIndex);
    TF_VERIFY(arc.origin._GetNodeIndex() + 1 <= _invalidNodeIndex);

    smallInts.arcType = arc.type;
    smallInts.arcSiblingNumAtOrigin = arc.siblingNumAtOrigin;
    smallInts.arcNamespaceDepth = arc.namespaceDepth;
    smallInts.arcParentIndex = arc.parent._GetNodeIndex();
    smallInts.arcOriginIndex = arc.origin._GetNodeIndex();

    if (arc.parent) {
        mapToParent = arc.mapToParent;
        mapToRoot = arc.parent.GetMapToRoot().Compose(mapToParent);
    } else {
        mapToParent = mapToRoot = PcpMapExpression::Identity();
    }
}

PcpPrimIndex_GraphRefPtr
PcpPrimIndex_Graph::New(const PcpLayerStackSite& rootSite, bool usd)
{
    TfAutoMallocTag2 tag("Pcp", "PcpPrimIndex_Graph");

    return TfCreateRefPtr(new PcpPrimIndex_Graph(rootSite, usd));
}

PcpPrimIndex_GraphRefPtr
PcpPrimIndex_Graph::New(const PcpPrimIndex_GraphPtr& copy)
{
    TfAutoMallocTag2 tag("Pcp", "PcpPrimIndex_Graph");

    TRACE_FUNCTION();

    return TfCreateRefPtr(new PcpPrimIndex_Graph(*get_pointer(copy)));
}

PcpPrimIndex_Graph::PcpPrimIndex_Graph(const PcpLayerStackSite& rootSite,
                                       bool usd)
    : _data(new _SharedData(usd))
{
    PcpArc rootArc;
    rootArc.type = PcpArcTypeRoot;
    rootArc.namespaceDepth = 0;
    rootArc.mapToParent = PcpMapExpression::Identity();
    _CreateNode(rootSite, rootArc);
}

PcpPrimIndex_Graph::PcpPrimIndex_Graph(const PcpPrimIndex_Graph& rhs)
    : _data(rhs._data)
    , _nodeSitePaths(rhs._nodeSitePaths)
    , _nodeHasSpecs(rhs._nodeHasSpecs)
{
    // There are no internal references to rhs in the nodes that we've
    // copied, so we don't need to do anything here.
}

bool
PcpPrimIndex_Graph::IsUsd() const
{
    return _data->usd;
}

void
PcpPrimIndex_Graph::SetHasPayloads(bool hasPayloads)
{
    if (_data->hasPayloads != hasPayloads) {
        _DetachSharedNodePool();
        _data->hasPayloads = hasPayloads;
    }
}

bool
PcpPrimIndex_Graph::HasPayloads() const
{
    return _data->hasPayloads;
}

void
PcpPrimIndex_Graph::SetIsInstanceable(bool instanceable)
{
    if (_data->instanceable != instanceable) {
        _DetachSharedNodePool();
        _data->instanceable = instanceable;
    }
}

bool
PcpPrimIndex_Graph::IsInstanceable() const
{
    return _data->instanceable;
}

PcpNodeRef
PcpPrimIndex_Graph::GetRootNode() const
{
    return PcpNodeRef(const_cast<PcpPrimIndex_Graph*>(this), 0);
}

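// Returns the node with the given layer stack site by doing a linear search
// over the node pool, skipping nodes that are inert or marked as culled.
// Returns a null PcpNodeRef if no such node exists.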
PcpNodeRef
PcpPrimIndex_Graph::GetNodeUsingSite(const PcpLayerStackSite& site) const
{
    TRACE_FUNCTION();

    for (size_t i = 0, numNodes = _data->nodes.size(); i != numNodes; ++i) {
        const _Node& node = _data->nodes[i];
        if (!(node.smallInts.inert || node.smallInts.culled)
            && node.layerStack == site.layerStack
            && _nodeSitePaths[i] == site.path) {
            return PcpNodeRef(const_cast<PcpPrimIndex_Graph*>(this), i);
        }
    }

    return PcpNodeRef();
}

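// Finds the range of node indexes [start, end) that begins at the first
// child of the root node whose arc type satisfies the given predicate and
// ends at the next root child whose arc type does not (or at the end of the
// node pool). On a finalized, strength-ordered graph this range also covers
// the subtrees beneath the matching children. Returns an empty range at the
// end of the pool if no child matches.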
template <class Predicate>
std::pair<size_t, size_t>
PcpPrimIndex_Graph::_FindRootChildRange(
    const Predicate& pred) const
{
    const _Node& rootNode = _GetNode(0);
    for (size_t startIdx = rootNode.smallInts.firstChildIndex;
         startIdx != _Node::_invalidNodeIndex;
         startIdx = _GetNode(startIdx).smallInts.nextSiblingIndex) {

        if (!pred(PcpArcType(_GetNode(startIdx).smallInts.arcType))) {
            continue;
        }

        size_t endIdx = _GetNumNodes();
        for (size_t childIdx = _GetNode(startIdx).smallInts.nextSiblingIndex;
             childIdx != _Node::_invalidNodeIndex;
             childIdx = _GetNode(childIdx).smallInts.nextSiblingIndex) {

            if (!pred(PcpArcType(_GetNode(childIdx).smallInts.arcType))) {
                endIdx = childIdx;
                break;
            }
        }

        return std::make_pair(startIdx, endIdx);
    }

    return std::make_pair(_GetNumNodes(), _GetNumNodes());
}

static PcpArcType
_GetArcTypeForRangeType(const PcpRangeType rangeType)
{
    switch (rangeType) {
    case PcpRangeTypeRoot:
        return PcpArcTypeRoot;
    case PcpRangeTypeInherit:
        return PcpArcTypeInherit;
    case PcpRangeTypeVariant:
        return PcpArcTypeVariant;
    case PcpRangeTypeReference:
        return PcpArcTypeReference;
    case PcpRangeTypePayload:
        return PcpArcTypePayload;
    case PcpRangeTypeSpecialize:
        return PcpArcTypeSpecialize;

    default:
        TF_CODING_ERROR("Unhandled range type");
        return PcpArcTypeRoot;
    }
}

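// Returns the half-open range of node pool indexes corresponding to the
// given range type. The graph must be finalized so that the pool is in
// strength order.
//
// Illustrative sketch (not code from this library; a typical caller might
// iterate the returned indexes like this):
//
//     const std::pair<size_t, size_t> range =
//         graph->GetNodeIndexesForRange(PcpRangeTypeReference);
//     for (size_t i = range.first; i != range.second; ++i) {
//         // i indexes a node in strength order...
//     }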
std::pair<size_t, size_t>
PcpPrimIndex_Graph::GetNodeIndexesForRange(PcpRangeType rangeType) const
{
    // This function essentially returns indexes that point into
    // this graph's node pool. That pool will not necessarily be sorted
    // in strength order unless this graph has been finalized. So, verify
    // that that's the case.
    TF_VERIFY(_data->finalized);

    std::pair<size_t, size_t> nodeRange(_GetNumNodes(), _GetNumNodes());

    switch (rangeType) {
    case PcpRangeTypeInvalid:
        TF_CODING_ERROR("Invalid range type specified");
        break;

    case PcpRangeTypeAll:
        nodeRange = std::make_pair(0, _GetNumNodes());
        break;
    case PcpRangeTypeWeakerThanRoot:
        nodeRange = std::make_pair(1, _GetNumNodes());
        break;
    case PcpRangeTypeStrongerThanPayload:
        nodeRange = _FindRootChildRange(
            [](PcpArcType arcType) { return arcType == PcpArcTypePayload; });
        nodeRange = std::make_pair(0, nodeRange.first);
        break;

    case PcpRangeTypeRoot:
        nodeRange = std::make_pair(0, 1);
        break;
    default:
        nodeRange = _FindRootChildRange(
            [rangeType](PcpArcType arcType) {
                return arcType == _GetArcTypeForRangeType(rangeType);
            });
        break;
    };

    return nodeRange;
}

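// Finalizes the graph: reorders the node pool into strength order (so that
// strength-order iteration is a simple walk of the pool) and erases nodes
// that were marked for culling and are safe to remove. Does nothing if the
// graph has already been finalized.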
void
PcpPrimIndex_Graph::Finalize()
{
    TRACE_FUNCTION();

    if (_data->finalized) {
        return;
    }

    // We assume that the node pool being finalized is not being shared.
    // We'd have problems if the pool was being shared with other graphs at
    // this point because we wouldn't be able to fix up the _nodeSitePaths
    // member in those other graphs. That data is aligned with the node pool,
    // but is *not* shared.
    TF_VERIFY(_data.unique());

    // We want to store the nodes in the node pool in strong-to-weak order.
    // In particular, this allows strength-order iteration over the nodes in
    // the graph to be a simple traversal of the pool. So, we compute the
    // strength ordering of our nodes and reorder the pool if needed.
    std::vector<size_t> nodeIndexToStrengthOrder;
    const bool nodeOrderMatchesStrengthOrder =
        _ComputeStrengthOrderIndexMapping(&nodeIndexToStrengthOrder);
    if (!nodeOrderMatchesStrengthOrder) {
        _ApplyNodeIndexMapping(nodeIndexToStrengthOrder);
    }

    // There may be nodes in the pool that have been marked for culling that
    // can be erased from the node pool. Compute and apply the necessary
    // transformation.
    std::vector<size_t> culledNodeMapping;
    const bool hasNodesToCull =
        _ComputeEraseCulledNodeIndexMapping(&culledNodeMapping);
    if (hasNodesToCull) {
        _ApplyNodeIndexMapping(culledNodeMapping);
    }

    _data->finalized = true;
}

// Several helper macros to make it easier to access indexes for other
// nodes.
#define PARENT(node) node.smallInts.arcParentIndex
#define ORIGIN(node) node.smallInts.arcOriginIndex
#define FIRST_CHILD(node) node.smallInts.firstChildIndex
#define LAST_CHILD(node) node.smallInts.lastChildIndex
#define NEXT_SIBLING(node) node.smallInts.nextSiblingIndex
#define PREV_SIBLING(node) node.smallInts.prevSiblingIndex

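// Permutes the node pool (and the parallel _nodeSitePaths/_nodeHasSpecs
// vectors) according to nodeIndexMap, where nodeIndexMap[i] gives the new
// index for the node currently at index i, or _invalidNodeIndex if that
// node should be erased. All parent/origin/child/sibling indexes stored in
// the nodes are remapped to match.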
void
PcpPrimIndex_Graph::_ApplyNodeIndexMapping(
    const std::vector<size_t>& nodeIndexMap)
{
    _NodePool& oldNodes = _data->nodes;
    SdfPathVector& oldSitePaths = _nodeSitePaths;
    std::vector<bool>& oldHasSpecs = _nodeHasSpecs;
    TF_VERIFY(oldNodes.size() == oldSitePaths.size() &&
              oldNodes.size() == oldHasSpecs.size());
    TF_VERIFY(nodeIndexMap.size() == oldNodes.size());

    const size_t numNodesToErase =
        std::count(nodeIndexMap.begin(), nodeIndexMap.end(),
                   _Node::_invalidNodeIndex);

    const size_t oldNumNodes = oldNodes.size();
    const size_t newNumNodes = oldNumNodes - numNodesToErase;
    TF_VERIFY(newNumNodes <= oldNumNodes);

    struct _ConvertOldToNewIndex {
        _ConvertOldToNewIndex(const std::vector<size_t>& table,
                              size_t numNewNodes) : _table(table)
        {
            for (size_t i = 0, n = _table.size(); i != n; ++i) {
                TF_VERIFY(_table[i] < numNewNodes ||
                          _table[i] == _Node::_invalidNodeIndex);
            }
        }

        size_t operator()(size_t oldIndex) const
        {
            if (oldIndex != _Node::_invalidNodeIndex) {
                return _table[oldIndex];
            }
            else {
                return oldIndex;
            }
        }
        const std::vector<size_t>& _table;
    };

    const _ConvertOldToNewIndex convertToNewIndex(nodeIndexMap, newNumNodes);

    // If this mapping causes nodes to be erased, it's much more convenient
    // to fix up node indices to accommodate those erasures in the old node
    // pool before moving nodes to their new position.
    if (numNodesToErase > 0) {
        for (size_t i = 0; i < oldNumNodes; ++i) {
            const size_t oldNodeIndex = i;
            const size_t newNodeIndex = convertToNewIndex(oldNodeIndex);

            _Node& node = _data->nodes[oldNodeIndex];

            // Sanity-check: If this node isn't going to be erased, its parent
            // can't be erased either.
            const bool nodeWillBeErased =
                (newNodeIndex == _Node::_invalidNodeIndex);
            if (!nodeWillBeErased) {
                const bool parentWillBeErased =
                    PARENT(node) != _Node::_invalidNodeIndex &&
                    convertToNewIndex(PARENT(node)) == _Node::_invalidNodeIndex;
                TF_VERIFY(!parentWillBeErased);
                continue;
            }

            if (PREV_SIBLING(node) != _Node::_invalidNodeIndex) {
                _Node& prevNode = _data->nodes[PREV_SIBLING(node)];
                NEXT_SIBLING(prevNode) = NEXT_SIBLING(node);
            }
            if (NEXT_SIBLING(node) != _Node::_invalidNodeIndex) {
                _Node& nextNode = _data->nodes[NEXT_SIBLING(node)];
                PREV_SIBLING(nextNode) = PREV_SIBLING(node);
            }

            _Node& parentNode = _data->nodes[PARENT(node)];
            if (FIRST_CHILD(parentNode) == oldNodeIndex) {
                FIRST_CHILD(parentNode) = NEXT_SIBLING(node);
            }
            if (LAST_CHILD(parentNode) == oldNodeIndex) {
                LAST_CHILD(parentNode) = PREV_SIBLING(node);
            }
        }
    }

    // Swap nodes into their new position.
    _NodePool nodesAfterMapping(newNumNodes);
    SdfPathVector nodeSitePathsAfterMapping(newNumNodes);
    std::vector<bool> nodeHasSpecsAfterMapping(newNumNodes);

    for (size_t i = 0; i < oldNumNodes; ++i) {
        const size_t oldNodeIndex = i;
        const size_t newNodeIndex = convertToNewIndex(oldNodeIndex);
        if (newNodeIndex == _Node::_invalidNodeIndex) {
            continue;
        }

        // Swap the node from the old node pool into the new node pool at
        // the desired location.
        _Node& oldNode = oldNodes[oldNodeIndex];
        _Node& newNode = nodesAfterMapping[newNodeIndex];
        newNode.Swap(oldNode);

        PARENT(newNode) = convertToNewIndex(PARENT(newNode));
        ORIGIN(newNode) = convertToNewIndex(ORIGIN(newNode));
        FIRST_CHILD(newNode) = convertToNewIndex(FIRST_CHILD(newNode));
        LAST_CHILD(newNode) = convertToNewIndex(LAST_CHILD(newNode));
        PREV_SIBLING(newNode) = convertToNewIndex(PREV_SIBLING(newNode));
        NEXT_SIBLING(newNode) = convertToNewIndex(NEXT_SIBLING(newNode));

        // Copy the corresponding node site path.
        nodeSitePathsAfterMapping[newNodeIndex] = oldSitePaths[oldNodeIndex];
        nodeHasSpecsAfterMapping[newNodeIndex] = oldHasSpecs[oldNodeIndex];
    }

    _data->nodes.swap(nodesAfterMapping);
    _nodeSitePaths.swap(nodeSitePathsAfterMapping);
    _nodeHasSpecs.swap(nodeHasSpecsAfterMapping);
}

bool
PcpPrimIndex_Graph::IsFinalized() const
{
    return _data->finalized;
}

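// Appends the name of the given child path to each node's site path. For
// example, given childPath </Root/Child>, a node whose site path is </Root>
// gets the site path </Root/Child>, and a node whose site path is </Ref>
// gets </Ref/Child>.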
void
PcpPrimIndex_Graph::AppendChildNameToAllSites(const SdfPath& childPath)
{
    const SdfPath &parentPath = childPath.GetParentPath();
    TF_FOR_ALL(it, _nodeSitePaths) {
        if (*it == parentPath) {
            *it = childPath;
        } else {
            *it = it->AppendChild(childPath.GetNameToken());
        }
    }

    // Note that appending a child name doesn't require finalization
    // of the graph because doing so doesn't affect the strength ordering of
    // nodes.
}

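// Creates a new node for the given site and inserts it as a child of
// 'parent', maintaining strength order among its siblings. Fails and
// returns a null node (optionally reporting a capacity error) if the node
// pool or any of the arc's bitfield-limited values would overflow.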
PcpNodeRef
PcpPrimIndex_Graph::InsertChildNode(
    const PcpNodeRef& parent,
    const PcpLayerStackSite& site, const PcpArc& arc,
    PcpErrorBasePtr *error)
{
    TfAutoMallocTag2 tag("Pcp", "PcpPrimIndex_Graph");

    TF_VERIFY(arc.type != PcpArcTypeRoot);
    TF_VERIFY(arc.parent == parent);

    // Node capacity is limited by both NodeIndexBits and reservation
    // of the _invalidNodeIndex value. Other fields are limited by
    // the number of bits allocated to represent them.
    if (_GetNumNodes() >= _Node::_invalidNodeIndex) {
        if (error) {
            *error = PcpErrorCapacityExceeded::New(
                PcpErrorType_IndexCapacityExceeded);
        }
        return PcpNodeRef();
    }
    if (arc.siblingNumAtOrigin >= (1 << _Node::_childrenSize)) {
        if (error) {
            *error = PcpErrorCapacityExceeded::New(
                PcpErrorType_ArcCapacityExceeded);
        }
        return PcpNodeRef();
    }
    if (arc.namespaceDepth >= (1 << _Node::_depthSize)) {
        if (error) {
            *error = PcpErrorCapacityExceeded::New(
                PcpErrorType_ArcNamespaceDepthCapacityExceeded);
        }
        return PcpNodeRef();
    }

    _DetachSharedNodePool();

    const size_t parentNodeIdx = parent._GetNodeIndex();
    const size_t childNodeIdx = _CreateNode(site, arc);

    return _InsertChildInStrengthOrder(parentNodeIdx, childNodeIdx);
}

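// Copies all nodes of the given subgraph into this graph's node pool and
// attaches the subgraph's root as a child of 'parent' via the given arc,
// maintaining strength order among siblings. Fails and returns a null node
// if the combined node count would exceed the pool's capacity.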
PcpNodeRef
PcpPrimIndex_Graph::InsertChildSubgraph(
    const PcpNodeRef& parent,
    const PcpPrimIndex_GraphPtr& subgraph, const PcpArc& arc,
    PcpErrorBasePtr *error)
{
    TfAutoMallocTag2 tag("Pcp", "PcpPrimIndex_Graph");

    TF_VERIFY(arc.type != PcpArcTypeRoot);
    TF_VERIFY(arc.parent == parent);

    // Node capacity is limited by NodeIndexBits and reservation
    // of _invalidNodeIndex.
    // Other capacity-limited fields were validated when
    // the nodes were added to the subgraph.
    if (_GetNumNodes() + subgraph->_GetNumNodes() >= _Node::_invalidNodeIndex) {
        if (error) {
            *error = PcpErrorCapacityExceeded::New(
                PcpErrorType_IndexCapacityExceeded);
        }
        return PcpNodeRef();
    }

    _DetachSharedNodePool();

    const size_t parentNodeIdx = parent._GetNodeIndex();
    const size_t childNodeIdx =
        _CreateNodesForSubgraph(*get_pointer(subgraph), arc);

    return _InsertChildInStrengthOrder(parentNodeIdx, childNodeIdx);
}

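// Links the node at childNodeIdx into the doubly-linked sibling list of the
// node at parentNodeIdx, inserting it at the position dictated by
// _ArcStrengthOrder so that the parent's children remain ordered from
// strongest to weakest.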
PcpNodeRef
PcpPrimIndex_Graph::_InsertChildInStrengthOrder(
    size_t parentNodeIdx, size_t childNodeIdx)
{
    TF_VERIFY(parentNodeIdx < _GetNumNodes());
    TF_VERIFY(childNodeIdx < _GetNumNodes());

    // Insert the child in the list of children, maintaining
    // the relative strength order.
    _Node& parentNode = _data->nodes[parentNodeIdx];
    _Node& childNode = _data->nodes[childNodeIdx];
    _ArcStrengthOrder comp(this);
    if (FIRST_CHILD(parentNode) == _Node::_invalidNodeIndex) {
        // No children yet so this is the first child.
        TF_VERIFY(LAST_CHILD(parentNode) == _Node::_invalidNodeIndex);

        FIRST_CHILD(parentNode) =
            LAST_CHILD(parentNode) = childNodeIdx;
    }
    else if (comp(childNodeIdx, FIRST_CHILD(parentNode))) {
        // New first child.
        TF_VERIFY(LAST_CHILD(parentNode) != _Node::_invalidNodeIndex);

        _Node& nextNode = _data->nodes[FIRST_CHILD(parentNode)];
        NEXT_SIBLING(childNode) = FIRST_CHILD(parentNode);
        PREV_SIBLING(nextNode) = childNodeIdx;
        FIRST_CHILD(parentNode) = childNodeIdx;
    }
    else if (!comp(childNodeIdx, LAST_CHILD(parentNode))) {
        // New last child.
        _Node& prevNode = _data->nodes[LAST_CHILD(parentNode)];
        PREV_SIBLING(childNode) = LAST_CHILD(parentNode);
        NEXT_SIBLING(prevNode) = childNodeIdx;
        LAST_CHILD(parentNode) = childNodeIdx;
    }
    else {
        // Child goes somewhere internal to the sibling linked list.
        for (size_t index = FIRST_CHILD(parentNode);
             index != _Node::_invalidNodeIndex;
             index = NEXT_SIBLING(_data->nodes[index])) {
            if (comp(childNodeIdx, index)) {
                _Node& nextNode = _data->nodes[index];
                TF_VERIFY(PREV_SIBLING(nextNode) != _Node::_invalidNodeIndex);
                _Node& prevNode = _data->nodes[PREV_SIBLING(nextNode)];
                PREV_SIBLING(childNode) = PREV_SIBLING(nextNode);
                NEXT_SIBLING(childNode) = index;
                PREV_SIBLING(nextNode) = childNodeIdx;
                NEXT_SIBLING(prevNode) = childNodeIdx;
                break;
            }
        }
    }

    return PcpNodeRef(this, childNodeIdx);
}

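// Copy-on-write helper: if the shared node pool is referenced by more than
// one graph, clone it so that subsequent edits only affect this graph.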
void
PcpPrimIndex_Graph::_DetachSharedNodePool()
{
    if (!_data.unique()) {
        TRACE_FUNCTION();
        _data.reset(new _SharedData(*_data));

        // XXX: This probably causes more finalization than necessary. Only
        //      need to finalize if (a) nodes are added (b) nodes are culled.
        _data->finalized = false;
    }
}

size_t
PcpPrimIndex_Graph::_CreateNode(
    const PcpLayerStackSite& site, const PcpArc& arc)
{
    _nodeSitePaths.push_back(site.path);
    _nodeHasSpecs.push_back(false);
    _data->nodes.push_back(_Node());
    _data->finalized = false;

    _Node& node = _data->nodes.back();
    node.layerStack = site.layerStack;
    node.SetArc(arc);

    return _data->nodes.size() - 1;
}

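// Appends copies of all of the given subgraph's nodes to this graph's node
// pool, rebases their mapToRoot expressions and internal node indexes so
// they are valid in this graph, applies 'arc' to the subgraph's root, and
// returns the root's new index.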
size_t
PcpPrimIndex_Graph::_CreateNodesForSubgraph(
    const PcpPrimIndex_Graph& subgraph, const PcpArc& arc)
{
    // The subgraph's root should never have a parent or origin node; we
    // rely on this invariant below.
    TF_VERIFY(!subgraph.GetRootNode().GetParentNode() &&
              !subgraph.GetRootNode().GetOriginNode());

    // Insert a copy of all of the node data in the given subgraph into our
    // node pool.
    const size_t oldNumNodes = _GetNumNodes();
    _data->finalized = false;
    _data->nodes.insert(
        _data->nodes.end(),
        subgraph._data->nodes.begin(), subgraph._data->nodes.end());
    _nodeSitePaths.insert(
        _nodeSitePaths.end(),
        subgraph._nodeSitePaths.begin(), subgraph._nodeSitePaths.end());
    _nodeHasSpecs.insert(
        _nodeHasSpecs.end(),
        subgraph._nodeHasSpecs.begin(), subgraph._nodeHasSpecs.end());
    const size_t newNumNodes = _GetNumNodes();
    const size_t subgraphRootNodeIndex = oldNumNodes;

    // Set the arc connecting the root of the subgraph to the rest of the
    // graph.
    _Node& subgraphRoot = _data->nodes[subgraphRootNodeIndex];
    subgraphRoot.SetArc(arc);

    // XXX: This is very similar to code in _ApplyNodeIndexMapping that
    //      adjusts node references. There must be a good way to factor
    //      all of that out...

    // Iterate over all of the newly-copied nodes and adjust references to
    // other nodes in the node pool.
    struct _ConvertOldToNewIndex {
        _ConvertOldToNewIndex(size_t base, size_t numNewNodes) :
            _base(base), _numNewNodes(numNewNodes) { }
        size_t operator()(size_t oldIndex) const
        {
            if (oldIndex != _Node::_invalidNodeIndex) {
                TF_VERIFY(oldIndex + _base < _numNewNodes);
                return oldIndex + _base;
            }
            else {
                return oldIndex;
            }
        }
        size_t _base;
        size_t _numNewNodes;
    };
    const _ConvertOldToNewIndex convertToNewIndex(subgraphRootNodeIndex,
                                                  newNumNodes);

    for (size_t i = oldNumNodes; i < newNumNodes; ++i) {
        _Node& newNode = _data->nodes[i];

        // Update the node's mapToRoot since it is now part of a new graph.
        if (i != subgraphRootNodeIndex) {
            newNode.mapToRoot =
                subgraphRoot.mapToRoot.Compose(newNode.mapToRoot);
        }

        // The parent and origin of the root of the newly-inserted subgraph
        // don't need to be fixed up because it doesn't point to a node
        // within the subgraph.
        if (i != subgraphRootNodeIndex) {
            PARENT(newNode) = convertToNewIndex(PARENT(newNode));
            ORIGIN(newNode) = convertToNewIndex(ORIGIN(newNode));
        }

        FIRST_CHILD(newNode) = convertToNewIndex(FIRST_CHILD(newNode));
        LAST_CHILD(newNode) = convertToNewIndex(LAST_CHILD(newNode));
        PREV_SIBLING(newNode) = convertToNewIndex(PREV_SIBLING(newNode));
        NEXT_SIBLING(newNode) = convertToNewIndex(NEXT_SIBLING(newNode));
    }

    return subgraphRootNodeIndex;
}

PcpPrimIndex_Graph::_Node&
PcpPrimIndex_Graph::_GetWriteableNode(size_t idx)
{
    TF_VERIFY(idx < _GetNumNodes());
    _DetachSharedNodePool();
    return _data->nodes[idx];
}

PcpPrimIndex_Graph::_Node&
PcpPrimIndex_Graph::_GetWriteableNode(const PcpNodeRef& node)
{
    const size_t idx = node._GetNodeIndex();
    TF_VERIFY(idx < _GetNumNodes());
    _DetachSharedNodePool();
    return _data->nodes[idx];
}

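// Computes, via a pre-order (strong-to-weak) traversal from the root, the
// strength-order index for each node in the pool and writes it to
// (*nodeIndexToStrengthOrder)[nodeIndex]. Returns true if the pool is
// already in strength order, i.e. the computed mapping is the identity.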
bool
PcpPrimIndex_Graph::_ComputeStrengthOrderIndexMapping(
    std::vector<size_t>* nodeIndexToStrengthOrder) const
{
    TRACE_FUNCTION();

    nodeIndexToStrengthOrder->resize(_GetNumNodes());

    const size_t rootNodeIdx = 0;
    size_t strengthIdx = 0;
    return _ComputeStrengthOrderIndexMappingRecursively(
        rootNodeIdx, &strengthIdx, nodeIndexToStrengthOrder);
}

bool
PcpPrimIndex_Graph::_ComputeStrengthOrderIndexMappingRecursively(
    size_t nodeIdx,
    size_t* strengthIdx,
    std::vector<size_t>* nodeIndexToStrengthOrder) const
{
    bool nodeOrderMatchesStrengthOrder = true;

    (*nodeIndexToStrengthOrder)[nodeIdx] = *strengthIdx;
    nodeOrderMatchesStrengthOrder &= (nodeIdx == *strengthIdx);

    // Recurse down.
    const _Node::_SmallInts& smallInts = _GetNode(nodeIdx).smallInts;
    size_t index = smallInts.firstChildIndex;
    if (index != _Node::_invalidNodeIndex) {
        (*strengthIdx)++;

        const bool nodeOrderMatchesStrengthOrderInSubtree =
            _ComputeStrengthOrderIndexMappingRecursively(
                index, strengthIdx, nodeIndexToStrengthOrder);

        nodeOrderMatchesStrengthOrder &= nodeOrderMatchesStrengthOrderInSubtree;
    }

    // Recurse across.
    index = smallInts.nextSiblingIndex;
    if (index != _Node::_invalidNodeIndex) {
        (*strengthIdx)++;

        const bool nodeOrderMatchesStrengthOrderInSubtree =
            _ComputeStrengthOrderIndexMappingRecursively(
                index, strengthIdx, nodeIndexToStrengthOrder);

        nodeOrderMatchesStrengthOrder &= nodeOrderMatchesStrengthOrderInSubtree;
    }

    return nodeOrderMatchesStrengthOrder;
}

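// Builds the index mapping that erases culled nodes: erasable nodes map to
// _invalidNodeIndex and the remaining nodes shift down to fill the gaps.
// A culled node is kept if it (or one of its parents) is still needed to
// preserve the origin chain of a non-culled node, since erasing it would
// break the strength ordering Pcp derives from origins. Returns false if
// there are no nodes to erase.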
bool
PcpPrimIndex_Graph::_ComputeEraseCulledNodeIndexMapping(
    std::vector<size_t>* erasedIndexMapping) const
{
    TRACE_FUNCTION();

    // Figure out which of the nodes that are marked for culling can
    // actually be erased from the node pool.
    const size_t numNodes = _GetNumNodes();
    std::vector<bool> nodeCanBeErased(numNodes);
    for (size_t i = 0; i < numNodes; ++i) {
        nodeCanBeErased[i] = _GetNode(i).smallInts.culled;
    }

    // If a node is marked for culling, but serves as the origin node for a
    // node that is *not* culled, we can't erase it from the graph. Doing so
    // would break the chain of origins Pcp relies on for strength ordering.
    // So, we iterate through the nodes to detect this situation and mark
    // the appropriate nodes as un-erasable.
    //
    // XXX: This has some O(N^2) behavior, as we wind up visiting the nodes
    //      in the chain of origins multiple times. We could keep track of
    //      nodes we've already visited to avoid re-processing them.
    for (size_t i = 0; i < numNodes; ++i) {
        if (ORIGIN(_GetNode(i)) == _Node::_invalidNodeIndex) {
            continue;
        }

        // Follow origin chain until we find the first non-culled node.
        // All subsequent nodes in the chain cannot be erased. This also
        // means that the parents of those nodes cannot be erased.
        bool subsequentOriginsCannotBeCulled = false;
        for (size_t nIdx = i; ; nIdx = ORIGIN(_GetNode(nIdx))) {
            const bool nodeIsCulled = nodeCanBeErased[nIdx];
            if (!nodeIsCulled) {
                subsequentOriginsCannotBeCulled = true;
            }
            else if (nodeIsCulled && subsequentOriginsCannotBeCulled) {
                for (size_t pIdx = nIdx;
                     pIdx != _Node::_invalidNodeIndex &&
                         nodeCanBeErased[pIdx];
                     pIdx = PARENT(_GetNode(pIdx))) {
                    nodeCanBeErased[pIdx] = false;
                }
            }

            if (ORIGIN(_GetNode(nIdx)) == PARENT(_GetNode(nIdx))) {
                break;
            }
        }
    }

    // Now that we've determined which nodes can and can't be erased,
    // create the node index mapping.
    const size_t numNodesToCull =
        std::count(nodeCanBeErased.begin(), nodeCanBeErased.end(), true);
    if (numNodesToCull == 0) {
        return false;
    }

    size_t numCulledNodes = 0;
    erasedIndexMapping->resize(numNodes);
    for (size_t i = 0; i < numNodes; ++i) {
        if (nodeCanBeErased[i]) {
            (*erasedIndexMapping)[i] = _Node::_invalidNodeIndex;
            ++numCulledNodes;
        }
        else {
            (*erasedIndexMapping)[i] = i - numCulledNodes;
        }
    }

    return true;
}

PXR_NAMESPACE_CLOSE_SCOPE