//
// Copyright 2016 Pixar
//
// Licensed under the Apache License, Version 2.0 (the "Apache License")
// with the following modification; you may not use this file except in
// compliance with the Apache License and the following modification to it:
// Section 6. Trademarks. is deleted and replaced with:
//
// 6. Trademarks. This License does not grant permission to use the trade
//    names, trademarks, service marks, or product names of the Licensor
//    and its affiliates, except as required to comply with Section 4(c) of
//    the License and to reproduce the content of the NOTICE file.
//
// You may obtain a copy of the Apache License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the Apache License with the above modification is
// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the Apache License for the specific
// language governing permissions and limitations under the Apache License.
//

#include "pxr/pxr.h"
#include "pxr/usd/pcp/cache.h"
#include "pxr/usd/pcp/arc.h"
#include "pxr/usd/pcp/changes.h"
#include "pxr/usd/pcp/diagnostic.h"
#include "pxr/usd/pcp/debugCodes.h"
#include "pxr/usd/pcp/dependencies.h"
#include "pxr/usd/pcp/layerStack.h"
#include "pxr/usd/pcp/layerStackIdentifier.h"
#include "pxr/usd/pcp/layerStackRegistry.h"
#include "pxr/usd/pcp/node_Iterator.h"
#include "pxr/usd/pcp/pathTranslation.h"
#include "pxr/usd/pcp/primIndex.h"
#include "pxr/usd/pcp/propertyIndex.h"
#include "pxr/usd/pcp/statistics.h"
#include "pxr/usd/pcp/targetIndex.h"

#include "pxr/usd/ar/resolver.h"
#include "pxr/usd/ar/resolverScopedCache.h"
#include "pxr/usd/ar/resolverContextBinder.h"
#include "pxr/usd/sdf/layer.h"
#include "pxr/usd/sdf/schema.h"
#include "pxr/base/trace/trace.h"
#include "pxr/base/work/dispatcher.h"
#include "pxr/base/work/loops.h"
#include "pxr/base/work/utils.h"
#include "pxr/base/work/withScopedParallelism.h"
#include "pxr/base/tf/enum.h"
#include "pxr/base/tf/envSetting.h"
#include "pxr/base/tf/registryManager.h"

#include <tbb/atomic.h>
#include <tbb/concurrent_queue.h>
#include <tbb/concurrent_vector.h>
#include <tbb/spin_rw_mutex.h>

#include <algorithm>
#include <iostream>
#include <utility>
#include <vector>

using std::make_pair;
using std::pair;
using std::vector;

PXR_NAMESPACE_OPEN_SCOPE

TF_DEFINE_ENV_SETTING(
    PCP_CULLING, true,
    "Controls whether culling is enabled in Pcp caches.");

// Helper for applying changes immediately if the client hasn't asked that
// they only be collected instead.
class Pcp_CacheChangesHelper {
public:
    // Construct. If \p changes is \c NULL then collect changes into an
    // internal object and apply them to \p cache when this object is
    // destroyed.
    Pcp_CacheChangesHelper(PcpChanges* changes) :
        _changes(changes)
    {
        // Do nothing
    }

    ~Pcp_CacheChangesHelper()
    {
        // Apply the collected changes immediately if _changes is NULL.
        if (!_changes) {
            _immediateChanges.Apply();
        }
    }

    // Act like a pointer to the c'tor PcpChanges or, if that's NULL, the
    // internal changes.
    PcpChanges* operator->()
    {
        return _changes ? _changes : &_immediateChanges;
    }

private:
    PcpChanges* _changes;
    PcpChanges _immediateChanges;
};
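
// A minimal usage sketch (hypothetical caller): a mutation entry point
// forwards its optional PcpChanges pointer through the helper. If the
// caller passed NULL, the collected changes are applied when
// 'cacheChanges' goes out of scope; otherwise they are only recorded
// into the caller's PcpChanges object.
//
//   void SomeEditOnCache(PcpCache* cache, PcpChanges* changes) {
//       Pcp_CacheChangesHelper cacheChanges(changes);
//       cacheChanges->DidChangeSignificantly(
//           cache, SdfPath::AbsoluteRootPath());
//   }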

PcpCache::PcpCache(
    const PcpLayerStackIdentifier & layerStackIdentifier,
    const std::string& fileFormatTarget,
    bool usd) :
    _rootLayer(layerStackIdentifier.rootLayer),
    _sessionLayer(layerStackIdentifier.sessionLayer),
    _layerStackIdentifier(layerStackIdentifier),
    _usd(usd),
    _fileFormatTarget(fileFormatTarget),
    _layerStackCache(Pcp_LayerStackRegistry::New(_fileFormatTarget, _usd)),
    _primDependencies(new Pcp_Dependencies())
{
    // Do nothing
}

PcpCache::~PcpCache()
{
    // We have to release the GIL here, since we don't know whether or not
    // we've been invoked by some python-wrapped thing that might not have
    // released the GIL itself. Dropping the layer RefPtrs could cause the
    // layers to expire, which might try to invoke the python/c++ shared
    // lifetime management support, which will need to acquire the GIL. If
    // that happens in a separate worker thread while this thread holds the
    // GIL, we'll deadlock. Dropping the GIL here prevents this.
    TF_PY_ALLOW_THREADS_IN_SCOPE();

    // Clear the layer stack before destroying the registry, so
    // that it can safely unregister itself.
    TfReset(_layerStack);

    // Tear down some of our data structures in parallel, since it can take
    // quite a bit of time.
    WorkWithScopedParallelism([this]() {
        WorkDispatcher wd;
        wd.Run([this]() { _rootLayer.Reset(); });
        wd.Run([this]() { _sessionLayer.Reset(); });
        wd.Run([this]() { TfReset(_includedPayloads); });
        wd.Run([this]() { TfReset(_variantFallbackMap); });
        wd.Run([this]() { _primIndexCache.ClearInParallel(); });
        wd.Run([this]() { TfReset(_propertyIndexCache); });
        // Wait here: _primDependencies cannot be destroyed concurrently
        // with the prim indexes, since they both hold references to
        // layer stacks and the layer stack registry is not currently
        // prepared to handle concurrent expiry of layer stacks.
    });

    _primDependencies.reset();
    _layerStackCache.Reset();
}

////////////////////////////////////////////////////////////////////////
// Cache parameters.

const PcpLayerStackIdentifier&
PcpCache::GetLayerStackIdentifier() const
{
    return _layerStackIdentifier;
}

PcpLayerStackPtr
PcpCache::GetLayerStack() const
{
    return _layerStack;
}

PcpLayerStackPtr
PcpCache::FindLayerStack(const PcpLayerStackIdentifier &id) const
{
    return _layerStackCache->Find(id);
}

bool
PcpCache::UsesLayerStack(const PcpLayerStackPtr &layerStack) const
{
    return _layerStackCache->Contains(layerStack);
}

const PcpLayerStackPtrVector&
PcpCache::FindAllLayerStacksUsingLayer(const SdfLayerHandle& layer) const
{
    return _layerStackCache->FindAllUsingLayer(layer);
}

bool
PcpCache::IsUsd() const
{
    return _usd;
}

const std::string&
PcpCache::GetFileFormatTarget() const
{
    return _fileFormatTarget;
}

PcpVariantFallbackMap
PcpCache::GetVariantFallbacks() const
{
    return _variantFallbackMap;
}

void
PcpCache::SetVariantFallbacks( const PcpVariantFallbackMap &map,
                               PcpChanges* changes )
{
    if (_variantFallbackMap != map) {
        _variantFallbackMap = map;

        Pcp_CacheChangesHelper cacheChanges(changes);

        // We could scan to find prim indices that actually use the
        // affected variant sets, but for simplicity of implementing what
        // is a really uncommon operation, we just invalidate everything.
        cacheChanges->DidChangeSignificantly(this, SdfPath::AbsoluteRootPath());
    }
}

bool
PcpCache::IsPayloadIncluded(const SdfPath &path) const
{
    return _includedPayloads.find(path) != _includedPayloads.end();
}

PcpCache::PayloadSet const &
PcpCache::GetIncludedPayloads() const
{
    return _includedPayloads;
}

void
PcpCache::RequestPayloads( const SdfPathSet & pathsToInclude,
                           const SdfPathSet & pathsToExclude,
                           PcpChanges* changes )
{
    Pcp_CacheChangesHelper cacheChanges(changes);

    TF_FOR_ALL(path, pathsToInclude) {
        if (path->IsPrimPath()) {
            if (_includedPayloads.insert(*path).second) {
                cacheChanges->DidChangeSignificantly(this, *path);
            }
        }
        else {
            TF_CODING_ERROR("Path <%s> must be a prim path", path->GetText());
        }
    }
    TF_FOR_ALL(path, pathsToExclude) {
        if (path->IsPrimPath()) {
            if (pathsToInclude.find(*path) == pathsToInclude.end()) {
                if (_includedPayloads.erase(*path)) {
                    cacheChanges->DidChangeSignificantly(this, *path);
                }
            }
        }
        else {
            TF_CODING_ERROR("Path <%s> must be a prim path", path->GetText());
        }
    }
}
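
// A minimal usage sketch (hypothetical paths): include one payload and
// exclude another. Passing NULL for 'changes' makes the cache apply the
// resulting invalidations immediately (see Pcp_CacheChangesHelper above).
//
//   SdfPathSet include = { SdfPath("/World/Chars/Bob") };
//   SdfPathSet exclude = { SdfPath("/World/Chars/Alice") };
//   cache.RequestPayloads(include, exclude, /* changes = */ nullptr);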

void
PcpCache::RequestLayerMuting(const std::vector<std::string>& layersToMute,
                             const std::vector<std::string>& layersToUnmute,
                             PcpChanges* changes,
                             std::vector<std::string>* newLayersMuted,
                             std::vector<std::string>* newLayersUnmuted)
{
    ArResolverContextBinder binder(_layerStackIdentifier.pathResolverContext);

    std::vector<std::string> finalLayersToMute;
    for (const auto& layerToMute : layersToMute) {
        if (layerToMute.empty()) {
            continue;
        }

        if (SdfLayer::Find(layerToMute) == _rootLayer) {
            TF_CODING_ERROR("Cannot mute cache's root layer @%s@",
                            layerToMute.c_str());
            continue;
        }

        finalLayersToMute.push_back(layerToMute);
    }

    std::vector<std::string> finalLayersToUnmute;
    for (const auto& layerToUnmute : layersToUnmute) {
        if (layerToUnmute.empty()) {
            continue;
        }

        if (std::find(layersToMute.begin(), layersToMute.end(),
                      layerToUnmute) == layersToMute.end()) {
            finalLayersToUnmute.push_back(layerToUnmute);
        }
    }

    if (finalLayersToMute.empty() && finalLayersToUnmute.empty()) {
        return;
    }

    _layerStackCache->MuteAndUnmuteLayers(
        _rootLayer, &finalLayersToMute, &finalLayersToUnmute);

    Pcp_CacheChangesHelper cacheChanges(changes);

    // Register changes for all computed layer stacks that are
    // affected by the newly muted/unmuted layers.
    for (const auto& layerToMute : finalLayersToMute) {
        cacheChanges->DidMuteLayer(this, layerToMute);
    }

    for (const auto& layerToUnmute : finalLayersToUnmute) {
        cacheChanges->DidUnmuteLayer(this, layerToUnmute);
    }

    // The above won't handle cases where we've unmuted the root layer
    // of a reference or payload layer stack, since prim indexing will skip
    // computing those layer stacks altogether. So, find all prim indexes
    // that have the associated composition error and treat this as if
    // we're reloading the unmuted layer.
    if (!finalLayersToUnmute.empty()) {
        for (const auto& primIndexEntry : _primIndexCache) {
            const PcpPrimIndex& primIndex = primIndexEntry.second;
            if (!primIndex.IsValid()) {
                continue;
            }

            for (const auto& error : primIndex.GetLocalErrors()) {
                PcpErrorMutedAssetPathPtr typedError =
                    std::dynamic_pointer_cast<PcpErrorMutedAssetPath>(error);
                if (!typedError) {
                    continue;
                }

                const bool assetWasUnmuted = std::find(
                    finalLayersToUnmute.begin(), finalLayersToUnmute.end(),
                    typedError->resolvedAssetPath) != finalLayersToUnmute.end();
                if (assetWasUnmuted) {
                    cacheChanges->DidMaybeFixAsset(
                        this, typedError->site, typedError->layer,
                        typedError->resolvedAssetPath);
                }
            }
        }
    }

    // Update the newLayersMuted and newLayersUnmuted out-parameters.
    if (newLayersMuted) {
        *newLayersMuted = std::move(finalLayersToMute);
    }
    if (newLayersUnmuted) {
        *newLayersUnmuted = std::move(finalLayersToUnmute);
    }
}
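
// A minimal usage sketch (hypothetical layer identifier): mute one layer
// and collect the resulting change entries for later, batched application.
//
//   PcpChanges changes;
//   cache.RequestLayerMuting({"shot_overrides.usda"}, {}, &changes);
//   // ... gather more changes ...
//   changes.Apply();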

const std::vector<std::string>&
PcpCache::GetMutedLayers() const
{
    return _layerStackCache->GetMutedLayers();
}

bool
PcpCache::IsLayerMuted(const std::string& layerId) const
{
    return IsLayerMuted(_rootLayer, layerId);
}

bool
PcpCache::IsLayerMuted(const SdfLayerHandle& anchorLayer,
                       const std::string& layerId,
                       std::string* canonicalMutedLayerId) const
{
    return _layerStackCache->IsLayerMuted(
        anchorLayer, layerId, canonicalMutedLayerId);
}

PcpPrimIndexInputs
PcpCache::GetPrimIndexInputs()
{
    return PcpPrimIndexInputs()
        .Cache(this)
        .VariantFallbacks(&_variantFallbackMap)
        .IncludedPayloads(&_includedPayloads)
        .Cull(TfGetEnvSetting(PCP_CULLING))
        .FileFormatTarget(_fileFormatTarget);
}
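
// Note that PcpPrimIndexInputs uses a chained builder style, so callers
// can override individual settings on the returned value. For example
// (a sketch), to disable culling for a single computation regardless of
// the PCP_CULLING env setting:
//
//   PcpPrimIndexInputs inputs = cache.GetPrimIndexInputs().Cull(false);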

PcpLayerStackRefPtr
PcpCache::ComputeLayerStack(const PcpLayerStackIdentifier &id,
                            PcpErrorVector *allErrors)
{
    PcpLayerStackRefPtr result =
        _layerStackCache->FindOrCreate(id, allErrors);

    // Retain the cache's root layer stack.
    if (!_layerStack && id == GetLayerStackIdentifier()) {
        _layerStack = result;
    }

    return result;
}

const PcpPrimIndex *
PcpCache::FindPrimIndex(const SdfPath & path) const
{
    return _GetPrimIndex(path);
}

void
PcpCache::ComputeRelationshipTargetPaths(const SdfPath & relPath,
                                         SdfPathVector *paths,
                                         bool localOnly,
                                         const SdfSpecHandle &stopProperty,
                                         bool includeStopProperty,
                                         SdfPathVector *deletedPaths,
                                         PcpErrorVector *allErrors)
{
    TRACE_FUNCTION();

    if (!relPath.IsPropertyPath()) {
        TF_CODING_ERROR(
            "Path <%s> must be a relationship path", relPath.GetText());
        return;
    }

    PcpTargetIndex targetIndex;
    PcpBuildFilteredTargetIndex( PcpSite(GetLayerStackIdentifier(), relPath),
                                 ComputePropertyIndex(relPath, allErrors),
                                 SdfSpecTypeRelationship,
                                 localOnly, stopProperty, includeStopProperty,
                                 this, &targetIndex, deletedPaths,
                                 allErrors );
    paths->swap(targetIndex.paths);
}

void
PcpCache::ComputeAttributeConnectionPaths(const SdfPath & attrPath,
                                          SdfPathVector *paths,
                                          bool localOnly,
                                          const SdfSpecHandle &stopProperty,
                                          bool includeStopProperty,
                                          SdfPathVector *deletedPaths,
                                          PcpErrorVector *allErrors)
{
    TRACE_FUNCTION();

    if (!attrPath.IsPropertyPath()) {
        TF_CODING_ERROR(
            "Path <%s> must be an attribute path", attrPath.GetText());
        return;
    }

    PcpTargetIndex targetIndex;
    PcpBuildFilteredTargetIndex( PcpSite(GetLayerStackIdentifier(), attrPath),
                                 ComputePropertyIndex(attrPath, allErrors),
                                 SdfSpecTypeAttribute,
                                 localOnly, stopProperty, includeStopProperty,
                                 this, &targetIndex, deletedPaths,
                                 allErrors );
    paths->swap(targetIndex.paths);
}

const PcpPropertyIndex *
PcpCache::FindPropertyIndex(const SdfPath & path) const
{
    return _GetPropertyIndex(path);
}

SdfLayerHandleSet
PcpCache::GetUsedLayers() const
{
    SdfLayerHandleSet rval = _primDependencies->GetUsedLayers();

    // Dependencies don't include the local layer stack, so manually add those
    // layers here.
    if (_layerStack) {
        const SdfLayerRefPtrVector& localLayers = _layerStack->GetLayers();
        rval.insert(localLayers.begin(), localLayers.end());
    }
    return rval;
}

size_t
PcpCache::GetUsedLayersRevision() const
{
    return _primDependencies->GetLayerStacksRevision();
}

SdfLayerHandleSet
PcpCache::GetUsedRootLayers() const
{
    SdfLayerHandleSet rval = _primDependencies->GetUsedRootLayers();

    // Dependencies don't include the local layer stack, so manually add the
    // local root layer here.
    rval.insert(_rootLayer);
    return rval;
}

PcpDependencyVector
PcpCache::FindSiteDependencies(
    const SdfLayerHandle& layer,
    const SdfPath& sitePath,
    PcpDependencyFlags depMask,
    bool recurseOnSite,
    bool recurseOnIndex,
    bool filterForExistingCachesOnly
    ) const
{
    PcpDependencyVector result;
    for (const auto& layerStack: FindAllLayerStacksUsingLayer(layer)) {
        PcpDependencyVector deps = FindSiteDependencies(
            layerStack, sitePath, depMask, recurseOnSite, recurseOnIndex,
            filterForExistingCachesOnly);
        for (PcpDependency dep: deps) {
            // Fold in any sublayer offset.
            if (const SdfLayerOffset *sublayerOffset =
                layerStack->GetLayerOffsetForLayer(layer)) {
                dep.mapFunc = dep.mapFunc.ComposeOffset(*sublayerOffset);
            }
            result.push_back(std::move(dep));
        }
    }
    return result;
}
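
// For example (hypothetical numbers): if 'layer' is sublayered into a
// layer stack with a time offset of +10 frames, the returned dependency's
// mapFunc is composed with that offset, so values authored in 'layer'
// map to the correct times in the dependent prim index.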

PcpDependencyVector
PcpCache::FindSiteDependencies(
    const PcpLayerStackPtr& siteLayerStack,
    const SdfPath& sitePath,
    PcpDependencyFlags depMask,
    bool recurseOnSite,
    bool recurseOnIndex,
    bool filterForExistingCachesOnly
    ) const
{
    TRACE_FUNCTION();

    PcpDependencyVector deps;

    //
    // Validate arguments.
    //
    if (!(depMask & (PcpDependencyTypeVirtual|PcpDependencyTypeNonVirtual))) {
        TF_CODING_ERROR("depMask must include at least one of "
                        "{PcpDependencyTypeVirtual, "
                        "PcpDependencyTypeNonVirtual}");
        return deps;
    }
    if (!(depMask & (PcpDependencyTypeRoot | PcpDependencyTypeDirect |
                     PcpDependencyTypeAncestral))) {
        TF_CODING_ERROR("depMask must include at least one of "
                        "{PcpDependencyTypeRoot, "
                        "PcpDependencyTypePurelyDirect, "
                        "PcpDependencyTypePartlyDirect, "
                        "PcpDependencyTypeAncestral}");
        return deps;
    }
    if ((depMask & PcpDependencyTypeRoot) &&
        !(depMask & PcpDependencyTypeNonVirtual)) {
        // Root deps are only ever non-virtual.
        TF_CODING_ERROR("depMask of PcpDependencyTypeRoot requires "
                        "PcpDependencyTypeNonVirtual");
        return deps;
    }
    if (siteLayerStack->_registry != _layerStackCache) {
        TF_CODING_ERROR("PcpLayerStack does not belong to this PcpCache");
        return deps;
    }

    // Filter function for dependencies to return.
    auto cacheFilterFn = [this, filterForExistingCachesOnly]
        (const SdfPath &indexPath) {
        if (!filterForExistingCachesOnly) {
            return true;
        } else if (indexPath.IsAbsoluteRootOrPrimPath()) {
            return bool(FindPrimIndex(indexPath));
        } else if (indexPath.IsPropertyPath()) {
            return bool(FindPropertyIndex(indexPath));
        } else {
            return false;
        }
    };

    // Dependency arcs expressed in scene description connect prim
    // paths, prim variant paths, and absolute paths only. Those arcs
    // imply dependency structure for children, such as properties.
    // To service dependency queries about those children, we must
    // examine structure at the enclosing prim/root level where deps
    // are expressed. Find the containing path.
    SdfPath tmpPath;
    const SdfPath *sitePrimPath = &sitePath;
    if (ARCH_UNLIKELY(!sitePath.IsPrimOrPrimVariantSelectionPath())) {
        tmpPath = (sitePath == SdfPath::AbsoluteRootPath()) ? sitePath :
            sitePath.GetPrimOrPrimVariantSelectionPath();
        sitePrimPath = &tmpPath;
    }

    // Handle the root dependency.
    // Sites containing variant selections are never root dependencies.
    if (depMask & PcpDependencyTypeRoot &&
        siteLayerStack == _layerStack &&
        !sitePath.ContainsPrimVariantSelection() &&
        cacheFilterFn(sitePath)) {
        deps.push_back(PcpDependency{
            sitePath, sitePath, PcpMapFunction::Identity()});
    }

    // Handle dependencies stored in _primDependencies.
    auto visitSiteFn = [&](const SdfPath &depPrimIndexPath,
                           const SdfPath &depPrimSitePath)
    {
        // Because arc dependencies are analyzed in terms of prims,
        // if we are querying deps for a property, and recurseOnSite
        // is true, we must guard against recursing into paths
        // that are siblings of the property and filter them out.
        if (depPrimSitePath != *sitePrimPath &&
            depPrimSitePath.HasPrefix(*sitePrimPath) &&
            !depPrimSitePath.HasPrefix(sitePath)) {
            return;
        }

        // If we have recursed above to an ancestor, include its direct
        // dependencies, since they are considered ancestral by descendants.
        const PcpDependencyFlags localMask =
            (depPrimSitePath != *sitePrimPath &&
             sitePrimPath->HasPrefix(depPrimSitePath))
            ? (depMask | PcpDependencyTypeDirect) : depMask;

        // If we have recursed below sitePath, use that site;
        // otherwise use the site the caller requested.
        const SdfPath localSitePath =
            (depPrimSitePath != *sitePrimPath &&
             depPrimSitePath.HasPrefix(*sitePrimPath))
            ? depPrimSitePath : sitePath;

        auto visitNodeFn = [&](const SdfPath &depPrimIndexPath,
                               const PcpNodeRef &node)
        {
            // Skip computing the node's dependency type if we aren't looking
            // for a specific type -- that computation can be expensive.
            if (localMask != PcpDependencyTypeAnyIncludingVirtual) {
                PcpDependencyFlags flags = PcpClassifyNodeDependency(node);
                if ((flags & localMask) != flags) {
                    return;
                }
            }

            // Now that we have found a dependency on depPrimSitePath,
            // use path translation to get the corresponding depIndexPath.
            SdfPath depIndexPath;
            bool valid = false;
            if (node.GetArcType() == PcpArcTypeRelocate) {
                // Relocates require special handling. Because
                // a relocate node's map function is always
                // identity, we must do our own prefix replacement
                // to step out of the relocate, then continue
                // with regular path translation.
                const PcpNodeRef parent = node.GetParentNode();
                depIndexPath = PcpTranslatePathFromNodeToRoot(
                    parent,
                    localSitePath.ReplacePrefix( node.GetPath(),
                                                 parent.GetPath() ),
                    &valid );
            } else {
                depIndexPath = PcpTranslatePathFromNodeToRoot(
                    node, localSitePath, &valid);
            }
            if (valid && TF_VERIFY(!depIndexPath.IsEmpty()) &&
                cacheFilterFn(depIndexPath)) {
                deps.push_back(PcpDependency{
                    depIndexPath, localSitePath,
                    node.GetMapToRoot().Evaluate() });
            }
        };
        Pcp_ForEachDependentNode(depPrimSitePath, siteLayerStack,
                                 depPrimIndexPath, *this, visitNodeFn);
    };
    _primDependencies->ForEachDependencyOnSite(
        siteLayerStack, *sitePrimPath,
        /* includeAncestral = */ depMask & PcpDependencyTypeAncestral,
        recurseOnSite, visitSiteFn);

    // If recursing down namespace, we may have cache entries for
    // descendants that did not introduce new dependency arcs, and
    // therefore were not encountered above, but which nonetheless
    // represent dependent paths. Add them if requested.
    if (recurseOnIndex) {
        TRACE_SCOPE("PcpCache::FindSiteDependencies - recurseOnIndex");
        SdfPathSet seenDeps;
        PcpDependencyVector expandedDeps;

        for(const PcpDependency &dep: deps) {
            const SdfPath & indexPath = dep.indexPath;

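            // seenDeps is ordered, so the closest candidate ancestor of
            // indexPath is the greatest element that is not after it.
            // E.g. (hypothetical paths): having already seen /A and /A/B,
            // a lookup for /A/B/C steps back to /A/B, finds it is a
            // prefix, and skips re-expanding that subtree.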
            auto it = seenDeps.upper_bound(indexPath);
            if (it != seenDeps.begin()) {
                --it;
                if (indexPath.HasPrefix(*it)) {
                    // Short circuit further expansion; expect we
                    // have already recursed below this path.
                    continue;
                }
            }

            seenDeps.insert(indexPath);
            expandedDeps.push_back(dep);
            // Recurse on child index entries.
            if (indexPath.IsAbsoluteRootOrPrimPath()) {
                auto primRange =
                    _primIndexCache.FindSubtreeRange(indexPath);
                if (primRange.first != primRange.second) {
                    // Skip initial entry, since we've already added it
                    // to expandedDeps above.
                    ++primRange.first;
                }

                for (auto entryIter = primRange.first;
                     entryIter != primRange.second; ++entryIter) {
                    const SdfPath& subPath = entryIter->first;
                    const PcpPrimIndex& subPrimIndex = entryIter->second;
                    if (subPrimIndex.IsValid()) {
                        expandedDeps.push_back(PcpDependency{
                            subPath,
                            subPath.ReplacePrefix(indexPath, dep.sitePath),
                            dep.mapFunc});
                    }
                }
            }
            // Recurse on child property entries.
            const auto propRange =
                _propertyIndexCache.FindSubtreeRange(indexPath);
            for (auto entryIter = propRange.first;
                 entryIter != propRange.second; ++entryIter) {
                const SdfPath& subPath = entryIter->first;
                const PcpPropertyIndex& subPropIndex = entryIter->second;
                if (!subPropIndex.IsEmpty()) {
                    expandedDeps.push_back(PcpDependency{
                        subPath,
                        subPath.ReplacePrefix(indexPath, dep.sitePath),
                        dep.mapFunc});
                }
            }
        }
        std::swap(deps, expandedDeps);
    }

    return deps;
}

bool
PcpCache::CanHaveOpinionForSite(
    const SdfPath& localPcpSitePath,
    const SdfLayerHandle& layer,
    SdfPath* allowedPathInLayer) const
{
    // Get the prim index.
    if (const PcpPrimIndex* primIndex = _GetPrimIndex(localPcpSitePath)) {
        // We only want to check any layer stack for layer once.
        std::set<PcpLayerStackPtr> visited;

        // Iterate over all nodes.
        for (const PcpNodeRef &node: primIndex->GetNodeRange()) {
            // Ignore nodes that don't provide specs.
            if (node.CanContributeSpecs()) {
                // Check each layer stack that contributes specs only once.
                if (visited.insert(node.GetLayerStack()).second) {
                    // Check for layer.
                    TF_FOR_ALL(i, node.GetLayerStack()->GetLayers()) {
                        if (*i == layer) {
                            if (allowedPathInLayer) {
                                *allowedPathInLayer = node.GetPath();
                            }
                            return true;
                        }
                    }
                }
            }
        }
    }

    return false;
}

std::vector<std::string>
PcpCache::GetInvalidSublayerIdentifiers() const
{
    TRACE_FUNCTION();

    std::set<std::string> result;

    std::vector<PcpLayerStackPtr> allLayerStacks =
        _layerStackCache->GetAllLayerStacks();

    TF_FOR_ALL(layerStack, allLayerStacks) {
        // Scan errors for a sublayer error.
        PcpErrorVector errs = (*layerStack)->GetLocalErrors();
        TF_FOR_ALL(e, errs) {
            if (PcpErrorInvalidSublayerPathPtr typedErr =
                std::dynamic_pointer_cast<PcpErrorInvalidSublayerPath>(*e)){
                result.insert(typedErr->sublayerPath);
            }
        }
    }

    return std::vector<std::string>( result.begin(), result.end() );
}

bool
PcpCache::IsInvalidSublayerIdentifier(const std::string& identifier) const
{
    TRACE_FUNCTION();

    std::vector<std::string> layers = GetInvalidSublayerIdentifiers();
    std::vector<std::string>::const_iterator i =
        std::find(layers.begin(), layers.end(), identifier);
    return i != layers.end();
}

std::map<SdfPath, std::vector<std::string>, SdfPath::FastLessThan>
PcpCache::GetInvalidAssetPaths() const
{
    TRACE_FUNCTION();

    std::map<SdfPath, std::vector<std::string>, SdfPath::FastLessThan> result;

    TF_FOR_ALL(it, _primIndexCache) {
        const SdfPath& primPath = it->first;
        const PcpPrimIndex& primIndex = it->second;
        if (primIndex.IsValid()) {
            PcpErrorVector errors = primIndex.GetLocalErrors();
            for (const auto& e : errors) {
                if (PcpErrorInvalidAssetPathPtr typedErr =
                    std::dynamic_pointer_cast<PcpErrorInvalidAssetPath>(e)){
                    result[primPath].push_back(typedErr->resolvedAssetPath);
                }
            }
        }
    }

    return result;
}

bool
PcpCache::IsInvalidAssetPath(const std::string& resolvedAssetPath) const
{
    TRACE_FUNCTION();

    std::map<SdfPath, std::vector<std::string>, SdfPath::FastLessThan>
        pathMap = GetInvalidAssetPaths();
    TF_FOR_ALL(i, pathMap) {
        TF_FOR_ALL(j, i->second) {
            if (*j == resolvedAssetPath) {
                return true;
            }
        }
    }
    return false;
}

bool
PcpCache::HasAnyDynamicFileFormatArgumentDependencies() const
{
    return _primDependencies->HasAnyDynamicFileFormatArgumentDependencies();
}

bool
PcpCache::IsPossibleDynamicFileFormatArgumentField(
    const TfToken &field) const
{
    return _primDependencies->IsPossibleDynamicFileFormatArgumentField(field);
}

const PcpDynamicFileFormatDependencyData &
PcpCache::GetDynamicFileFormatArgumentDependencyData(
    const SdfPath &primIndexPath) const
{
    return _primDependencies->GetDynamicFileFormatArgumentDependencyData(
        primIndexPath);
}

void
PcpCache::Apply(const PcpCacheChanges& changes, PcpLifeboat* lifeboat)
{
    TRACE_FUNCTION();

    // Check for special case of blowing everything.
    if (changes.didChangeSignificantly.count(SdfPath::AbsoluteRootPath())) {
        // Clear everything for scene graph objects.
        _primIndexCache.clear();
        _propertyIndexCache.clear();
        _primDependencies->RemoveAll(lifeboat);
    }
    else {
        // If layers may have changed, inform _primDependencies.
        if (changes.didMaybeChangeLayers) {
            _primDependencies->LayerStacksChanged();
        }

        // Blow prim and property indexes due to prim graph changes.
        TF_FOR_ALL(i, changes.didChangeSignificantly) {
            const SdfPath& path = *i;
            if (path.IsPrimPath()) {
                _RemovePrimAndPropertyCaches(path, lifeboat);
            }
            else {
                _RemovePropertyCaches(path, lifeboat);
            }
        }

        // Blow prim and property indexes due to prim graph changes.
        TF_FOR_ALL(i, changes.didChangePrims) {
            _RemovePrimCache(*i, lifeboat);
            _RemovePropertyCaches(*i, lifeboat);
        }

        // Blow property stacks and update spec dependencies on prims.
        auto updateSpecStacks = [this, &lifeboat](const SdfPath& path) {
            if (path.IsAbsoluteRootOrPrimPath()) {
                // We've possibly changed the prim spec stack. Note that
                // we may have blown the prim index so check that it exists.
                if (PcpPrimIndex* primIndex = _GetPrimIndex(path)) {
                    Pcp_RescanForSpecs(primIndex, IsUsd(),
                                       /* updateHasSpecs */ true);

                    // If there are no specs left then we can discard the
                    // prim index.
                    bool anyNodeHasSpecs = false;
                    for (const PcpNodeRef &node: primIndex->GetNodeRange()) {
                        if (node.HasSpecs()) {
                            anyNodeHasSpecs = true;
                            break;
                        }
                    }
                    if (!anyNodeHasSpecs) {
                        _RemovePrimAndPropertyCaches(path, lifeboat);
                    }
                }
            }
            else if (path.IsPropertyPath()) {
                _RemovePropertyCache(path, lifeboat);
            }
            else if (path.IsTargetPath()) {
                // We have potentially added or removed a relationship target
                // spec. This invalidates the property stack for any
                // relational attributes for this target.
                _RemovePropertyCaches(path, lifeboat);
            }
        };

        TF_FOR_ALL(i, changes.didChangeSpecs) {
            updateSpecStacks(*i);
        }

        TF_FOR_ALL(i, changes._didChangeSpecsInternal) {
            updateSpecStacks(*i);
        }

        // Fix the keys for any prim or property under any of the renamed
        // paths.
        // XXX: It'd be nice if this was handled by just adjusting
        //      paths here and there.
        // First blow all caches under the new names.
        TF_FOR_ALL(i, changes.didChangePath) {
            if (!i->second.IsEmpty()) {
                _RemovePrimAndPropertyCaches(i->second, lifeboat);
            }
        }
        // XXX: Blow the caches at the old names. We'd rather just
        //      adjust paths here and there in the prim graphs and the
        //      SdfPathTable keys, but the latter isn't possible yet
        //      and the former is inconvenient.
        TF_FOR_ALL(i, changes.didChangePath) {
            _RemovePrimAndPropertyCaches(i->first, lifeboat);
        }
    }

    // Fix up payload paths. First remove everything we renamed then add
    // the new names. This avoids any problems where we rename both from
    // and to a path, e.g. B -> C, A -> B.
    // XXX: This is a loop over both the changes and all included
    //      payloads because we have no way to find a prefix in a
    //      hash set of payload paths. We could store an SdfPathSet
    //      instead, but at an increased cost when testing whether any
    //      given path is in the set. We'd have to benchmark to see
    //      which approach is more costly.
    static const bool fixTargetPaths = true;
    std::vector<SdfPath> newIncludes;
    // Path changes are recorded in the order in which they were processed,
    // so we can distinguish a rename of B -> C followed by A -> B from
    // A -> B followed by B -> C.
    TF_FOR_ALL(i, changes.didChangePath) {
        for (PayloadSet::iterator j = _includedPayloads.begin();
             j != _includedPayloads.end(); ) {
            // If the payload path has the old path as a prefix then remove
            // the payload path and add the payload path with the old path
            // prefix replaced by the new path. We don't fix target paths
            // because there can't be any on a payload path.
            if (j->HasPrefix(i->first)) {
                newIncludes.push_back(j->ReplacePrefix(i->first, i->second,
                                                       !fixTargetPaths));
                _includedPayloads.erase(j++);
            }
            else {
                ++j;
            }
        }
        // Because we could have a chain of renames like A -> B, B -> C, we
        // also need to check the newIncludes. Any payloads prefixed by A
        // will have been removed from _includedPayloads and renamed B in
        // newIncludes during the A -> B pass, so the B -> C pass needs to
        // rename all the B-prefixed paths in newIncludes to complete the
        // full rename.
        for (SdfPath &newInclude : newIncludes) {
            if (newInclude.HasPrefix(i->first)) {
                // The rename can happen in place.
                newInclude = newInclude.ReplacePrefix(i->first, i->second,
                                                      !fixTargetPaths);
            }
        }
    }
    _includedPayloads.insert(newIncludes.begin(), newIncludes.end());
}

void
PcpCache::Reload(PcpChanges* changes)
{
    TRACE_FUNCTION();

    if (!_layerStack) {
        return;
    }

    ArResolverContextBinder binder(_layerStackIdentifier.pathResolverContext);

    // Reload every invalid sublayer and asset we know about,
    // in any layer stack or prim index.
    std::vector<PcpLayerStackPtr> allLayerStacks =
        _layerStackCache->GetAllLayerStacks();
    TF_FOR_ALL(layerStack, allLayerStacks) {
        const PcpErrorVector errors = (*layerStack)->GetLocalErrors();
        for (const auto& e : errors) {
            if (PcpErrorInvalidSublayerPathPtr typedErr =
                std::dynamic_pointer_cast<PcpErrorInvalidSublayerPath>(e)) {
                changes->DidMaybeFixSublayer(this,
                                             typedErr->layer,
                                             typedErr->sublayerPath);
            }
        }
    }
    TF_FOR_ALL(it, _primIndexCache) {
        const PcpPrimIndex& primIndex = it->second;
        if (primIndex.IsValid()) {
            const PcpErrorVector errors = primIndex.GetLocalErrors();
            for (const auto& e : errors) {
                if (PcpErrorInvalidAssetPathPtr typedErr =
                    std::dynamic_pointer_cast<PcpErrorInvalidAssetPath>(e)) {
                    changes->DidMaybeFixAsset(this,
                                              typedErr->site,
                                              typedErr->layer,
                                              typedErr->resolvedAssetPath);
                }
            }
        }
    }

    // Reload every layer we've reached except the session layers (which we
    // never want to reload from disk).
    SdfLayerHandleSet layersToReload = GetUsedLayers();

    for (const SdfLayerHandle &layer : _layerStack->GetSessionLayers()) {
        layersToReload.erase(layer);
    }

    SdfLayer::ReloadLayers(layersToReload);
}

void
PcpCache::ReloadReferences(PcpChanges* changes, const SdfPath& primPath)
{
    TRACE_FUNCTION();

    ArResolverContextBinder binder(_layerStackIdentifier.pathResolverContext);

    // Traverse every PrimIndex at or under primPath to find
    // InvalidAssetPath errors, and collect the unique layer stacks used.
    std::set<PcpLayerStackPtr> layerStacksAtOrUnderPrim;
    const auto range = _primIndexCache.FindSubtreeRange(primPath);
    for (auto entryIter = range.first; entryIter != range.second; ++entryIter) {
        const auto& entry = *entryIter;
        const PcpPrimIndex& primIndex = entry.second;
        if (primIndex.IsValid()) {
            PcpErrorVector errors = primIndex.GetLocalErrors();
            for (const auto& e : errors) {
                if (PcpErrorInvalidAssetPathPtr typedErr =
                    std::dynamic_pointer_cast<PcpErrorInvalidAssetPath>(e))
                {
                    changes->DidMaybeFixAsset(this, typedErr->site,
                                              typedErr->layer,
                                              typedErr->resolvedAssetPath);
                }
            }
            for (const PcpNodeRef &node: primIndex.GetNodeRange()) {
                layerStacksAtOrUnderPrim.insert( node.GetSite().layerStack );
            }
        }
    }

    // Check each used layer stack (gathered above) for invalid sublayers.
    for (const PcpLayerStackPtr& layerStack: layerStacksAtOrUnderPrim) {
        // Scan errors for a sublayer error.
        PcpErrorVector errs = layerStack->GetLocalErrors();
        for (const PcpErrorBasePtr &err: errs) {
            if (PcpErrorInvalidSublayerPathPtr typedErr =
                std::dynamic_pointer_cast<PcpErrorInvalidSublayerPath>(err)){
                changes->DidMaybeFixSublayer(this, typedErr->layer,
                                             typedErr->sublayerPath);
            }
        }
    }

    // Reload every layer used by prims at or under primPath, except for
    // local layers.
    SdfLayerHandleSet layersToReload;
    for (const PcpLayerStackPtr& layerStack: layerStacksAtOrUnderPrim) {
        for (const SdfLayerHandle& layer: layerStack->GetLayers()) {
            if (!_layerStack->HasLayer(layer)) {
                layersToReload.insert(layer);
            }
        }
    }

    SdfLayer::ReloadLayers(layersToReload);
}

void
PcpCache::_RemovePrimCache(const SdfPath& primPath, PcpLifeboat* lifeboat)
{
    _PrimIndexCache::iterator it = _primIndexCache.find(primPath);
    if (it != _primIndexCache.end()) {
        _primDependencies->Remove(it->second, lifeboat);
        PcpPrimIndex empty;
        it->second.Swap(empty);
    }
}

void
PcpCache::_RemovePrimAndPropertyCaches(const SdfPath& root,
                                       PcpLifeboat* lifeboat)
{
    std::pair<_PrimIndexCache::iterator, _PrimIndexCache::iterator> range =
        _primIndexCache.FindSubtreeRange(root);
    for (_PrimIndexCache::iterator i = range.first; i != range.second; ++i) {
        _primDependencies->Remove(i->second, lifeboat);
    }
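
    // Note: erasing the first entry of the subtree range removes the
    // entire subtree, since SdfPathTable::erase removes a path and all
    // of its descendants.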
    if (range.first != range.second) {
        _primIndexCache.erase(range.first);
    }

    // Remove all properties under any removed prim.
    _RemovePropertyCaches(root, lifeboat);
}

void
PcpCache::_RemovePropertyCache(const SdfPath& root, PcpLifeboat* lifeboat)
{
    _PropertyIndexCache::iterator it = _propertyIndexCache.find(root);
    if (it != _propertyIndexCache.end()) {
        PcpPropertyIndex empty;
        it->second.Swap(empty);
    }
}

void
PcpCache::_RemovePropertyCaches(const SdfPath& root, PcpLifeboat* lifeboat)
{
    std::pair<_PropertyIndexCache::iterator,
              _PropertyIndexCache::iterator> range =
        _propertyIndexCache.FindSubtreeRange(root);

    if (range.first != range.second) {
        _propertyIndexCache.erase(range.first);
    }
}

////////////////////////////////////////////////////////////////////////
// Private helper methods.

void
PcpCache::_ForEachLayerStack(
    const TfFunctionRef<void(const PcpLayerStackPtr&)>& fn) const
{
    _layerStackCache->ForEachLayerStack(fn);
}

void
PcpCache::_ForEachPrimIndex(
    const TfFunctionRef<void(const PcpPrimIndex&)>& fn) const
{
    for (const auto& entry : _primIndexCache) {
        const PcpPrimIndex& primIndex = entry.second;
        if (primIndex.IsValid()) {
            fn(primIndex);
        }
    }
}

PcpPrimIndex*
PcpCache::_GetPrimIndex(const SdfPath& path)
{
    _PrimIndexCache::iterator i = _primIndexCache.find(path);
    if (i != _primIndexCache.end()) {
        PcpPrimIndex &primIndex = i->second;
        if (primIndex.IsValid()) {
            return &primIndex;
        }
    }
    return NULL;
}

const PcpPrimIndex*
PcpCache::_GetPrimIndex(const SdfPath& path) const
{
    _PrimIndexCache::const_iterator i = _primIndexCache.find(path);
    if (i != _primIndexCache.end()) {
        const PcpPrimIndex &primIndex = i->second;
        if (primIndex.IsValid()) {
            return &primIndex;
        }
    }
    return NULL;
}

struct PcpCache::_ParallelIndexer
{
    using This = _ParallelIndexer;

    explicit _ParallelIndexer(PcpCache *cache,
                              const PcpLayerStackPtr &layerStack)
        : _cache(cache)
        , _layerStack(layerStack)
        , _resolver(ArGetResolver())
    {}

    void Prepare(_UntypedIndexingChildrenPredicate childrenPred,
                 PcpPrimIndexInputs baseInputs,
                 PcpErrorVector *allErrors,
                 const ArResolverScopedCache* parentCache,
                 const char *mallocTag1,
                 const char *mallocTag2) {
        _childrenPredicate = childrenPred;
        _baseInputs = baseInputs;
        // Set the includedPayloadsMutex in _baseInputs.
        _baseInputs.IncludedPayloadsMutex(&_includedPayloadsMutex);
        _allErrors = allErrors;
        _parentCache = parentCache;
        _mallocTag1 = mallocTag1;
        _mallocTag2 = mallocTag2;

        // Clear the roots to compute.
        _toCompute.clear();
    }

    // Run the added work and wait for it to complete.
    void RunAndWait() {
        WorkWithScopedParallelism([this]() {
            Pcp_Dependencies::ConcurrentPopulationContext
                populationContext(*_cache->_primDependencies);
            TF_FOR_ALL(i, _toCompute) {
                _dispatcher.Run(&This::_ComputeIndex, this,
                                i->first, i->second,
                                /*checkCache=*/true);
            }
            _dispatcher.Wait();
        });

        // Clear out results & working space. If stuff is huge, dump it
        // asynchronously, otherwise clear in place to possibly reuse heap
        // for future calls.
        constexpr size_t MaxSize = 1024;
        _ClearMaybeAsync(_toCompute, _toCompute.size() >= MaxSize);
    }

    // Add an index to compute.
    void ComputeIndex(const PcpPrimIndex *parentIndex, const SdfPath &path) {
        TF_AXIOM(parentIndex || path == SdfPath::AbsoluteRootPath());
        _toCompute.push_back(make_pair(parentIndex, path));
    }

private:

    template <class Container>
    void _ClearMaybeAsync(Container &c, bool async) {
        if (async) {
            WorkMoveDestroyAsync(c);
        }
        else {
            c.clear();
        }
    }

    // This function is run in parallel by the _dispatcher. It computes prim
    // indexes and publishes them to the cache.
    void _ComputeIndex(const PcpPrimIndex *parentIndex,
                       SdfPath path, bool checkCache) {
        TfAutoMallocTag2 tag(_mallocTag1, _mallocTag2);
        ArResolverScopedCache taskCache(_parentCache);

        // Check to see if we already have an index for this path. If we do,
        // don't bother computing it.
        const PcpPrimIndex *index = nullptr;
        if (checkCache) {
            tbb::spin_rw_mutex::scoped_lock
                lock(_primIndexCacheMutex, /*write=*/false);
            PcpCache::_PrimIndexCache::const_iterator
                i = _cache->_primIndexCache.find(path);
            if (i == _cache->_primIndexCache.end()) {
                // There is no cache entry for this path or any children.
                checkCache = false;
            } else if (i->second.IsValid()) {
                // There is a valid cache entry.
                index = &i->second;
            } else {
                // There is a cache entry but it is invalid. There still
                // may be valid cache entries for children, so we must
                // continue to checkCache. An example is when adding a
                // new empty spec to a layer stack already used by a
                // prim, causing a culled node to no longer be culled,
                // and the children to be unaffected.
            }
        }

        if (!index) {
            // We didn't find an index in the cache, so we must compute one.
            PcpPrimIndexOutputs outputs;

            // Establish inputs.
            PcpPrimIndexInputs inputs = _baseInputs;
            inputs.parentIndex = parentIndex;

            TF_VERIFY(parentIndex || path == SdfPath::AbsoluteRootPath());

            // Run indexing.
            PcpComputePrimIndex(
                path, _layerStack, inputs, &outputs, &_resolver);

            // Append any errors.
            if (!outputs.allErrors.empty()) {
                // Append errors.
                tbb::spin_mutex::scoped_lock lock(_allErrorsMutex);
                _allErrors->insert(_allErrors->end(),
                                   outputs.allErrors.begin(),
                                   outputs.allErrors.end());
            }

            // Update payload set if necessary.
            PcpPrimIndexOutputs::PayloadState
                payloadState = outputs.payloadState;
            if (payloadState == PcpPrimIndexOutputs::IncludedByPredicate ||
                payloadState == PcpPrimIndexOutputs::ExcludedByPredicate) {
                tbb::spin_rw_mutex::scoped_lock lock(_includedPayloadsMutex);
                if (payloadState == PcpPrimIndexOutputs::IncludedByPredicate) {
                    _cache->_includedPayloads.insert(path);
                }
                else {
                    _cache->_includedPayloads.erase(path);
                }
            }

            // Publish to cache.
            {
                tbb::spin_rw_mutex::scoped_lock lock(_primIndexCacheMutex);
                PcpPrimIndex *mutableIndex = &_cache->_primIndexCache[path];
                index = mutableIndex;
                TF_VERIFY(!index->IsValid(),
                          "PrimIndex for %s already exists in cache",
                          index->GetPath().GetText());
                mutableIndex->Swap(outputs.primIndex);
                lock.release();
                _cache->_primDependencies->Add(
                    *index, std::move(outputs.dynamicFileFormatDependency));
            }
        }

        // Invoke the client's predicate to see if we should do children.
        TfTokenVector namesToCompose;
        if (_childrenPredicate(*index, &namesToCompose)) {
            // Compute the children paths and add new tasks for them.
            TfTokenVector names;
            PcpTokenSet prohibitedNames;
            index->ComputePrimChildNames(&names, &prohibitedNames);
            for (const auto& name : names) {
                if (!namesToCompose.empty() &&
                    std::find(namesToCompose.begin(), namesToCompose.end(),
                              name) == namesToCompose.end()) {
                    continue;
                }

                _dispatcher.Run(
                    &This::_ComputeIndex, this, index,
                    path.AppendChild(name), checkCache);
            }
        }
    }

    // Fixed inputs.
    PcpCache * const _cache;
    const PcpLayerStackPtr _layerStack;
    ArResolver& _resolver;

    // Utils.
    tbb::spin_rw_mutex _primIndexCacheMutex;
    tbb::spin_rw_mutex _includedPayloadsMutex;
    WorkDispatcher _dispatcher;

    // Varying inputs.
    _UntypedIndexingChildrenPredicate _childrenPredicate;
    PcpPrimIndexInputs _baseInputs;
    PcpErrorVector *_allErrors;
    tbb::spin_mutex _allErrorsMutex;
    const ArResolverScopedCache* _parentCache;
    char const *_mallocTag1;
    char const *_mallocTag2;
    vector<pair<const PcpPrimIndex *, SdfPath> > _toCompute;
};

void
PcpCache::_ComputePrimIndexesInParallel(
    const SdfPathVector &roots,
    PcpErrorVector *allErrors,
    _UntypedIndexingChildrenPredicate childrenPred,
    _UntypedIndexingPayloadPredicate payloadPred,
    const char *mallocTag1,
    const char *mallocTag2)
{
    if (!IsUsd()) {
        TF_CODING_ERROR("Computing prim indexes in parallel is only "
                        "supported for USD caches.");
        return;
    }

    TF_PY_ALLOW_THREADS_IN_SCOPE();

    ArResolverScopedCache parentCache;
    TfAutoMallocTag2 tag(mallocTag1, mallocTag2);

    if (!_layerStack)
        ComputeLayerStack(GetLayerStackIdentifier(), allErrors);

    if (!_parallelIndexer) {
        _parallelIndexer.reset(new _ParallelIndexer(this, _layerStack));
    }

    _ParallelIndexer * const indexer = _parallelIndexer.get();

    // General strategy: Compute indexes recursively starting from roots, in
    // parallel. When we've computed an index, ask the children predicate if
    // we should continue to compute its children indexes. If so, we add all
    // the children as new tasks for threads to pick up.
    //
    // Once all the indexes are computed, add them to the cache and add their
    // dependencies to the dependencies structures.

    PcpPrimIndexInputs inputs = GetPrimIndexInputs()
        .USD(_usd)
        .IncludePayloadPredicate(payloadPred)
        ;

    indexer->Prepare(childrenPred, inputs, allErrors, &parentCache,
                     mallocTag1, mallocTag2);

    for (const auto& rootPath : roots) {
        // Obtain the parent index, if this is not the absolute root. Note
        // that the call to ComputePrimIndex below is not concurrency safe.
        const PcpPrimIndex *parentIndex =
            rootPath == SdfPath::AbsoluteRootPath() ? nullptr :
            &_ComputePrimIndexWithCompatibleInputs(
                rootPath.GetParentPath(), inputs, allErrors);
        indexer->ComputeIndex(parentIndex, rootPath);
    }

    // Do the indexing and wait for it to complete.
    indexer->RunAndWait();
}

const PcpPrimIndex &
PcpCache::ComputePrimIndex(const SdfPath & path, PcpErrorVector *allErrors)
{
    return _ComputePrimIndexWithCompatibleInputs(
        path, GetPrimIndexInputs().USD(_usd), allErrors);
}
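
// A minimal usage sketch (hypothetical path): compute a prim index and
// walk its composition graph.
//
//   PcpErrorVector errors;
//   const PcpPrimIndex &index =
//       cache.ComputePrimIndex(SdfPath("/World/Chars/Bob"), &errors);
//   for (const PcpNodeRef &node : index.GetNodeRange()) {
//       // Inspect each node's site and arc type here.
//   }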

const PcpPrimIndex &
PcpCache::_ComputePrimIndexWithCompatibleInputs(
    const SdfPath & path, const PcpPrimIndexInputs &inputs,
    PcpErrorVector *allErrors)
{
    // NOTE: TRACE_FUNCTION() is too much overhead here.

    // Check for a cache hit. Default constructed PcpPrimIndex objects
    // may live in the SdfPathTable for paths that haven't yet been computed,
    // so we have to explicitly check for that.
    _PrimIndexCache::const_iterator i = _primIndexCache.find(path);
    if (i != _primIndexCache.end() && i->second.IsValid()) {
        return i->second;
    }

    TRACE_FUNCTION();

    if (!_layerStack) {
        ComputeLayerStack(GetLayerStackIdentifier(), allErrors);
    }

    // Run the prim indexing algorithm.
    PcpPrimIndexOutputs outputs;
    PcpComputePrimIndex(path, _layerStack, inputs, &outputs);
    allErrors->insert(
        allErrors->end(),
        outputs.allErrors.begin(),
        outputs.allErrors.end());

    // Add dependencies.
    _primDependencies->Add(outputs.primIndex,
                           std::move(outputs.dynamicFileFormatDependency));

    // Update _includedPayloads if we included a discovered payload.
    if (outputs.payloadState == PcpPrimIndexOutputs::IncludedByPredicate) {
        _includedPayloads.insert(path);
    }
    if (outputs.payloadState == PcpPrimIndexOutputs::ExcludedByPredicate) {
        _includedPayloads.erase(path);
    }

    // Save the prim index.
    PcpPrimIndex &cacheEntry = _primIndexCache[path];
    cacheEntry.Swap(outputs.primIndex);

    return cacheEntry;
}

PcpPropertyIndex*
PcpCache::_GetPropertyIndex(const SdfPath& path)
{
    _PropertyIndexCache::iterator i = _propertyIndexCache.find(path);
    if (i != _propertyIndexCache.end() && !i->second.IsEmpty()) {
        return &i->second;
    }

    return NULL;
}

const PcpPropertyIndex*
PcpCache::_GetPropertyIndex(const SdfPath& path) const
{
    _PropertyIndexCache::const_iterator i = _propertyIndexCache.find(path);
    if (i != _propertyIndexCache.end() && !i->second.IsEmpty()) {
        return &i->second;
    }
    return NULL;
}

const PcpPropertyIndex &
PcpCache::ComputePropertyIndex(const SdfPath & path, PcpErrorVector *allErrors)
{
    TRACE_FUNCTION();

    static PcpPropertyIndex nullIndex;
    if (!path.IsPropertyPath()) {
        TF_CODING_ERROR("Path <%s> must be a property path", path.GetText());
        return nullIndex;
    }
    if (_usd) {
        // Disable computation and caching of property indexes in USD mode.
        // Although PcpBuildPropertyIndex does support this computation in
        // USD mode, we do not want to pay the cost of caching these.
        //
        // XXX: Maybe we shouldn't explicitly disallow this, but let
        //      consumers decide if they want this; if they don't, they
        //      should just avoid calling ComputePropertyIndex?
        TF_CODING_ERROR("PcpCache will not compute a cached property index in "
                        "USD mode; use PcpBuildPropertyIndex() instead. Path "
                        "was <%s>", path.GetText());
        return nullIndex;
    }

    // Check for a cache hit. Default constructed PcpPropertyIndex objects
    // may live in the SdfPathTable for paths that haven't yet been computed,
    // so we have to explicitly check for that.
    PcpPropertyIndex &cacheEntry = _propertyIndexCache[path];
    if (cacheEntry.IsEmpty()) {
        PcpBuildPropertyIndex(path, this, &cacheEntry, allErrors);
    }
    return cacheEntry;
}

////////////////////////////////////////////////////////////////////////
// Diagnostics

void
PcpCache::PrintStatistics() const
{
    Pcp_PrintCacheStatistics(this, std::cout);
}

PXR_NAMESPACE_CLOSE_SCOPE