/*
 * Copyright 2018 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/ccpr/GrCCPerFlushResources.h"

#include "include/private/GrRecordingContext.h"
#include "src/core/SkMakeUnique.h"
#include "src/gpu/GrClip.h"
#include "src/gpu/GrMemoryPool.h"
#include "src/gpu/GrOnFlushResourceProvider.h"
#include "src/gpu/GrRecordingContextPriv.h"
#include "src/gpu/GrRenderTargetContext.h"
#include "src/gpu/GrSurfaceContextPriv.h"
#include "src/gpu/ccpr/GrCCPathCache.h"
#include "src/gpu/ccpr/GrGSCoverageProcessor.h"
#include "src/gpu/ccpr/GrSampleMaskProcessor.h"
#include "src/gpu/ccpr/GrVSCoverageProcessor.h"
#include "src/gpu/geometry/GrShape.h"

using CoverageType = GrCCAtlas::CoverageType;
using FillBatchID = GrCCFiller::BatchID;
using StrokeBatchID = GrCCStroker::BatchID;
using PathInstance = GrCCPathProcessor::Instance;

static constexpr int kFillIdx = GrCCPerFlushResourceSpecs::kFillIdx;
static constexpr int kStrokeIdx = GrCCPerFlushResourceSpecs::kStrokeIdx;
namespace {

// Base class for an Op that renders a CCPR atlas.
class AtlasOp : public GrDrawOp {
public:
    FixedFunctionFlags fixedFunctionFlags() const override { return FixedFunctionFlags::kNone; }
    GrProcessorSet::Analysis finalize(const GrCaps&, const GrAppliedClip*,
                                      bool hasMixedSampledCoverage, GrClampType) override {
        return GrProcessorSet::EmptySetAnalysis();
    }
    CombineResult onCombineIfPossible(GrOp* other, const GrCaps&) override {
        // We will only make multiple copy ops if they have different source proxies.
        // TODO: make use of texture chaining.
        return CombineResult::kCannotCombine;
    }
    void onPrepare(GrOpFlushState*) override {}

protected:
    AtlasOp(uint32_t classID, sk_sp<const GrCCPerFlushResources> resources,
            const SkISize& drawBounds)
            : GrDrawOp(classID)
            , fResources(std::move(resources)) {
        this->setBounds(SkRect::MakeIWH(drawBounds.width(), drawBounds.height()),
                        GrOp::HasAABloat::kNo, GrOp::IsHairline::kNo);
    }

    const sk_sp<const GrCCPerFlushResources> fResources;
};

// Copies paths from a cached coverage count or msaa atlas into an 8-bit literal-coverage atlas.
class CopyAtlasOp : public AtlasOp {
public:
    DEFINE_OP_CLASS_ID

    static std::unique_ptr<GrDrawOp> Make(
            GrRecordingContext* context, sk_sp<const GrCCPerFlushResources> resources,
            sk_sp<GrTextureProxy> copyProxy, int baseInstance, int endInstance,
            const SkISize& drawBounds) {
        GrOpMemoryPool* pool = context->priv().opMemoryPool();

        return pool->allocate<CopyAtlasOp>(std::move(resources), std::move(copyProxy), baseInstance,
                                           endInstance, drawBounds);
    }

    const char* name() const override { return "CopyAtlasOp (CCPR)"; }

    void visitProxies(const VisitProxyFunc& fn) const override {
        fn(fSrcProxy.get(), GrMipMapped::kNo);
    }

    void onExecute(GrOpFlushState* flushState, const SkRect& chainBounds) override {
        SkASSERT(fSrcProxy);
        auto srcProxy = fSrcProxy.get();
        SkASSERT(srcProxy->isInstantiated());

        auto coverageMode = GrCCPathProcessor::GetCoverageMode(
                fResources->renderedPathCoverageType());
        GrCCPathProcessor pathProc(coverageMode, srcProxy->peekTexture(),
                                   srcProxy->textureSwizzle(), srcProxy->origin());

        GrPipeline pipeline(GrScissorTest::kDisabled, SkBlendMode::kSrc,
                            flushState->drawOpArgs().outputSwizzle());
        GrPipeline::FixedDynamicState dynamicState;
        dynamicState.fPrimitiveProcessorTextures = &srcProxy;

        pathProc.drawPaths(flushState, pipeline, &dynamicState, *fResources, fBaseInstance,
                           fEndInstance, this->bounds());
    }

private:
    friend class ::GrOpMemoryPool; // for ctor

    CopyAtlasOp(sk_sp<const GrCCPerFlushResources> resources, sk_sp<GrTextureProxy> srcProxy,
                int baseInstance, int endInstance, const SkISize& drawBounds)
            : AtlasOp(ClassID(), std::move(resources), drawBounds)
            , fSrcProxy(srcProxy)
            , fBaseInstance(baseInstance)
            , fEndInstance(endInstance) {
    }
    sk_sp<GrTextureProxy> fSrcProxy;
    const int fBaseInstance;
    const int fEndInstance;
};

// Renders coverage counts to a CCPR atlas using the resources' pre-filled GrCCPathParser.
template<typename ProcessorType> class RenderAtlasOp : public AtlasOp {
public:
    DEFINE_OP_CLASS_ID

    static std::unique_ptr<GrDrawOp> Make(
            GrRecordingContext* context, sk_sp<const GrCCPerFlushResources> resources,
            FillBatchID fillBatchID, StrokeBatchID strokeBatchID, const SkISize& drawBounds) {
        GrOpMemoryPool* pool = context->priv().opMemoryPool();

        return pool->allocate<RenderAtlasOp>(
                std::move(resources), fillBatchID, strokeBatchID, drawBounds);
    }

    // GrDrawOp interface.
    const char* name() const override { return "RenderAtlasOp (CCPR)"; }

    void onExecute(GrOpFlushState* flushState, const SkRect& chainBounds) override {
        ProcessorType proc;
        GrPipeline pipeline(GrScissorTest::kEnabled, SkBlendMode::kPlus,
                            flushState->drawOpArgs().outputSwizzle());
        fResources->filler().drawFills(flushState, &proc, pipeline, fFillBatchID, fDrawBounds);
        fResources->stroker().drawStrokes(flushState, &proc, fStrokeBatchID, fDrawBounds);
    }

private:
    friend class ::GrOpMemoryPool; // for ctor

    RenderAtlasOp(sk_sp<const GrCCPerFlushResources> resources, FillBatchID fillBatchID,
                  StrokeBatchID strokeBatchID, const SkISize& drawBounds)
            : AtlasOp(ClassID(), std::move(resources), drawBounds)
            , fFillBatchID(fillBatchID)
            , fStrokeBatchID(strokeBatchID)
            , fDrawBounds(SkIRect::MakeWH(drawBounds.width(), drawBounds.height())) {
    }

    const FillBatchID fFillBatchID;
    const StrokeBatchID fStrokeBatchID;
    const SkIRect fDrawBounds;
};

}

static int inst_buffer_count(const GrCCPerFlushResourceSpecs& specs) {
    return specs.fNumCachedPaths +
           // Copies get two instances per draw: 1 copy + 1 draw.
           (specs.fNumCopiedPaths[kFillIdx] + specs.fNumCopiedPaths[kStrokeIdx]) * 2 +
           specs.fNumRenderedPaths[kFillIdx] + specs.fNumRenderedPaths[kStrokeIdx];
           // No clips in instance buffers.
}

GrCCPerFlushResources::GrCCPerFlushResources(
        GrOnFlushResourceProvider* onFlushRP, CoverageType coverageType,
        const GrCCPerFlushResourceSpecs& specs)
        // Overallocate by one point so we can call Sk4f::Store at the final SkPoint in the array.
        // (See transform_path_pts below.)
        // FIXME: instead use built-in instructions to write only the first two lanes of an Sk4f.
        : fLocalDevPtsBuffer(SkTMax(specs.fRenderedPathStats[kFillIdx].fMaxPointsPerPath,
                                    specs.fRenderedPathStats[kStrokeIdx].fMaxPointsPerPath) + 1)
        , fFiller((CoverageType::kFP16_CoverageCount == coverageType)
                          ? GrCCFiller::Algorithm::kCoverageCount
                          : GrCCFiller::Algorithm::kStencilWindingCount,
                  specs.fNumRenderedPaths[kFillIdx] + specs.fNumClipPaths,
                  specs.fRenderedPathStats[kFillIdx].fNumTotalSkPoints,
                  specs.fRenderedPathStats[kFillIdx].fNumTotalSkVerbs,
                  specs.fRenderedPathStats[kFillIdx].fNumTotalConicWeights)
        , fStroker(specs.fNumRenderedPaths[kStrokeIdx],
                   specs.fRenderedPathStats[kStrokeIdx].fNumTotalSkPoints,
                   specs.fRenderedPathStats[kStrokeIdx].fNumTotalSkVerbs)
        , fCopyAtlasStack(CoverageType::kA8_LiteralCoverage, specs.fCopyAtlasSpecs,
                          onFlushRP->caps())
        , fRenderedAtlasStack(coverageType, specs.fRenderedAtlasSpecs, onFlushRP->caps())
        , fIndexBuffer(GrCCPathProcessor::FindIndexBuffer(onFlushRP))
        , fVertexBuffer(GrCCPathProcessor::FindVertexBuffer(onFlushRP))
        , fInstanceBuffer(onFlushRP->makeBuffer(GrGpuBufferType::kVertex,
                                                inst_buffer_count(specs) * sizeof(PathInstance)))
        , fNextCopyInstanceIdx(0)
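        // Copy instances occupy the front of the instance buffer, so freshly rendered path
        // instances begin right after the last copied path.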
        , fNextPathInstanceIdx(
                specs.fNumCopiedPaths[kFillIdx] + specs.fNumCopiedPaths[kStrokeIdx]) {
    if (!fIndexBuffer) {
        SkDebugf("WARNING: failed to allocate CCPR index buffer. No paths will be drawn.\n");
        return;
    }
    if (!fVertexBuffer) {
        SkDebugf("WARNING: failed to allocate CCPR vertex buffer. No paths will be drawn.\n");
        return;
    }
    if (!fInstanceBuffer) {
        SkDebugf("WARNING: failed to allocate CCPR instance buffer. No paths will be drawn.\n");
        return;
    }
    fPathInstanceData = static_cast<PathInstance*>(fInstanceBuffer->map());
    SkASSERT(fPathInstanceData);

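    // In MSAA mode, allocate one stencil-resolve rect instance for every rendered path: fills,
    // strokes, and clip paths all resolve their stencil winding values to coverage.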
    if (CoverageType::kA8_Multisample == coverageType) {
        int numRenderedPaths =
                specs.fNumRenderedPaths[kFillIdx] + specs.fNumRenderedPaths[kStrokeIdx] +
                specs.fNumClipPaths;
        fStencilResolveBuffer = onFlushRP->makeBuffer(
                GrGpuBufferType::kVertex,
                numRenderedPaths * sizeof(GrStencilAtlasOp::ResolveRectInstance));
        fStencilResolveInstanceData = static_cast<GrStencilAtlasOp::ResolveRectInstance*>(
                fStencilResolveBuffer->map());
        SkASSERT(fStencilResolveInstanceData);
        SkDEBUGCODE(fEndStencilResolveInstance = numRenderedPaths);
    }

    SkDEBUGCODE(fEndCopyInstance =
                        specs.fNumCopiedPaths[kFillIdx] + specs.fNumCopiedPaths[kStrokeIdx]);
    SkDEBUGCODE(fEndPathInstance = inst_buffer_count(specs));
}

void GrCCPerFlushResources::upgradeEntryToLiteralCoverageAtlas(
        GrCCPathCache* pathCache, GrOnFlushResourceProvider* onFlushRP, GrCCPathCacheEntry* entry,
        GrFillRule fillRule) {
    using ReleaseAtlasResult = GrCCPathCacheEntry::ReleaseAtlasResult;
    SkASSERT(this->isMapped());
    SkASSERT(fNextCopyInstanceIdx < fEndCopyInstance);

    const GrCCCachedAtlas* cachedAtlas = entry->cachedAtlas();
    SkASSERT(cachedAtlas);
    SkASSERT(cachedAtlas->getOnFlushProxy());

    if (CoverageType::kA8_LiteralCoverage == cachedAtlas->coverageType()) {
        // This entry has already been upgraded to literal coverage. The path must have been drawn
        // multiple times during the flush.
        SkDEBUGCODE(--fEndCopyInstance);
        return;
    }

    SkIVector newAtlasOffset;
    if (GrCCAtlas* retiredAtlas = fCopyAtlasStack.addRect(entry->devIBounds(), &newAtlasOffset)) {
        // We did not fit in the previous copy atlas and it was retired. We will render the ranges
        // up until fCopyPathRanges.count() into the retired atlas during finalize().
        retiredAtlas->setFillBatchID(fCopyPathRanges.count());
        fCurrCopyAtlasRangesIdx = fCopyPathRanges.count();
    }

    this->recordCopyPathInstance(
            *entry, newAtlasOffset, fillRule, sk_ref_sp(cachedAtlas->getOnFlushProxy()));

    sk_sp<GrTexture> previousAtlasTexture =
            sk_ref_sp(cachedAtlas->getOnFlushProxy()->peekTexture());
    GrCCAtlas* newAtlas = &fCopyAtlasStack.current();
    if (ReleaseAtlasResult::kDidInvalidateFromCache ==
            entry->upgradeToLiteralCoverageAtlas(pathCache, onFlushRP, newAtlas, newAtlasOffset)) {
        // This texture just got booted out of the cache. Keep it around, in case we might be able
        // to recycle it for a new atlas. We can recycle it because copying happens before rendering
        // new paths, and every path from the atlas that we're planning to use this flush will be
        // copied to a new atlas. We'll never copy some and leave others.
        fRecyclableAtlasTextures.push_back(std::move(previousAtlasTexture));
    }
}

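// Inserts an element at 'idx' by shifting the trailing elements up one slot with memcpy and
// placement-constructing into the vacated slot. This relies on T being safely relocatable by a
// raw memcpy (true for the CopyPathRange struct it is used with below).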
template<typename T, typename... Args>
static void emplace_at_memcpy(SkTArray<T>* array, int idx, Args&&... args) {
    if (int moveCount = array->count() - idx) {
        array->push_back();
        T* location = array->begin() + idx;
        memcpy(location+1, location, moveCount * sizeof(T));
        new (location) T(std::forward<Args>(args)...);
    } else {
        array->emplace_back(std::forward<Args>(args)...);
    }
}

void GrCCPerFlushResources::recordCopyPathInstance(
        const GrCCPathCacheEntry& entry, const SkIVector& newAtlasOffset, GrFillRule fillRule,
        sk_sp<GrTextureProxy> srcProxy) {
    SkASSERT(fNextCopyInstanceIdx < fEndCopyInstance);

    // Write the instance at the back of the array.
    int currentInstanceIdx = fNextCopyInstanceIdx++;
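    // Four half-float 1.0 values (opaque white) packed into a single 64-bit instance color.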
    constexpr uint64_t kWhite = (((uint64_t) SK_Half1) <<  0) |
                                (((uint64_t) SK_Half1) << 16) |
                                (((uint64_t) SK_Half1) << 32) |
                                (((uint64_t) SK_Half1) << 48);
    fPathInstanceData[currentInstanceIdx].set(entry, newAtlasOffset, kWhite, fillRule);

    // Percolate the instance forward until it's contiguous with other instances that share the same
    // proxy.
    for (int i = fCopyPathRanges.count() - 1; i >= fCurrCopyAtlasRangesIdx; --i) {
        if (fCopyPathRanges[i].fSrcProxy == srcProxy) {
            ++fCopyPathRanges[i].fCount;
            return;
        }
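        // Different proxy: hop the new instance ahead of range i by swapping it with that range's
        // first instance, keeping every range's instances contiguous.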
        int rangeFirstInstanceIdx = currentInstanceIdx - fCopyPathRanges[i].fCount;
        std::swap(fPathInstanceData[rangeFirstInstanceIdx], fPathInstanceData[currentInstanceIdx]);
        currentInstanceIdx = rangeFirstInstanceIdx;
    }

    // An instance with this particular proxy did not yet exist in the array. Add a range for it.
    emplace_at_memcpy(&fCopyPathRanges, fCurrCopyAtlasRangesIdx, std::move(srcProxy), 1);
}

static bool transform_path_pts(
        const SkMatrix& m, const SkPath& path, const SkAutoSTArray<32, SkPoint>& outDevPts,
        GrOctoBounds* octoBounds) {
    const SkPoint* pts = SkPathPriv::PointData(path);
    int numPts = path.countPoints();
    SkASSERT(numPts + 1 <= outDevPts.count());
    SkASSERT(numPts);

    // m45 transforms path points into "45 degree" device space. A bounding box in this space gives
    // the circumscribing octagon's diagonals. We could use SK_ScalarRoot2Over2, but an orthonormal
    // transform is not necessary as long as the shader uses the correct inverse.
    SkMatrix m45;
    m45.setSinCos(1, 1);
    m45.preConcat(m);

    // X,Y,T are two parallel view matrices that accumulate two bounding boxes as they map points:
    // device-space bounds and "45 degree" device-space bounds (| 1 -1 | * devCoords).
    //                                                          | 1  1 |
    Sk4f X = Sk4f(m.getScaleX(), m.getSkewY(), m45.getScaleX(), m45.getSkewY());
    Sk4f Y = Sk4f(m.getSkewX(), m.getScaleY(), m45.getSkewX(), m45.getScaleY());
    Sk4f T = Sk4f(m.getTranslateX(), m.getTranslateY(), m45.getTranslateX(), m45.getTranslateY());

    // Map the path's points to device space and accumulate bounding boxes.
    Sk4f devPt = SkNx_fma(Y, Sk4f(pts[0].y()), T);
    devPt = SkNx_fma(X, Sk4f(pts[0].x()), devPt);
    Sk4f topLeft = devPt;
    Sk4f bottomRight = devPt;

    // Store all 4 values [dev.x, dev.y, dev45.x, dev45.y]. We are only interested in the first two,
    // and will overwrite [dev45.x, dev45.y] with the next point. This is why the dst buffer must
    // be at least one larger than the number of points.
    devPt.store(&outDevPts[0]);

    for (int i = 1; i < numPts; ++i) {
        devPt = SkNx_fma(Y, Sk4f(pts[i].y()), T);
        devPt = SkNx_fma(X, Sk4f(pts[i].x()), devPt);
        topLeft = Sk4f::Min(topLeft, devPt);
        bottomRight = Sk4f::Max(bottomRight, devPt);
        devPt.store(&outDevPts[i]);
    }

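    // Multiplying by zero yields zero only for finite values; infinities and NaNs produce NaN,
    // which fails the comparison below and flags the bounds as unusable.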
    if (!(Sk4f(0) == topLeft*0).allTrue() || !(Sk4f(0) == bottomRight*0).allTrue()) {
        // The bounds are infinite or NaN.
        return false;
    }

    SkPoint topLeftPts[2], bottomRightPts[2];
    topLeft.store(topLeftPts);
    bottomRight.store(bottomRightPts);

    const SkRect& devBounds = SkRect::MakeLTRB(
            topLeftPts[0].x(), topLeftPts[0].y(), bottomRightPts[0].x(), bottomRightPts[0].y());
    const SkRect& devBounds45 = SkRect::MakeLTRB(
            topLeftPts[1].x(), topLeftPts[1].y(), bottomRightPts[1].x(), bottomRightPts[1].y());

    octoBounds->set(devBounds, devBounds45);
    return true;
}

GrCCAtlas* GrCCPerFlushResources::renderShapeInAtlas(
        const SkIRect& clipIBounds, const SkMatrix& m, const GrShape& shape, float strokeDevWidth,
        GrOctoBounds* octoBounds, SkIRect* devIBounds, SkIVector* devToAtlasOffset) {
    SkASSERT(this->isMapped());
    SkASSERT(fNextPathInstanceIdx < fEndPathInstance);

    SkPath path;
    shape.asPath(&path);
    if (path.isEmpty()) {
        SkDEBUGCODE(--fEndPathInstance);
        SkDEBUGCODE(--fEndStencilResolveInstance);
        return nullptr;
    }
    if (!transform_path_pts(m, path, fLocalDevPtsBuffer, octoBounds)) {
        // The transformed path had infinite or NaN bounds.
        SkDEBUGCODE(--fEndPathInstance);
        SkDEBUGCODE(--fEndStencilResolveInstance);
        return nullptr;
    }

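    // Strokes reach beyond the path's geometric bounds, so outset the octagonal bounds by the
    // stroke's device-space inflation radius (accounts for joins, caps, and the miter limit).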
    const SkStrokeRec& stroke = shape.style().strokeRec();
    if (!stroke.isFillStyle()) {
        float r = SkStrokeRec::GetInflationRadius(
                stroke.getJoin(), stroke.getMiter(), stroke.getCap(), strokeDevWidth);
        octoBounds->outset(r);
    }

    GrScissorTest enableScissorInAtlas;
    if (clipIBounds.contains(octoBounds->bounds())) {
        enableScissorInAtlas = GrScissorTest::kDisabled;
    } else if (octoBounds->clip(clipIBounds)) {
        enableScissorInAtlas = GrScissorTest::kEnabled;
    } else {
        // The clip and octo bounds do not intersect. Draw nothing.
        SkDEBUGCODE(--fEndPathInstance);
        SkDEBUGCODE(--fEndStencilResolveInstance);
        return nullptr;
    }
    octoBounds->roundOut(devIBounds);
    SkASSERT(clipIBounds.contains(*devIBounds));

    this->placeRenderedPathInAtlas(*devIBounds, enableScissorInAtlas, devToAtlasOffset);

    GrFillRule fillRule;
    if (stroke.isFillStyle()) {
        SkASSERT(0 == strokeDevWidth);
        fFiller.parseDeviceSpaceFill(path, fLocalDevPtsBuffer.begin(), enableScissorInAtlas,
                                     *devIBounds, *devToAtlasOffset);
        fillRule = GrFillRuleForSkPath(path);
    } else {
        // Stroke-and-fill is not yet supported.
        SkASSERT(SkStrokeRec::kStroke_Style == stroke.getStyle() || stroke.isHairlineStyle());
        SkASSERT(!stroke.isHairlineStyle() || 1 == strokeDevWidth);
        fStroker.parseDeviceSpaceStroke(
                path, fLocalDevPtsBuffer.begin(), stroke, strokeDevWidth, enableScissorInAtlas,
                *devIBounds, *devToAtlasOffset);
        fillRule = GrFillRule::kNonzero;
    }

    if (GrCCAtlas::CoverageType::kA8_Multisample == this->renderedPathCoverageType()) {
        this->recordStencilResolveInstance(*devIBounds, *devToAtlasOffset, fillRule);
    }

    return &fRenderedAtlasStack.current();
}

const GrCCAtlas* GrCCPerFlushResources::renderDeviceSpacePathInAtlas(
        const SkIRect& clipIBounds, const SkPath& devPath, const SkIRect& devPathIBounds,
        GrFillRule fillRule, SkIVector* devToAtlasOffset) {
    SkASSERT(this->isMapped());

    if (devPath.isEmpty()) {
        SkDEBUGCODE(--fEndStencilResolveInstance);
        return nullptr;
    }

    GrScissorTest enableScissorInAtlas;
    SkIRect clippedPathIBounds;
    if (clipIBounds.contains(devPathIBounds)) {
        clippedPathIBounds = devPathIBounds;
        enableScissorInAtlas = GrScissorTest::kDisabled;
    } else if (clippedPathIBounds.intersect(clipIBounds, devPathIBounds)) {
        enableScissorInAtlas = GrScissorTest::kEnabled;
    } else {
        // The clip and path bounds do not intersect. Draw nothing.
        SkDEBUGCODE(--fEndStencilResolveInstance);
        return nullptr;
    }

    this->placeRenderedPathInAtlas(clippedPathIBounds, enableScissorInAtlas, devToAtlasOffset);
    fFiller.parseDeviceSpaceFill(devPath, SkPathPriv::PointData(devPath), enableScissorInAtlas,
                                 clippedPathIBounds, *devToAtlasOffset);

    // In MSAA mode we also record an internal draw instance that will be used to resolve stencil
    // winding values to coverage when the atlas is generated.
    if (GrCCAtlas::CoverageType::kA8_Multisample == this->renderedPathCoverageType()) {
        this->recordStencilResolveInstance(clippedPathIBounds, *devToAtlasOffset, fillRule);
    }

    return &fRenderedAtlasStack.current();
}

void GrCCPerFlushResources::placeRenderedPathInAtlas(
        const SkIRect& clippedPathIBounds, GrScissorTest scissorTest, SkIVector* devToAtlasOffset) {
    if (GrCCAtlas* retiredAtlas =
                fRenderedAtlasStack.addRect(clippedPathIBounds, devToAtlasOffset)) {
        // We did not fit in the previous coverage count atlas and it was retired. Close the path
        // parser's current batch (which does not yet include the path we just parsed). We will
        // render this batch into the retired atlas during finalize().
        retiredAtlas->setFillBatchID(fFiller.closeCurrentBatch());
        retiredAtlas->setStrokeBatchID(fStroker.closeCurrentBatch());
        retiredAtlas->setEndStencilResolveInstance(fNextStencilResolveInstanceIdx);
    }
}

void GrCCPerFlushResources::recordStencilResolveInstance(
        const SkIRect& clippedPathIBounds, const SkIVector& devToAtlasOffset, GrFillRule fillRule) {
    SkASSERT(GrCCAtlas::CoverageType::kA8_Multisample == this->renderedPathCoverageType());
    SkASSERT(fNextStencilResolveInstanceIdx < fEndStencilResolveInstance);

    SkIRect atlasIBounds = clippedPathIBounds.makeOffset(devToAtlasOffset);
    if (GrFillRule::kEvenOdd == fillRule) {
        // Make even/odd fills counterclockwise. The resolve draw uses two-sided stencil, with
        // "nonzero" settings in front and "even/odd" settings in back.
        std::swap(atlasIBounds.fLeft, atlasIBounds.fRight);
    }
    fStencilResolveInstanceData[fNextStencilResolveInstanceIdx++] = {
            (int16_t)atlasIBounds.left(), (int16_t)atlasIBounds.top(),
            (int16_t)atlasIBounds.right(), (int16_t)atlasIBounds.bottom()};
}

bool GrCCPerFlushResources::finalize(GrOnFlushResourceProvider* onFlushRP) {
    SkASSERT(this->isMapped());
    SkASSERT(fNextPathInstanceIdx == fEndPathInstance);
    SkASSERT(fNextCopyInstanceIdx == fEndCopyInstance);
    SkASSERT(GrCCAtlas::CoverageType::kA8_Multisample != this->renderedPathCoverageType() ||
             fNextStencilResolveInstanceIdx == fEndStencilResolveInstance);

    fInstanceBuffer->unmap();
    fPathInstanceData = nullptr;

    if (fStencilResolveBuffer) {
        fStencilResolveBuffer->unmap();
        fStencilResolveInstanceData = nullptr;
    }

    if (!fCopyAtlasStack.empty()) {
        fCopyAtlasStack.current().setFillBatchID(fCopyPathRanges.count());
        fCurrCopyAtlasRangesIdx = fCopyPathRanges.count();
    }
    if (!fRenderedAtlasStack.empty()) {
        fRenderedAtlasStack.current().setFillBatchID(fFiller.closeCurrentBatch());
        fRenderedAtlasStack.current().setStrokeBatchID(fStroker.closeCurrentBatch());
        fRenderedAtlasStack.current().setEndStencilResolveInstance(fNextStencilResolveInstanceIdx);
    }

    // Build the GPU buffers to render path coverage counts. (This must not happen until after the
    // final calls to fFiller/fStroker.closeCurrentBatch().)
    if (!fFiller.prepareToDraw(onFlushRP)) {
        return false;
    }
    if (!fStroker.prepareToDraw(onFlushRP)) {
        return false;
    }

    // Draw the copies from coverage count or msaa atlas(es) into 8-bit cached atlas(es).
    int copyRangeIdx = 0;
    int baseCopyInstance = 0;
    for (GrCCAtlasStack::Iter atlas(fCopyAtlasStack); atlas.next();) {
        int endCopyRange = atlas->getFillBatchID();
        SkASSERT(endCopyRange > copyRangeIdx);

        auto rtc = atlas->makeRenderTargetContext(onFlushRP);
        for (; copyRangeIdx < endCopyRange; ++copyRangeIdx) {
            const CopyPathRange& copyRange = fCopyPathRanges[copyRangeIdx];
            int endCopyInstance = baseCopyInstance + copyRange.fCount;
            if (rtc) {
                auto op = CopyAtlasOp::Make(
                        rtc->surfPriv().getContext(), sk_ref_sp(this), copyRange.fSrcProxy,
                        baseCopyInstance, endCopyInstance, atlas->drawBounds());
                rtc->addDrawOp(GrNoClip(), std::move(op));
            }
            baseCopyInstance = endCopyInstance;
        }
    }
    SkASSERT(fCopyPathRanges.count() == copyRangeIdx);
    SkASSERT(fNextCopyInstanceIdx == baseCopyInstance);
    SkASSERT(baseCopyInstance == fEndCopyInstance);

    // Render the coverage count atlas(es).
    int baseStencilResolveInstance = 0;
    for (GrCCAtlasStack::Iter atlas(fRenderedAtlasStack); atlas.next();) {
        // Copies will be finished by the time we get to rendering new atlases. See if we can
        // recycle any previous invalidated atlas textures instead of creating new ones.
        sk_sp<GrTexture> backingTexture;
        for (sk_sp<GrTexture>& texture : fRecyclableAtlasTextures) {
            if (texture && atlas->currentHeight() == texture->height() &&
                    atlas->currentWidth() == texture->width()) {
                backingTexture = skstd::exchange(texture, nullptr);
                break;
            }
        }

        if (auto rtc = atlas->makeRenderTargetContext(onFlushRP, std::move(backingTexture))) {
            std::unique_ptr<GrDrawOp> op;
            if (CoverageType::kA8_Multisample == fRenderedAtlasStack.coverageType()) {
                op = GrStencilAtlasOp::Make(
                        rtc->surfPriv().getContext(), sk_ref_sp(this), atlas->getFillBatchID(),
                        atlas->getStrokeBatchID(), baseStencilResolveInstance,
                        atlas->getEndStencilResolveInstance(), atlas->drawBounds());
            } else if (onFlushRP->caps()->shaderCaps()->geometryShaderSupport()) {
                op = RenderAtlasOp<GrGSCoverageProcessor>::Make(
                        rtc->surfPriv().getContext(), sk_ref_sp(this), atlas->getFillBatchID(),
                        atlas->getStrokeBatchID(), atlas->drawBounds());
            } else {
                op = RenderAtlasOp<GrVSCoverageProcessor>::Make(
                        rtc->surfPriv().getContext(), sk_ref_sp(this), atlas->getFillBatchID(),
                        atlas->getStrokeBatchID(), atlas->drawBounds());
            }
            rtc->addDrawOp(GrNoClip(), std::move(op));
            if (rtc->proxy()->requiresManualMSAAResolve()) {
                onFlushRP->addTextureResolveTask(sk_ref_sp(rtc->proxy()->asTextureProxy()),
                                                 GrSurfaceProxy::ResolveFlags::kMSAA);
            }
        }

        SkASSERT(atlas->getEndStencilResolveInstance() >= baseStencilResolveInstance);
        baseStencilResolveInstance = atlas->getEndStencilResolveInstance();
    }
    SkASSERT(GrCCAtlas::CoverageType::kA8_Multisample != this->renderedPathCoverageType() ||
             baseStencilResolveInstance == fEndStencilResolveInstance);

    return true;
}

void GrCCPerFlushResourceSpecs::cancelCopies() {
    // Convert copies to cached draws.
    fNumCachedPaths += fNumCopiedPaths[kFillIdx] + fNumCopiedPaths[kStrokeIdx];
    fNumCopiedPaths[kFillIdx] = fNumCopiedPaths[kStrokeIdx] = 0;
    fCopyPathStats[kFillIdx] = fCopyPathStats[kStrokeIdx] = GrCCRenderedPathStats();
    fCopyAtlasSpecs = GrCCAtlas::Specs();
}