/*
 * Copyright 2018 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/ccpr/GrCCPerFlushResources.h"

#include "include/private/GrRecordingContext.h"
#include "src/gpu/GrClip.h"
#include "src/gpu/GrMemoryPool.h"
#include "src/gpu/GrOnFlushResourceProvider.h"
#include "src/gpu/GrRecordingContextPriv.h"
#include "src/gpu/GrRenderTargetContext.h"
#include "src/gpu/GrSurfaceContextPriv.h"
#include "src/gpu/ccpr/GrCCPathCache.h"
#include "src/gpu/ccpr/GrGSCoverageProcessor.h"
#include "src/gpu/ccpr/GrSampleMaskProcessor.h"
#include "src/gpu/ccpr/GrVSCoverageProcessor.h"
#include "src/gpu/geometry/GrShape.h"
#include <algorithm>

using CoverageType = GrCCAtlas::CoverageType;
using FillBatchID = GrCCFiller::BatchID;
using StrokeBatchID = GrCCStroker::BatchID;
using PathInstance = GrCCPathProcessor::Instance;

static constexpr int kFillIdx = GrCCPerFlushResourceSpecs::kFillIdx;
static constexpr int kStrokeIdx = GrCCPerFlushResourceSpecs::kStrokeIdx;

namespace {

// Base class for an Op that renders a CCPR atlas.
class AtlasOp : public GrDrawOp {
public:
    FixedFunctionFlags fixedFunctionFlags() const override { return FixedFunctionFlags::kNone; }
    GrProcessorSet::Analysis finalize(const GrCaps&, const GrAppliedClip*,
                                      bool hasMixedSampledCoverage, GrClampType) override {
        return GrProcessorSet::EmptySetAnalysis();
    }
    CombineResult onCombineIfPossible(GrOp* other, GrRecordingContext::Arenas*,
                                      const GrCaps&) override {
        // We will only make multiple copy ops if they have different source proxies.
        // TODO: make use of texture chaining.
        return CombineResult::kCannotCombine;
    }

protected:
    AtlasOp(uint32_t classID, sk_sp<const GrCCPerFlushResources> resources,
            const SkISize& drawBounds)
            : GrDrawOp(classID)
            , fResources(std::move(resources)) {
        this->setBounds(SkRect::MakeIWH(drawBounds.width(), drawBounds.height()),
                        GrOp::HasAABloat::kNo, GrOp::IsHairline::kNo);
    }

    const sk_sp<const GrCCPerFlushResources> fResources;

private:
    void onPrePrepare(GrRecordingContext*,
                      const GrSurfaceProxyView* outputView,
                      GrAppliedClip*,
                      const GrXferProcessor::DstProxyView&) final {}
    void onPrepare(GrOpFlushState*) final {}
};

// Copies paths from a cached coverage count or msaa atlas into an 8-bit literal-coverage atlas.
class CopyAtlasOp : public AtlasOp {
public:
    DEFINE_OP_CLASS_ID

    static std::unique_ptr<GrDrawOp> Make(
            GrRecordingContext* context, sk_sp<const GrCCPerFlushResources> resources,
            sk_sp<GrTextureProxy> copyProxy, int baseInstance, int endInstance,
            const SkISize& drawBounds) {
        GrOpMemoryPool* pool = context->priv().opMemoryPool();

        return pool->allocate<CopyAtlasOp>(std::move(resources), std::move(copyProxy), baseInstance,
                                           endInstance, drawBounds);
    }

    const char* name() const override { return "CopyAtlasOp (CCPR)"; }

    void visitProxies(const VisitProxyFunc& fn) const override {
        fn(fSrcProxy.get(), GrMipMapped::kNo);
    }

    void onExecute(GrOpFlushState* flushState, const SkRect& chainBounds) override {
        SkASSERT(fSrcProxy);
        SkASSERT(fSrcProxy->isInstantiated());

        auto coverageMode = GrCCAtlas::CoverageTypeToPathCoverageMode(
                fResources->renderedPathCoverageType());
        GrColorType ct = GrCCAtlas::CoverageTypeToColorType(fResources->renderedPathCoverageType());
        GrSwizzle swizzle = flushState->caps().getReadSwizzle(fSrcProxy->backendFormat(), ct);
        GrCCPathProcessor pathProc(coverageMode, fSrcProxy->peekTexture(), swizzle,
                                   GrCCAtlas::kTextureOrigin);

        GrPipeline pipeline(GrScissorTest::kDisabled, SkBlendMode::kSrc,
                            flushState->drawOpArgs().writeSwizzle());

        pathProc.drawPaths(flushState, pipeline, *fSrcProxy, *fResources, fBaseInstance,
                           fEndInstance, this->bounds());
    }

private:
    friend class ::GrOpMemoryPool; // for ctor

    CopyAtlasOp(sk_sp<const GrCCPerFlushResources> resources, sk_sp<GrTextureProxy> srcProxy,
                int baseInstance, int endInstance, const SkISize& drawBounds)
            : AtlasOp(ClassID(), std::move(resources), drawBounds)
            , fSrcProxy(srcProxy)
            , fBaseInstance(baseInstance)
            , fEndInstance(endInstance) {
    }
    sk_sp<GrTextureProxy> fSrcProxy;
    const int fBaseInstance;
    const int fEndInstance;
};

// Renders coverage counts to a CCPR atlas using the resources' pre-filled GrCCPathParser.
template<typename ProcessorType> class RenderAtlasOp : public AtlasOp {
public:
    DEFINE_OP_CLASS_ID

    static std::unique_ptr<GrDrawOp> Make(
            GrRecordingContext* context, sk_sp<const GrCCPerFlushResources> resources,
            FillBatchID fillBatchID, StrokeBatchID strokeBatchID, const SkISize& drawBounds) {
        GrOpMemoryPool* pool = context->priv().opMemoryPool();

        return pool->allocate<RenderAtlasOp>(
                std::move(resources), fillBatchID, strokeBatchID, drawBounds);
    }

    // GrDrawOp interface.
    const char* name() const override { return "RenderAtlasOp (CCPR)"; }

    void onExecute(GrOpFlushState* flushState, const SkRect& chainBounds) override {
        ProcessorType proc;
        GrPipeline pipeline(GrScissorTest::kEnabled, SkBlendMode::kPlus,
                            flushState->drawOpArgs().writeSwizzle());
        fResources->filler().drawFills(flushState, &proc, pipeline, fFillBatchID, fDrawBounds);
        fResources->stroker().drawStrokes(flushState, &proc, fStrokeBatchID, fDrawBounds);
    }

private:
    friend class ::GrOpMemoryPool; // for ctor

    RenderAtlasOp(sk_sp<const GrCCPerFlushResources> resources, FillBatchID fillBatchID,
                  StrokeBatchID strokeBatchID, const SkISize& drawBounds)
            : AtlasOp(ClassID(), std::move(resources), drawBounds)
            , fFillBatchID(fillBatchID)
            , fStrokeBatchID(strokeBatchID)
            , fDrawBounds(SkIRect::MakeWH(drawBounds.width(), drawBounds.height())) {
    }

    const FillBatchID fFillBatchID;
    const StrokeBatchID fStrokeBatchID;
    const SkIRect fDrawBounds;
};

}  // namespace

static int inst_buffer_count(const GrCCPerFlushResourceSpecs& specs) {
    return specs.fNumCachedPaths +
           // Copies get two instances per draw: 1 copy + 1 draw.
           (specs.fNumCopiedPaths[kFillIdx] + specs.fNumCopiedPaths[kStrokeIdx]) * 2 +
           specs.fNumRenderedPaths[kFillIdx] + specs.fNumRenderedPaths[kStrokeIdx];
           // No clips in instance buffers.
}

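// Copy instances sit at the front of the instance buffer, one per copied path; the draw
// instances for cached, copied, and rendered paths follow. This is why fNextPathInstanceIdx
// below starts at the total copied-path count while fNextCopyInstanceIdx starts at zero.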
GrCCPerFlushResources::GrCCPerFlushResources(
        GrOnFlushResourceProvider* onFlushRP, CoverageType coverageType,
        const GrCCPerFlushResourceSpecs& specs)
        // Overallocate by one point so we can call Sk4f::Store at the final SkPoint in the array.
        // (See transform_path_pts below.)
        // FIXME: instead use built-in instructions to write only the first two lanes of an Sk4f.
        : fLocalDevPtsBuffer(std::max(specs.fRenderedPathStats[kFillIdx].fMaxPointsPerPath,
                                      specs.fRenderedPathStats[kStrokeIdx].fMaxPointsPerPath) + 1)
        , fFiller((CoverageType::kFP16_CoverageCount == coverageType)
                          ? GrCCFiller::Algorithm::kCoverageCount
                          : GrCCFiller::Algorithm::kStencilWindingCount,
                  specs.fNumRenderedPaths[kFillIdx] + specs.fNumClipPaths,
                  specs.fRenderedPathStats[kFillIdx].fNumTotalSkPoints,
                  specs.fRenderedPathStats[kFillIdx].fNumTotalSkVerbs,
                  specs.fRenderedPathStats[kFillIdx].fNumTotalConicWeights)
        , fStroker(specs.fNumRenderedPaths[kStrokeIdx],
                   specs.fRenderedPathStats[kStrokeIdx].fNumTotalSkPoints,
                   specs.fRenderedPathStats[kStrokeIdx].fNumTotalSkVerbs)
        , fCopyAtlasStack(CoverageType::kA8_LiteralCoverage, specs.fCopyAtlasSpecs,
                          onFlushRP->caps())
        , fRenderedAtlasStack(coverageType, specs.fRenderedAtlasSpecs, onFlushRP->caps())
        , fIndexBuffer(GrCCPathProcessor::FindIndexBuffer(onFlushRP))
        , fVertexBuffer(GrCCPathProcessor::FindVertexBuffer(onFlushRP))
        , fNextCopyInstanceIdx(0)
        , fNextPathInstanceIdx(
                specs.fNumCopiedPaths[kFillIdx] + specs.fNumCopiedPaths[kStrokeIdx]) {
    if (!fIndexBuffer) {
        SkDebugf("WARNING: failed to allocate CCPR index buffer. No paths will be drawn.\n");
        return;
    }
    if (!fVertexBuffer) {
        SkDebugf("WARNING: failed to allocate CCPR vertex buffer. No paths will be drawn.\n");
        return;
    }
    fPathInstanceBuffer.resetAndMapBuffer(onFlushRP,
                                          inst_buffer_count(specs) * sizeof(PathInstance));
    if (!fPathInstanceBuffer.gpuBuffer()) {
        SkDebugf("WARNING: failed to allocate CCPR instance buffer. No paths will be drawn.\n");
        return;
    }

    if (CoverageType::kA8_Multisample == coverageType) {
        int numRenderedPaths =
                specs.fNumRenderedPaths[kFillIdx] + specs.fNumRenderedPaths[kStrokeIdx] +
                specs.fNumClipPaths;
        fStencilResolveBuffer.resetAndMapBuffer(
                onFlushRP, numRenderedPaths * sizeof(GrStencilAtlasOp::ResolveRectInstance));
        if (!fStencilResolveBuffer.gpuBuffer()) {
            SkDebugf("WARNING: failed to allocate CCPR stencil resolve buffer. "
                     "No paths will be drawn.\n");
            return;
        }
        SkDEBUGCODE(fEndStencilResolveInstance = numRenderedPaths);
    }

    SkDEBUGCODE(fEndCopyInstance =
                        specs.fNumCopiedPaths[kFillIdx] + specs.fNumCopiedPaths[kStrokeIdx]);
    SkDEBUGCODE(fEndPathInstance = inst_buffer_count(specs));
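    // The debug-only "end" markers assigned above bound the instance indices; the SkASSERTs in
    // the record/render methods below check that we never write past what was reserved here.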
}

void GrCCPerFlushResources::upgradeEntryToLiteralCoverageAtlas(
        GrCCPathCache* pathCache, GrOnFlushResourceProvider* onFlushRP, GrCCPathCacheEntry* entry,
        GrFillRule fillRule) {
    using ReleaseAtlasResult = GrCCPathCacheEntry::ReleaseAtlasResult;
    SkASSERT(this->isMapped());
    SkASSERT(fNextCopyInstanceIdx < fEndCopyInstance);

    const GrCCCachedAtlas* cachedAtlas = entry->cachedAtlas();
    SkASSERT(cachedAtlas);
    SkASSERT(cachedAtlas->getOnFlushProxy());

    if (CoverageType::kA8_LiteralCoverage == cachedAtlas->coverageType()) {
        // This entry has already been upgraded to literal coverage. The path must have been drawn
        // multiple times during the flush.
        SkDEBUGCODE(--fEndCopyInstance);
        return;
    }

    SkIVector newAtlasOffset;
    if (GrCCAtlas* retiredAtlas = fCopyAtlasStack.addRect(entry->devIBounds(), &newAtlasOffset)) {
        // We did not fit in the previous copy atlas and it was retired. We will render the ranges
        // up until fCopyPathRanges.count() into the retired atlas during finalize().
        retiredAtlas->setFillBatchID(fCopyPathRanges.count());
        fCurrCopyAtlasRangesIdx = fCopyPathRanges.count();
    }

    this->recordCopyPathInstance(
            *entry, newAtlasOffset, fillRule, sk_ref_sp(cachedAtlas->getOnFlushProxy()));

    sk_sp<GrTexture> previousAtlasTexture =
            sk_ref_sp(cachedAtlas->getOnFlushProxy()->peekTexture());
    GrCCAtlas* newAtlas = &fCopyAtlasStack.current();
    if (ReleaseAtlasResult::kDidInvalidateFromCache ==
            entry->upgradeToLiteralCoverageAtlas(pathCache, onFlushRP, newAtlas, newAtlasOffset)) {
        // This texture just got booted out of the cache. Keep it around, in case we might be able
        // to recycle it for a new atlas. We can recycle it because copying happens before rendering
        // new paths, and every path from the atlas that we're planning to use this flush will be
        // copied to a new atlas. We'll never copy some and leave others.
        fRecyclableAtlasTextures.push_back(std::move(previousAtlasTexture));
    }
}

void GrCCPerFlushResources::recordCopyPathInstance(
        const GrCCPathCacheEntry& entry, const SkIVector& newAtlasOffset, GrFillRule fillRule,
        sk_sp<GrTextureProxy> srcProxy) {
    SkASSERT(fNextCopyInstanceIdx < fEndCopyInstance);

    // Write the instance at the back of the array.
    int currentInstanceIdx = fNextCopyInstanceIdx++;
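    // SK_Half1 is the bit pattern of 1.0 in 16-bit half-float; the four lanes below pack an
    // opaque white half4 color.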
    constexpr uint64_t kWhite = (((uint64_t) SK_Half1) <<  0) |
                                (((uint64_t) SK_Half1) << 16) |
                                (((uint64_t) SK_Half1) << 32) |
                                (((uint64_t) SK_Half1) << 48);
    fPathInstanceBuffer[currentInstanceIdx].set(entry, newAtlasOffset, kWhite, fillRule);

    // Percolate the instance forward until it's contiguous with other instances that share the same
    // proxy.
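    //
    // E.g., if the current atlas's ranges are [A x2][B x2] over instances [a a b b] and the new
    // instance also uses proxy A, it is first written at index 4 ([a a b b a]), then swapped with
    // the first instance of the B range to give [a a a b b], and the A range's count becomes 3.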
    for (int i = fCopyPathRanges.count() - 1; i >= fCurrCopyAtlasRangesIdx; --i) {
        if (fCopyPathRanges[i].fSrcProxy == srcProxy) {
            ++fCopyPathRanges[i].fCount;
            return;
        }
        int rangeFirstInstanceIdx = currentInstanceIdx - fCopyPathRanges[i].fCount;
        std::swap(fPathInstanceBuffer[rangeFirstInstanceIdx],
                  fPathInstanceBuffer[currentInstanceIdx]);
        currentInstanceIdx = rangeFirstInstanceIdx;
    }

    // An instance with this particular proxy did not yet exist in the array. Add a range for it,
    // first moving any later ranges back to make space for it at fCurrCopyAtlasRangesIdx.
    fCopyPathRanges.push_back();
    std::move_backward(fCopyPathRanges.begin() + fCurrCopyAtlasRangesIdx,
                       fCopyPathRanges.end() - 1,
                       fCopyPathRanges.end());
    fCopyPathRanges[fCurrCopyAtlasRangesIdx] = {std::move(srcProxy), 1};
}

static bool transform_path_pts(
        const SkMatrix& m, const SkPath& path, const SkAutoSTArray<32, SkPoint>& outDevPts,
        GrOctoBounds* octoBounds) {
    const SkPoint* pts = SkPathPriv::PointData(path);
    int numPts = path.countPoints();
    SkASSERT(numPts + 1 <= outDevPts.count());
    SkASSERT(numPts);

    // m45 transforms path points into "45 degree" device space. A bounding box in this space gives
    // the circumscribing octagon's diagonals. We could use SK_ScalarRoot2Over2, but an orthonormal
    // transform is not necessary as long as the shader uses the correct inverse.
    SkMatrix m45;
    m45.setSinCos(1, 1);
    m45.preConcat(m);
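    // E.g., with an identity view matrix, a path point (x, y) lands at (x - y, x + y) in
    // "45 degree" space: a 45-degree rotation scaled by sqrt(2), whose scale the shader's
    // inverse accounts for.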

    // X,Y,T are two parallel view matrices that accumulate two bounding boxes as they map points:
    // device-space bounds and "45 degree" device-space bounds (| 1 -1 | * devCoords).
    //                                                          | 1  1 |
    Sk4f X = Sk4f(m.getScaleX(), m.getSkewY(), m45.getScaleX(), m45.getSkewY());
    Sk4f Y = Sk4f(m.getSkewX(), m.getScaleY(), m45.getSkewX(), m45.getScaleY());
    Sk4f T = Sk4f(m.getTranslateX(), m.getTranslateY(), m45.getTranslateX(), m45.getTranslateY());

    // Map the path's points to device space and accumulate bounding boxes.
    Sk4f devPt = SkNx_fma(Y, Sk4f(pts[0].y()), T);
    devPt = SkNx_fma(X, Sk4f(pts[0].x()), devPt);
    Sk4f topLeft = devPt;
    Sk4f bottomRight = devPt;

    // Store all 4 values [dev.x, dev.y, dev45.x, dev45.y]. We are only interested in the first two,
    // and will overwrite [dev45.x, dev45.y] with the next point. This is why the dst buffer must
    // be at least one larger than the number of points.
    devPt.store(&outDevPts[0]);

    for (int i = 1; i < numPts; ++i) {
        devPt = SkNx_fma(Y, Sk4f(pts[i].y()), T);
        devPt = SkNx_fma(X, Sk4f(pts[i].x()), devPt);
        topLeft = Sk4f::Min(topLeft, devPt);
        bottomRight = Sk4f::Max(bottomRight, devPt);
        devPt.store(&outDevPts[i]);
    }

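    // Finiteness check: x*0 is 0 for any finite x, but NaN for NaN or +/-infinity, so a failed
    // lane below means the accumulated bounds are not finite.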
    if (!(Sk4f(0) == topLeft*0).allTrue() || !(Sk4f(0) == bottomRight*0).allTrue()) {
        // The bounds are infinite or NaN.
        return false;
    }

    SkPoint topLeftPts[2], bottomRightPts[2];
    topLeft.store(topLeftPts);
    bottomRight.store(bottomRightPts);

    const SkRect& devBounds = SkRect::MakeLTRB(
            topLeftPts[0].x(), topLeftPts[0].y(), bottomRightPts[0].x(), bottomRightPts[0].y());
    const SkRect& devBounds45 = SkRect::MakeLTRB(
            topLeftPts[1].x(), topLeftPts[1].y(), bottomRightPts[1].x(), bottomRightPts[1].y());

    octoBounds->set(devBounds, devBounds45);
    return true;
}

GrCCAtlas* GrCCPerFlushResources::renderShapeInAtlas(
        const SkIRect& clipIBounds, const SkMatrix& m, const GrShape& shape, float strokeDevWidth,
        GrOctoBounds* octoBounds, SkIRect* devIBounds, SkIVector* devToAtlasOffset) {
    SkASSERT(this->isMapped());
    SkASSERT(fNextPathInstanceIdx < fEndPathInstance);

    SkPath path;
    shape.asPath(&path);
    if (path.isEmpty()) {
        SkDEBUGCODE(--fEndPathInstance);
        SkDEBUGCODE(--fEndStencilResolveInstance);
        return nullptr;
    }
    if (!transform_path_pts(m, path, fLocalDevPtsBuffer, octoBounds)) {
        // The transformed path had infinite or NaN bounds.
        SkDEBUGCODE(--fEndPathInstance);
        SkDEBUGCODE(--fEndStencilResolveInstance);
        return nullptr;
    }

    const SkStrokeRec& stroke = shape.style().strokeRec();
    if (!stroke.isFillStyle()) {
        float r = SkStrokeRec::GetInflationRadius(
                stroke.getJoin(), stroke.getMiter(), stroke.getCap(), strokeDevWidth);
        octoBounds->outset(r);
    }

    GrScissorTest enableScissorInAtlas;
    if (clipIBounds.contains(octoBounds->bounds())) {
        enableScissorInAtlas = GrScissorTest::kDisabled;
    } else if (octoBounds->clip(clipIBounds)) {
        enableScissorInAtlas = GrScissorTest::kEnabled;
    } else {
        // The clip and octo bounds do not intersect. Draw nothing.
        SkDEBUGCODE(--fEndPathInstance);
        SkDEBUGCODE(--fEndStencilResolveInstance);
        return nullptr;
    }
    octoBounds->roundOut(devIBounds);
    SkASSERT(clipIBounds.contains(*devIBounds));

    this->placeRenderedPathInAtlas(*devIBounds, enableScissorInAtlas, devToAtlasOffset);

    GrFillRule fillRule;
    if (stroke.isFillStyle()) {
        SkASSERT(0 == strokeDevWidth);
        fFiller.parseDeviceSpaceFill(path, fLocalDevPtsBuffer.begin(), enableScissorInAtlas,
                                     *devIBounds, *devToAtlasOffset);
        fillRule = GrFillRuleForSkPath(path);
    } else {
        // Stroke-and-fill is not yet supported.
        SkASSERT(SkStrokeRec::kStroke_Style == stroke.getStyle() || stroke.isHairlineStyle());
        SkASSERT(!stroke.isHairlineStyle() || 1 == strokeDevWidth);
        fStroker.parseDeviceSpaceStroke(
                path, fLocalDevPtsBuffer.begin(), stroke, strokeDevWidth, enableScissorInAtlas,
                *devIBounds, *devToAtlasOffset);
        fillRule = GrFillRule::kNonzero;
    }

    if (GrCCAtlas::CoverageType::kA8_Multisample == this->renderedPathCoverageType()) {
        this->recordStencilResolveInstance(*devIBounds, *devToAtlasOffset, fillRule);
    }

    return &fRenderedAtlasStack.current();
}

const GrCCAtlas* GrCCPerFlushResources::renderDeviceSpacePathInAtlas(
        const SkIRect& clipIBounds, const SkPath& devPath, const SkIRect& devPathIBounds,
        GrFillRule fillRule, SkIVector* devToAtlasOffset) {
    SkASSERT(this->isMapped());

    if (devPath.isEmpty()) {
        SkDEBUGCODE(--fEndStencilResolveInstance);
        return nullptr;
    }

    GrScissorTest enableScissorInAtlas;
    SkIRect clippedPathIBounds;
    if (clipIBounds.contains(devPathIBounds)) {
        clippedPathIBounds = devPathIBounds;
        enableScissorInAtlas = GrScissorTest::kDisabled;
    } else if (clippedPathIBounds.intersect(clipIBounds, devPathIBounds)) {
        enableScissorInAtlas = GrScissorTest::kEnabled;
    } else {
        // The clip and path bounds do not intersect. Draw nothing.
        SkDEBUGCODE(--fEndStencilResolveInstance);
        return nullptr;
    }

    this->placeRenderedPathInAtlas(clippedPathIBounds, enableScissorInAtlas, devToAtlasOffset);
    fFiller.parseDeviceSpaceFill(devPath, SkPathPriv::PointData(devPath), enableScissorInAtlas,
                                 clippedPathIBounds, *devToAtlasOffset);

    // In MSAA mode we also record an internal draw instance that will be used to resolve stencil
    // winding values to coverage when the atlas is generated.
    if (GrCCAtlas::CoverageType::kA8_Multisample == this->renderedPathCoverageType()) {
        this->recordStencilResolveInstance(clippedPathIBounds, *devToAtlasOffset, fillRule);
    }

    return &fRenderedAtlasStack.current();
}

void GrCCPerFlushResources::placeRenderedPathInAtlas(
        const SkIRect& clippedPathIBounds, GrScissorTest scissorTest, SkIVector* devToAtlasOffset) {
    if (GrCCAtlas* retiredAtlas =
                fRenderedAtlasStack.addRect(clippedPathIBounds, devToAtlasOffset)) {
        // We did not fit in the previous coverage count atlas and it was retired. Close the path
        // parser's current batch (which does not yet include the path we just parsed). We will
        // render this batch into the retired atlas during finalize().
        retiredAtlas->setFillBatchID(fFiller.closeCurrentBatch());
        retiredAtlas->setStrokeBatchID(fStroker.closeCurrentBatch());
        retiredAtlas->setEndStencilResolveInstance(fNextStencilResolveInstanceIdx);
    }
}

void GrCCPerFlushResources::recordStencilResolveInstance(
        const SkIRect& clippedPathIBounds, const SkIVector& devToAtlasOffset, GrFillRule fillRule) {
    SkASSERT(GrCCAtlas::CoverageType::kA8_Multisample == this->renderedPathCoverageType());
    SkASSERT(fNextStencilResolveInstanceIdx < fEndStencilResolveInstance);

    SkIRect atlasIBounds = clippedPathIBounds.makeOffset(devToAtlasOffset);
    if (GrFillRule::kEvenOdd == fillRule) {
        // Make even/odd fills counterclockwise. The resolve draw uses two-sided stencil, with
        // "nonzero" settings in front and "even/odd" settings in back.
        std::swap(atlasIBounds.fLeft, atlasIBounds.fRight);
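        // (Swapping left and right gives the resolve rect negative width, reversing its winding
        // so the GPU applies the back-face, even/odd stencil settings.)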
    }
    fStencilResolveBuffer[fNextStencilResolveInstanceIdx++] = {
            (int16_t)atlasIBounds.left(), (int16_t)atlasIBounds.top(),
            (int16_t)atlasIBounds.right(), (int16_t)atlasIBounds.bottom()};
}

bool GrCCPerFlushResources::finalize(GrOnFlushResourceProvider* onFlushRP) {
    SkASSERT(this->isMapped());
    SkASSERT(fNextPathInstanceIdx == fEndPathInstance);
    SkASSERT(fNextCopyInstanceIdx == fEndCopyInstance);
    SkASSERT(GrCCAtlas::CoverageType::kA8_Multisample != this->renderedPathCoverageType() ||
             fNextStencilResolveInstanceIdx == fEndStencilResolveInstance);

    fPathInstanceBuffer.unmapBuffer();

    if (fStencilResolveBuffer.gpuBuffer()) {
        fStencilResolveBuffer.unmapBuffer();
    }

    if (!fCopyAtlasStack.empty()) {
        fCopyAtlasStack.current().setFillBatchID(fCopyPathRanges.count());
        fCurrCopyAtlasRangesIdx = fCopyPathRanges.count();
    }
    if (!fRenderedAtlasStack.empty()) {
        fRenderedAtlasStack.current().setFillBatchID(fFiller.closeCurrentBatch());
        fRenderedAtlasStack.current().setStrokeBatchID(fStroker.closeCurrentBatch());
        fRenderedAtlasStack.current().setEndStencilResolveInstance(fNextStencilResolveInstanceIdx);
    }

    // Build the GPU buffers to render path coverage counts. (This must not happen until after the
    // final calls to fFiller/fStroker.closeCurrentBatch().)
    if (!fFiller.prepareToDraw(onFlushRP)) {
        return false;
    }
    if (!fStroker.prepareToDraw(onFlushRP)) {
        return false;
    }

    // Draw the copies from coverage count or msaa atlas(es) into 8-bit cached atlas(es).
    int copyRangeIdx = 0;
    int baseCopyInstance = 0;
    for (GrCCAtlas& atlas : fCopyAtlasStack.atlases()) {
        int endCopyRange = atlas.getFillBatchID();
        SkASSERT(endCopyRange > copyRangeIdx);

        auto rtc = atlas.instantiate(onFlushRP);
        for (; copyRangeIdx < endCopyRange; ++copyRangeIdx) {
            const CopyPathRange& copyRange = fCopyPathRanges[copyRangeIdx];
            int endCopyInstance = baseCopyInstance + copyRange.fCount;
            if (rtc) {
                auto op = CopyAtlasOp::Make(
                        rtc->surfPriv().getContext(), sk_ref_sp(this), copyRange.fSrcProxy,
                        baseCopyInstance, endCopyInstance, atlas.drawBounds());
                rtc->addDrawOp(GrNoClip(), std::move(op));
            }
            baseCopyInstance = endCopyInstance;
        }
    }
    SkASSERT(fCopyPathRanges.count() == copyRangeIdx);
    SkASSERT(fNextCopyInstanceIdx == baseCopyInstance);
    SkASSERT(baseCopyInstance == fEndCopyInstance);

    // Render the coverage count atlas(es).
    int baseStencilResolveInstance = 0;
    for (GrCCAtlas& atlas : fRenderedAtlasStack.atlases()) {
        // Copies will be finished by the time we get to rendering new atlases. See if we can
        // recycle any previous invalidated atlas textures instead of creating new ones.
        sk_sp<GrTexture> backingTexture;
        for (sk_sp<GrTexture>& texture : fRecyclableAtlasTextures) {
            if (texture && atlas.currentHeight() == texture->height() &&
                    atlas.currentWidth() == texture->width()) {
                backingTexture = skstd::exchange(texture, nullptr);
                break;
            }
        }

        if (auto rtc = atlas.instantiate(onFlushRP, std::move(backingTexture))) {
            std::unique_ptr<GrDrawOp> op;
            if (CoverageType::kA8_Multisample == fRenderedAtlasStack.coverageType()) {
                op = GrStencilAtlasOp::Make(
                        rtc->surfPriv().getContext(), sk_ref_sp(this), atlas.getFillBatchID(),
                        atlas.getStrokeBatchID(), baseStencilResolveInstance,
                        atlas.getEndStencilResolveInstance(), atlas.drawBounds());
            } else if (onFlushRP->caps()->shaderCaps()->geometryShaderSupport()) {
                op = RenderAtlasOp<GrGSCoverageProcessor>::Make(
                        rtc->surfPriv().getContext(), sk_ref_sp(this), atlas.getFillBatchID(),
                        atlas.getStrokeBatchID(), atlas.drawBounds());
            } else {
                op = RenderAtlasOp<GrVSCoverageProcessor>::Make(
                        rtc->surfPriv().getContext(), sk_ref_sp(this), atlas.getFillBatchID(),
                        atlas.getStrokeBatchID(), atlas.drawBounds());
            }
            rtc->addDrawOp(GrNoClip(), std::move(op));
            if (rtc->asSurfaceProxy()->requiresManualMSAAResolve()) {
                onFlushRP->addTextureResolveTask(sk_ref_sp(rtc->asTextureProxy()),
                                                 GrSurfaceProxy::ResolveFlags::kMSAA);
            }
        }

        SkASSERT(atlas.getEndStencilResolveInstance() >= baseStencilResolveInstance);
        baseStencilResolveInstance = atlas.getEndStencilResolveInstance();
    }
    SkASSERT(GrCCAtlas::CoverageType::kA8_Multisample != this->renderedPathCoverageType() ||
             baseStencilResolveInstance == fEndStencilResolveInstance);

    return true;
}

void GrCCPerFlushResourceSpecs::cancelCopies() {
    // Convert copies to cached draws.
    fNumCachedPaths += fNumCopiedPaths[kFillIdx] + fNumCopiedPaths[kStrokeIdx];
    fNumCopiedPaths[kFillIdx] = fNumCopiedPaths[kStrokeIdx] = 0;
    fCopyPathStats[kFillIdx] = fCopyPathStats[kStrokeIdx] = GrCCRenderedPathStats();
    fCopyAtlasSpecs = GrCCAtlas::Specs();
}
609