/*
 * Copyright 2014 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "include/core/SkExecutor.h"
#include "include/core/SkString.h"
#include "include/gpu/GrContextOptions.h"
#include "tools/flags/CommonFlags.h"

#include <cstring>

DEFINE_int(gpuThreads,
           2,
           "Create this many extra threads to assist with GPU work, "
           "including software path rendering. Defaults to two.");

static DEFINE_bool(cachePathMasks, true,
                   "Allows path mask textures to be cached in GPU configs.");

static DEFINE_bool(noGS, false, "Disables support for geometry shaders.");

static DEFINE_bool(cc, false, "Allow coverage counting shortcuts to render paths?");

static DEFINE_string(pr, "",
              "Set of enabled gpu path renderers. Defined as a list of: "
              "[~]none [~]dashline [~]tess [~]nvpr [~]ccpr [~]aahairline [~]aaconvex "
              "[~]aalinearizing [~]small [~]tri [~]default");

static DEFINE_int(internalSamples, 4,
                  "Number of samples for internal draws that use MSAA or mixed samples.");

static DEFINE_bool(disableDriverCorrectnessWorkarounds, false,
                   "Disables all GPU driver correctness workarounds");

static DEFINE_bool(reduceOpsTaskSplitting, false, "Improve opsTask sorting");
static DEFINE_bool(dontReduceOpsTaskSplitting, false, "Allow more opsTask splitting");

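// Maps one name from the --pr flag to its GpuPathRenderers bit.
// Unknown names abort so that typos fail loudly instead of being silently ignored.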
static GpuPathRenderers get_named_pathrenderers_flags(const char* name) {
    if (!strcmp(name, "none")) {
        return GpuPathRenderers::kNone;
    } else if (!strcmp(name, "dashline")) {
        return GpuPathRenderers::kDashLine;
    } else if (!strcmp(name, "tess")) {
        return GpuPathRenderers::kTessellation;
    } else if (!strcmp(name, "nvpr")) {
        return GpuPathRenderers::kStencilAndCover;
    } else if (!strcmp(name, "ccpr")) {
        return GpuPathRenderers::kCoverageCounting;
    } else if (!strcmp(name, "aahairline")) {
        return GpuPathRenderers::kAAHairline;
    } else if (!strcmp(name, "aaconvex")) {
        return GpuPathRenderers::kAAConvex;
    } else if (!strcmp(name, "aalinearizing")) {
        return GpuPathRenderers::kAALinearizing;
    } else if (!strcmp(name, "small")) {
        return GpuPathRenderers::kSmall;
    } else if (!strcmp(name, "tri")) {
        return GpuPathRenderers::kTriangulating;
    } else if (!strcmp(name, "default")) {
        return GpuPathRenderers::kDefault;
    }
    SK_ABORT(SkStringPrintf("error: unknown named path renderer \"%s\"\n", name).c_str());
}

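// Builds the GpuPathRenderers mask from the --pr flag. If the first entry
// starts with '~', we begin from the default set and subtract; otherwise we
// begin from kNone and add. For example:
//   --pr ccpr      enables only the coverage counting path renderer
//   --pr ~small    uses the default set minus the small-path renderer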
static GpuPathRenderers collect_gpu_path_renderers_from_flags() {
    if (FLAGS_pr.isEmpty()) {
        return GpuPathRenderers::kDefault;
    }

    GpuPathRenderers gpuPathRenderers = ('~' == FLAGS_pr[0][0])
            ? GpuPathRenderers::kDefault
            : GpuPathRenderers::kNone;

    for (int i = 0; i < FLAGS_pr.count(); ++i) {
        const char* name = FLAGS_pr[i];
        if (name[0] == '~') {
            gpuPathRenderers &= ~get_named_pathrenderers_flags(&name[1]);
        } else {
            gpuPathRenderers |= get_named_pathrenderers_flags(name);
        }
    }
    return gpuPathRenderers;
}

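// Copies the common GPU flag values above into a GrContextOptions before a
// context is created.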
void SetCtxOptionsFromCommonFlags(GrContextOptions* ctxOptions) {
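    // Function-local static: a single thread pool is created on first use and
    // shared by every context configured through this function.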
    static std::unique_ptr<SkExecutor> gGpuExecutor = (0 != FLAGS_gpuThreads)
        ? SkExecutor::MakeFIFOThreadPool(FLAGS_gpuThreads)
        : nullptr;

    ctxOptions->fExecutor                            = gGpuExecutor.get();
    ctxOptions->fDisableCoverageCountingPaths        = !FLAGS_cc;
    ctxOptions->fAllowPathMaskCaching                = FLAGS_cachePathMasks;
    ctxOptions->fSuppressGeometryShaders             = FLAGS_noGS;
    ctxOptions->fGpuPathRenderers                    = collect_gpu_path_renderers_from_flags();
    ctxOptions->fInternalMultisampleCount            = FLAGS_internalSamples;
    ctxOptions->fDisableDriverCorrectnessWorkarounds = FLAGS_disableDriverCorrectnessWorkarounds;

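    // The two opsTask flags are mutually exclusive; the assert catches
    // conflicting command lines in debug builds.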
    if (FLAGS_reduceOpsTaskSplitting) {
        SkASSERT(!FLAGS_dontReduceOpsTaskSplitting);
        ctxOptions->fReduceOpsTaskSplitting = GrContextOptions::Enable::kYes;
    } else if (FLAGS_dontReduceOpsTaskSplitting) {
        ctxOptions->fReduceOpsTaskSplitting = GrContextOptions::Enable::kNo;
    }
}