1 /*
2 * Copyright 2014 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include <ctype.h>
9
10 #include "bench/nanobench.h"
11
12 #include "bench/AndroidCodecBench.h"
13 #include "bench/Benchmark.h"
14 #include "bench/CodecBench.h"
15 #include "bench/CodecBenchPriv.h"
16 #include "bench/GMBench.h"
17 #include "bench/RecordingBench.h"
18 #include "bench/ResultsWriter.h"
19 #include "bench/SKPAnimationBench.h"
20 #include "bench/SKPBench.h"
21 #include "bench/SkGlyphCacheBench.h"
22 #include "bench/SkSLBench.h"
23 #include "include/codec/SkAndroidCodec.h"
24 #include "include/codec/SkCodec.h"
25 #include "include/core/SkCanvas.h"
26 #include "include/core/SkData.h"
27 #include "include/core/SkGraphics.h"
28 #include "include/core/SkPictureRecorder.h"
29 #include "include/core/SkString.h"
30 #include "include/core/SkSurface.h"
31 #include "include/core/SkTime.h"
32 #include "src/core/SkAutoMalloc.h"
33 #include "src/core/SkColorSpacePriv.h"
34 #include "src/core/SkLeanWindows.h"
35 #include "src/core/SkOSFile.h"
36 #include "src/core/SkTaskGroup.h"
37 #include "src/core/SkTraceEvent.h"
38 #include "src/utils/SkJSONWriter.h"
39 #include "src/utils/SkOSPath.h"
40 #include "tools/AutoreleasePool.h"
41 #include "tools/CrashHandler.h"
42 #include "tools/ProcStats.h"
43 #include "tools/Stats.h"
44 #include "tools/flags/CommonFlags.h"
45 #include "tools/flags/CommonFlagsConfig.h"
46 #include "tools/ios_utils.h"
47 #include "tools/trace/EventTracingPriv.h"
48 #include "tools/trace/SkDebugfTracer.h"
49
50 #ifdef SK_XML
51 #include "modules/svg/include/SkSVGDOM.h"
52 #endif // SK_XML
53
54 #ifdef SK_ENABLE_ANDROID_UTILS
55 #include "bench/BitmapRegionDecoderBench.h"
56 #include "client_utils/android/BitmapRegionDecoder.h"
57 #endif
58
59 #include <cinttypes>
60 #include <stdlib.h>
61 #include <memory>
62 #include <thread>
63
64 extern bool gSkForceRasterPipelineBlitter;
65 extern bool gUseSkVMBlitter;
66 extern bool gSkVMAllowJIT;
67 extern bool gSkVMJITViaDylib;
68
69 #ifndef SK_BUILD_FOR_WIN
70 #include <unistd.h>
71
72 #endif
73
74 #include "include/gpu/GrDirectContext.h"
75 #include "src/gpu/GrCaps.h"
76 #include "src/gpu/GrDirectContextPriv.h"
77 #include "src/gpu/SkGr.h"
78 #include "src/gpu/gl/GrGLDefines.h"
79 #include "src/gpu/gl/GrGLGpu.h"
80 #include "src/gpu/gl/GrGLUtil.h"
81 #include "tools/gpu/GrContextFactory.h"
82
83 using sk_gpu_test::ContextInfo;
84 using sk_gpu_test::GrContextFactory;
85 using sk_gpu_test::TestContext;
86
87 GrContextOptions grContextOpts;
88
89 static const int kAutoTuneLoops = 0;
90
loops_help_txt()91 static SkString loops_help_txt() {
92 SkString help;
93 help.printf("Number of times to run each bench. Set this to %d to auto-"
94 "tune for each bench. Timings are only reported when auto-tuning.",
95 kAutoTuneLoops);
96 return help;
97 }
98
to_string(int n)99 static SkString to_string(int n) {
100 SkString str;
101 str.appendS32(n);
102 return str;
103 }
104
105 static DEFINE_int(loops, kAutoTuneLoops, loops_help_txt().c_str());
106
107 static DEFINE_int(samples, 10, "Number of samples to measure for each bench.");
108 static DEFINE_int(ms, 0, "If >0, run each bench for this many ms instead of obeying --samples.");
109 static DEFINE_int(overheadLoops, 100000, "Loops to estimate timer overhead.");
110 static DEFINE_double(overheadGoal, 0.0001,
111 "Loop until timer overhead is at most this fraction of our measurments.");
112 static DEFINE_double(gpuMs, 5, "Target bench time in millseconds for GPU.");
113 static DEFINE_int(gpuFrameLag, 5,
114 "If unknown, estimated maximum number of frames GPU allows to lag.");
115
116 static DEFINE_string(outResultsFile, "", "If given, write results here as JSON.");
117 static DEFINE_int(maxCalibrationAttempts, 3,
118 "Try up to this many times to guess loops for a bench, or skip the bench.");
119 static DEFINE_int(maxLoops, 1000000, "Never run a bench more times than this.");
120 static DEFINE_string(clip, "0,0,1000,1000", "Clip for SKPs.");
121 static DEFINE_string(scales, "1.0", "Space-separated scales for SKPs.");
122 static DEFINE_string(zoom, "1.0,0",
123 "Comma-separated zoomMax,zoomPeriodMs factors for a periodic SKP zoom "
124 "function that ping-pongs between 1.0 and zoomMax.");
125 static DEFINE_bool(bbh, true, "Build a BBH for SKPs?");
126 static DEFINE_bool(loopSKP, true, "Loop SKPs like we do for micro benches?");
127 static DEFINE_int(flushEvery, 10, "Flush --outResultsFile every Nth run.");
128 static DEFINE_bool(gpuStats, false, "Print GPU stats after each gpu benchmark?");
129 static DEFINE_bool(gpuStatsDump, false, "Dump GPU states after each benchmark to json");
130 static DEFINE_bool(keepAlive, false, "Print a message every so often so that we don't time out");
131 static DEFINE_bool(csv, false, "Print status in CSV format");
132 static DEFINE_string(sourceType, "",
133 "Apply usual --match rules to source type: bench, gm, skp, image, etc.");
134 static DEFINE_string(benchType, "",
135 "Apply usual --match rules to bench type: micro, recording, "
136 "piping, playback, skcodec, etc.");
137
138 static DEFINE_bool(forceRasterPipeline, false, "sets gSkForceRasterPipelineBlitter");
139 static DEFINE_bool(skvm, false, "sets gUseSkVMBlitter");
140 static DEFINE_bool(jit, true, "sets gSkVMAllowJIT and gSkVMJITViaDylib");
141
142 static DEFINE_bool2(pre_log, p, false,
143 "Log before running each test. May be incomprehensible when threading");
144
145 static DEFINE_bool(cpu, true, "Run CPU-bound work?");
146 static DEFINE_bool(gpu, true, "Run GPU-bound work?");
147 static DEFINE_bool(dryRun, false,
148 "just print the tests that would be run, without actually running them.");
149 static DEFINE_string(images, "",
150 "List of images and/or directories to decode. A directory with no images"
151 " is treated as a fatal error.");
152 static DEFINE_bool(simpleCodec, false,
153 "Runs of a subset of the codec tests, always N32, Premul or Opaque");
154
155 static DEFINE_string2(match, m, nullptr,
156 "[~][^]substring[$] [...] of name to run.\n"
157 "Multiple matches may be separated by spaces.\n"
158 "~ causes a matching name to always be skipped\n"
159 "^ requires the start of the name to match\n"
160 "$ requires the end of the name to match\n"
161 "^ and $ requires an exact match\n"
162 "If a name does not match any list entry,\n"
163 "it is skipped unless some list entry starts with ~");
164
165 static DEFINE_bool2(quiet, q, false, "if true, don't print status updates.");
166 static DEFINE_bool2(verbose, v, false, "enable verbose output from the test driver.");
167
168
169 static DEFINE_string(skps, "skps", "Directory to read skps from.");
170 static DEFINE_string(svgs, "", "Directory to read SVGs from, or a single SVG file.");
171 static DEFINE_string(texttraces, "", "Directory to read TextBlobTrace files from.");
172
173 static DEFINE_int_2(threads, j, -1,
174 "Run threadsafe tests on a threadpool with this many extra threads, "
175 "defaulting to one extra thread per core.");
176
177 static DEFINE_string2(writePath, w, "", "If set, write bitmaps here as .pngs.");
178
179 static DEFINE_string(key, "",
180 "Space-separated key/value pairs to add to JSON identifying this builder.");
181 static DEFINE_string(properties, "",
182 "Space-separated key/value pairs to add to JSON identifying this run.");
183
184 static DEFINE_bool(purgeBetweenBenches, false,
185 "Call SkGraphics::PurgeAllCaches() between each benchmark?");
186
// Current time in milliseconds, derived from SkTime's nanosecond clock.
static double now_ms() { return SkTime::GetNSecs() * 1e-6; }
188
humanize(double ms)189 static SkString humanize(double ms) {
190 if (FLAGS_verbose) return SkStringPrintf("%" PRIu64, (uint64_t)(ms*1e6));
191 return HumanizeMs(ms);
192 }
193 #define HUMANIZE(ms) humanize(ms).c_str()
194
init(SkImageInfo info,Benchmark * bench)195 bool Target::init(SkImageInfo info, Benchmark* bench) {
196 if (Benchmark::kRaster_Backend == config.backend) {
197 this->surface = SkSurface::MakeRaster(info);
198 if (!this->surface) {
199 return false;
200 }
201 }
202 return true;
203 }
capturePixels(SkBitmap * bmp)204 bool Target::capturePixels(SkBitmap* bmp) {
205 SkCanvas* canvas = this->getCanvas();
206 if (!canvas) {
207 return false;
208 }
209 bmp->allocPixels(canvas->imageInfo());
210 if (!canvas->readPixels(*bmp, 0, 0)) {
211 SkDebugf("Can't read canvas pixels.\n");
212 return false;
213 }
214 return true;
215 }
216
// Target that renders through the GPU backend. Owns its GrContextFactory (and
// therefore the GrDirectContext) plus a render-target SkSurface made in init().
struct GPUTarget : public Target {
    explicit GPUTarget(const Config& c) : Target(c) {}
    ContextInfo contextInfo;
    std::unique_ptr<GrContextFactory> factory;

    ~GPUTarget() override {
        // For Vulkan we need to release all our refs to the GrContext before destroying the
        // Vulkan context, which happens at the end of this destructor. Thus we need to release
        // the surface here, which holds a ref to the GrContext.
        surface.reset();
    }

    // Make the test context current and drain any pending work so timing
    // starts from an idle GPU.
    void setup() override {
        this->contextInfo.testContext()->makeCurrent();
        // Make sure we're done with whatever came before.
        this->contextInfo.testContext()->finish();
    }
    // Flush submitted work and wait on a sync object so the timed sample
    // includes actual GPU execution.
    void endTiming() override {
        if (this->contextInfo.testContext()) {
            this->contextInfo.testContext()->flushAndWaitOnSync(contextInfo.directContext());
        }
    }
    // Full CPU/GPU synchronization point.
    void fence() override { this->contextInfo.testContext()->finish(); }

    // GPU frames may be pipelined, so per-frame timing is always needed.
    // Reports the pipeline depth, falling back to FLAGS_gpuFrameLag when the
    // context can't say.
    bool needsFrameTiming(int* maxFrameLag) const override {
        if (!this->contextInfo.testContext()->getMaxGpuFrameLag(maxFrameLag)) {
            // Frame lag is unknown.
            *maxFrameLag = FLAGS_gpuFrameLag;
        }
        return true;
    }
    // Builds the context (applying any bench-specific GrContextOptions) and a
    // render-target surface. Returns false if the surface can't be created;
    // warns (but succeeds) when fence sync is unsupported, since timings may
    // then be inaccurate.
    bool init(SkImageInfo info, Benchmark* bench) override {
        GrContextOptions options = grContextOpts;
        bench->modifyGrContextOptions(&options);
        this->factory = std::make_unique<GrContextFactory>(options);
        uint32_t flags = this->config.useDFText ? SkSurfaceProps::kUseDeviceIndependentFonts_Flag :
                                                  0;
        SkSurfaceProps props(flags, kRGB_H_SkPixelGeometry);
        this->surface = SkSurface::MakeRenderTarget(
                this->factory->get(this->config.ctxType, this->config.ctxOverrides),
                SkBudgeted::kNo, info, this->config.samples, &props);
        this->contextInfo =
                this->factory->getContextInfo(this->config.ctxType, this->config.ctxOverrides);
        if (!this->surface) {
            return false;
        }
        if (!this->contextInfo.testContext()->fenceSyncSupport()) {
            SkDebugf("WARNING: GL context for config \"%s\" does not support fence sync. "
                     "Timings might not be accurate.\n", this->config.name.c_str());
        }
        return true;
    }
    // Records GL driver identification strings into the JSON results log
    // (OpenGL backend only; a no-op elsewhere).
    void fillOptions(NanoJSONResultsWriter& log) override {
#ifdef SK_GL
        const GrGLubyte* version;
        if (this->contextInfo.backend() == GrBackendApi::kOpenGL) {
            const GrGLInterface* gl =
                    static_cast<GrGLGpu*>(this->contextInfo.directContext()->priv().getGpu())
                            ->glInterface();
            GR_GL_CALL_RET(gl, version, GetString(GR_GL_VERSION));
            log.appendString("GL_VERSION", (const char*)(version));

            GR_GL_CALL_RET(gl, version, GetString(GR_GL_RENDERER));
            log.appendString("GL_RENDERER", (const char*) version);

            GR_GL_CALL_RET(gl, version, GetString(GR_GL_VENDOR));
            log.appendString("GL_VENDOR", (const char*) version);

            GR_GL_CALL_RET(gl, version, GetString(GR_GL_SHADING_LANGUAGE_VERSION));
            log.appendString("GL_SHADING_LANGUAGE_VERSION", (const char*) version);
        }
#endif
    }

    // Prints cache, GPU, and context statistics for this target's context.
    void dumpStats() override {
        auto context = this->contextInfo.directContext();

        context->priv().printCacheStats();
        context->priv().printGpuStats();
        context->priv().printContextStats();
    }
};
299
time(int loops,Benchmark * bench,Target * target)300 static double time(int loops, Benchmark* bench, Target* target) {
301 SkCanvas* canvas = target->getCanvas();
302 if (canvas) {
303 canvas->clear(SK_ColorWHITE);
304 }
305 bench->preDraw(canvas);
306 double start = now_ms();
307 canvas = target->beginTiming(canvas);
308 bench->draw(loops, canvas);
309 target->endTiming();
310 double elapsed = now_ms() - start;
311 bench->postDraw(canvas);
312 return elapsed;
313 }
314
estimate_timer_overhead()315 static double estimate_timer_overhead() {
316 double overhead = 0;
317 for (int i = 0; i < FLAGS_overheadLoops; i++) {
318 double start = now_ms();
319 overhead += now_ms() - start;
320 }
321 return overhead / FLAGS_overheadLoops;
322 }
323
detect_forever_loops(int loops)324 static int detect_forever_loops(int loops) {
325 // look for a magic run-forever value
326 if (loops < 0) {
327 loops = SK_MaxS32;
328 }
329 return loops;
330 }
331
clamp_loops(int loops)332 static int clamp_loops(int loops) {
333 if (loops < 1) {
334 SkDebugf("ERROR: clamping loops from %d to 1. "
335 "There's probably something wrong with the bench.\n", loops);
336 return 1;
337 }
338 if (loops > FLAGS_maxLoops) {
339 SkDebugf("WARNING: clamping loops from %d to FLAGS_maxLoops, %d.\n", loops, FLAGS_maxLoops);
340 return FLAGS_maxLoops;
341 }
342 return loops;
343 }
344
write_canvas_png(Target * target,const SkString & filename)345 static bool write_canvas_png(Target* target, const SkString& filename) {
346
347 if (filename.isEmpty()) {
348 return false;
349 }
350 if (target->getCanvas() &&
351 kUnknown_SkColorType == target->getCanvas()->imageInfo().colorType()) {
352 return false;
353 }
354
355 SkBitmap bmp;
356
357 if (!target->capturePixels(&bmp)) {
358 return false;
359 }
360
361 SkString dir = SkOSPath::Dirname(filename.c_str());
362 if (!sk_mkdir(dir.c_str())) {
363 SkDebugf("Can't make dir %s.\n", dir.c_str());
364 return false;
365 }
366 SkFILEWStream stream(filename.c_str());
367 if (!stream.isValid()) {
368 SkDebugf("Can't write %s.\n", filename.c_str());
369 return false;
370 }
371 if (!SkEncodeImage(&stream, bmp, SkEncodedImageFormat::kPNG, 100)) {
372 SkDebugf("Can't encode a PNG.\n");
373 return false;
374 }
375 return true;
376 }
377
// Sentinel returned by setup_cpu_bench() when loop calibration fails; callers
// treat it as "skip this bench". Never mutated, so make it constexpr.
static constexpr int kFailedLoops = -2;
// Decides how many loops of `bench` to run per timer start/stop on the CPU so
// that timer overhead is a negligible fraction of each measurement. Returns
// the loop count, or kFailedLoops when calibration fails (caller skips bench).
static int setup_cpu_bench(const double overhead, Target* target, Benchmark* bench) {
    // First figure out approximately how many loops of bench it takes to make overhead negligible.
    double bench_plus_overhead = 0.0;
    int round = 0;
    int loops = bench->calculateLoops(FLAGS_loops);
    if (kAutoTuneLoops == loops) {
        // Time single runs until one takes at least as long as the timer
        // overhead itself; anything faster than that can't be calibrated.
        while (bench_plus_overhead < overhead) {
            if (round++ == FLAGS_maxCalibrationAttempts) {
                SkDebugf("WARNING: Can't estimate loops for %s (%s vs. %s); skipping.\n",
                         bench->getUniqueName(), HUMANIZE(bench_plus_overhead), HUMANIZE(overhead));
                return kFailedLoops;
            }
            bench_plus_overhead = time(1, bench, target);
        }
    }

    // Later we'll just start and stop the timer once but loop N times.
    // We'll pick N to make timer overhead negligible:
    //
    //          overhead
    //  -------------------------  <  FLAGS_overheadGoal
    //  overhead + N * Bench Time
    //
    // where bench_plus_overhead ~=~ overhead + Bench Time.
    //
    // Doing some math, we get:
    //
    //  (overhead / FLAGS_overheadGoal) - overhead
    //  ------------------------------------------  <  N
    //        bench_plus_overhead - overhead
    //
    // Luckily, this also works well in practice. :)
    if (kAutoTuneLoops == loops) {
        const double numer = overhead / FLAGS_overheadGoal - overhead;
        const double denom = bench_plus_overhead - overhead;
        loops = (int)ceil(numer / denom);
        loops = clamp_loops(loops);
    } else {
        // A fixed loop count was requested; map the run-forever sentinel (< 0).
        loops = detect_forever_loops(loops);
    }

    return loops;
}
422
// Decides how many loops of `bench` to run per frame on the GPU, targeting a
// frame time of FLAGS_gpuMs. Also warms up maxGpuFrameLag frames so later
// timings measure steady-state pipelined work rather than ramp-up.
static int setup_gpu_bench(Target* target, Benchmark* bench, int maxGpuFrameLag) {
    // First, figure out how many loops it'll take to get a frame up to FLAGS_gpuMs.
    int loops = bench->calculateLoops(FLAGS_loops);
    if (kAutoTuneLoops == loops) {
        loops = 1;
        double elapsed = 0;
        do {
            if (1<<30 == loops) {
                // We're about to wrap. Something's wrong with the bench.
                loops = 0;
                break;
            }
            loops *= 2;
            // If the GPU lets frames lag at all, we need to make sure we're timing
            // _this_ round, not still timing last round.
            for (int i = 0; i < maxGpuFrameLag; i++) {
                elapsed = time(loops, bench, target);
            }
        } while (elapsed < FLAGS_gpuMs);

        // We've overshot at least a little. Scale back linearly.
        loops = (int)ceil(loops * FLAGS_gpuMs / elapsed);
        loops = clamp_loops(loops);

        // Make sure we're not still timing our calibration.
        target->fence();
    } else {
        // A fixed loop count was requested; map the run-forever sentinel (< 0).
        loops = detect_forever_loops(loops);
    }

    // Pretty much the same deal as the calibration: do some warmup to make
    // sure we're timing steady-state pipelined frames.
    for (int i = 0; i < maxGpuFrameLag; i++) {
        time(loops, bench, target);
    }

    return loops;
}
460
461 #define kBogusContextType GrContextFactory::kGL_ContextType
462 #define kBogusContextOverrides GrContextFactory::ContextOverrides::kNone
463
// Translates one parsed --config entry into a Config and appends it to
// `configs`. Unsupported or unknown configs are reported via SkDebugf and
// dropped (nothing is appended).
static void create_config(const SkCommandLineConfig* config, SkTArray<Config>* configs) {
    if (const auto* gpuConfig = config->asConfigGpu()) {
        if (!FLAGS_gpu) {
            SkDebugf("Skipping config '%s' as requested.\n", config->getTag().c_str());
            return;
        }

        const auto ctxType = gpuConfig->getContextType();
        const auto ctxOverrides = gpuConfig->getContextOverrides();
        const auto sampleCount = gpuConfig->getSamples();
        const auto colorType = gpuConfig->getColorType();
        auto colorSpace = gpuConfig->getColorSpace();
        if (gpuConfig->getSurfType() != SkCommandLineConfigGpu::SurfType::kDefault) {
            SkDebugf("This tool only supports the default surface type.");
            return;
        }

        // Spin up a throwaway context to verify the requested sample count is
        // actually supported before committing to this config.
        GrContextFactory factory(grContextOpts);
        if (const auto ctx = factory.get(ctxType, ctxOverrides)) {
            GrBackendFormat format = ctx->defaultBackendFormat(colorType, GrRenderable::kYes);
            int supportedSampleCount =
                    ctx->priv().caps()->getRenderTargetSampleCount(sampleCount, format);
            if (sampleCount != supportedSampleCount) {
                SkDebugf("Configuration '%s' sample count %d is not a supported sample count.\n",
                         config->getTag().c_str(), sampleCount);
                return;
            }
        } else {
            SkDebugf("No context was available matching config '%s'.\n",
                     config->getTag().c_str());
            return;
        }

        Config target = {
            gpuConfig->getTag(),
            Benchmark::kGPU_Backend,
            colorType,
            kPremul_SkAlphaType,
            sk_ref_sp(colorSpace),
            sampleCount,
            ctxType,
            ctxOverrides,
            gpuConfig->getUseDIText()
        };

        configs->push_back(target);
        return;
    }

    // CPU configs: expand the tag into a Config when it matches and --cpu is on.
#define CPU_CONFIG(name, backend, color, alpha, colorSpace)                \
    if (config->getTag().equals(#name)) {                                  \
        if (!FLAGS_cpu) {                                                  \
            SkDebugf("Skipping config '%s' as requested.\n",               \
                     config->getTag().c_str());                            \
            return;                                                        \
        }                                                                  \
        Config config = {                                                  \
            SkString(#name), Benchmark::backend, color, alpha, colorSpace, \
            0, kBogusContextType, kBogusContextOverrides, false            \
        };                                                                 \
        configs->push_back(config);                                        \
        return;                                                            \
    }

    CPU_CONFIG(nonrendering, kNonRendering_Backend,
               kUnknown_SkColorType, kUnpremul_SkAlphaType, nullptr)

    CPU_CONFIG(a8, kRaster_Backend, kAlpha_8_SkColorType, kPremul_SkAlphaType, nullptr)
    CPU_CONFIG(8888, kRaster_Backend, kN32_SkColorType, kPremul_SkAlphaType, nullptr)
    CPU_CONFIG(565, kRaster_Backend, kRGB_565_SkColorType, kOpaque_SkAlphaType, nullptr)

    // 'narrow' has a gamut narrower than sRGB, and different transfer function.
    auto narrow = SkColorSpace::MakeRGB(SkNamedTransferFn::k2Dot2, gNarrow_toXYZD50),
           srgb = SkColorSpace::MakeSRGB(),
     srgbLinear = SkColorSpace::MakeSRGBLinear();

    CPU_CONFIG(    f16, kRaster_Backend,  kRGBA_F16_SkColorType, kPremul_SkAlphaType, srgbLinear)
    CPU_CONFIG(   srgb, kRaster_Backend, kRGBA_8888_SkColorType, kPremul_SkAlphaType, srgb      )
    CPU_CONFIG(  esrgb, kRaster_Backend,  kRGBA_F16_SkColorType, kPremul_SkAlphaType, srgb      )
    CPU_CONFIG( narrow, kRaster_Backend, kRGBA_8888_SkColorType, kPremul_SkAlphaType, narrow    )
    CPU_CONFIG(enarrow, kRaster_Backend,  kRGBA_F16_SkColorType, kPremul_SkAlphaType, narrow    )

#undef CPU_CONFIG

    SkDebugf("Unknown config '%s'.\n", config->getTag().c_str());
}
550
// Append all configs that are enabled and supported.
// Exits the process if any explicitly requested config failed to be created.
void create_configs(SkTArray<Config>* configs) {
    SkCommandLineConfigArray array;
    ParseConfigs(FLAGS_config, &array);
    for (int i = 0; i < array.count(); ++i) {
        create_config(array[i].get(), configs);
    }

    // If only the default configs were requested, we're okay.
    if (array.count() == 0 || FLAGS_config.count() == 0 ||
        // Otherwise, make sure that all specified configs have been created.
        array.count() == configs->count()) {
        return;
    }
    exit(1);
}
567
568 // disable warning : switch statement contains default but no 'case' labels
569 #if defined _WIN32
570 #pragma warning ( push )
571 #pragma warning ( disable : 4065 )
572 #endif
573
574 // If bench is enabled for config, returns a Target* for it, otherwise nullptr.
is_enabled(Benchmark * bench,const Config & config)575 static Target* is_enabled(Benchmark* bench, const Config& config) {
576 if (!bench->isSuitableFor(config.backend)) {
577 return nullptr;
578 }
579
580 SkImageInfo info = SkImageInfo::Make(bench->getSize().fX, bench->getSize().fY,
581 config.color, config.alpha, config.colorSpace);
582
583 Target* target = nullptr;
584
585 switch (config.backend) {
586 case Benchmark::kGPU_Backend:
587 target = new GPUTarget(config);
588 break;
589 default:
590 target = new Target(config);
591 break;
592 }
593
594 if (!target->init(info, bench)) {
595 delete target;
596 return nullptr;
597 }
598 return target;
599 }
600
601 #if defined _WIN32
602 #pragma warning ( pop )
603 #endif
604
605 #ifdef SK_ENABLE_ANDROID_UTILS
// Reports whether `encoded` is usable for a BitmapRegionDecoder bench at the
// given sampleSize: subset decoding must be supported and the image must be
// large enough for a minOutputSize x minOutputSize subset. On success, writes
// the full image dimensions to *width/*height so callers can choose subsets.
// NOTE(review): the colorType parameter is currently unused here.
static bool valid_brd_bench(sk_sp<SkData> encoded, SkColorType colorType, uint32_t sampleSize,
        uint32_t minOutputSize, int* width, int* height) {
    auto brd = android::skia::BitmapRegionDecoder::Make(encoded);
    if (nullptr == brd) {
        // This indicates that subset decoding is not supported for a particular image format.
        return false;
    }

    if (sampleSize * minOutputSize > (uint32_t) brd->width() || sampleSize * minOutputSize >
            (uint32_t) brd->height()) {
        // This indicates that the image is not large enough to decode a
        // minOutputSize x minOutputSize subset at the given sampleSize.
        return false;
    }

    // Set the image width and height. The calling code will use this to choose subsets to decode.
    *width = brd->width();
    *height = brd->height();
    return true;
}
626 #endif
627
// Destroys a Target created by is_enabled(). (GPU targets release their
// surface and context in ~GPUTarget.)
static void cleanup_run(Target* target) {
    delete target;
}
631
collect_files(const CommandLineFlags::StringArray & paths,const char * ext,SkTArray<SkString> * list)632 static void collect_files(const CommandLineFlags::StringArray& paths,
633 const char* ext,
634 SkTArray<SkString>* list) {
635 for (int i = 0; i < paths.count(); ++i) {
636 if (SkStrEndsWith(paths[i], ext)) {
637 list->push_back(SkString(paths[i]));
638 } else {
639 SkOSFile::Iter it(paths[i], ext);
640 SkString path;
641 while (it.next(&path)) {
642 list->push_back(SkOSPath::Join(paths[i], path.c_str()));
643 }
644 }
645 }
646 }
647
648 class BenchmarkStream {
649 public:
    // Gathers every benchmark source up front: registered micro benches and
    // GMs, plus .skp/.svg/.trace files from their flag-specified directories.
    // Parses the --clip, --scales, and --zoom geometry flags and collects the
    // --images list; any parse or collection failure exits the process.
    BenchmarkStream() : fBenches(BenchRegistry::Head())
                      , fGMs(skiagm::GMRegistry::Head()) {
        collect_files(FLAGS_skps, ".skp", &fSKPs);
        collect_files(FLAGS_svgs, ".svg", &fSVGs);
        collect_files(FLAGS_texttraces, ".trace", &fTextBlobTraces);

        if (4 != sscanf(FLAGS_clip[0], "%d,%d,%d,%d",
                        &fClip.fLeft, &fClip.fTop, &fClip.fRight, &fClip.fBottom)) {
            SkDebugf("Can't parse %s from --clip as an SkIRect.\n", FLAGS_clip[0]);
            exit(1);
        }

        for (int i = 0; i < FLAGS_scales.count(); i++) {
            // Note: fScales.push_back() returns a reference to the new slot,
            // which sscanf fills in directly.
            if (1 != sscanf(FLAGS_scales[i], "%f", &fScales.push_back())) {
                SkDebugf("Can't parse %s from --scales as an SkScalar.\n", FLAGS_scales[i]);
                exit(1);
            }
        }

        if (2 != sscanf(FLAGS_zoom[0], "%f,%lf", &fZoomMax, &fZoomPeriodMs)) {
            SkDebugf("Can't parse %s from --zoom as a zoomMax,zoomPeriodMs.\n", FLAGS_zoom[0]);
            exit(1);
        }

        // Prepare the images for decoding
        if (!CollectImages(FLAGS_images, &fImages)) {
            exit(1);
        }

        // Choose the candidate color types for image decoding
        fColorTypes.push_back(kN32_SkColorType);
        if (!FLAGS_simpleCodec) {
            fColorTypes.push_back(kRGB_565_SkColorType);
            fColorTypes.push_back(kAlpha_8_SkColorType);
            fColorTypes.push_back(kGray_8_SkColorType);
        }
    }
687
ReadPicture(const char * path)688 static sk_sp<SkPicture> ReadPicture(const char* path) {
689 // Not strictly necessary, as it will be checked again later,
690 // but helps to avoid a lot of pointless work if we're going to skip it.
691 if (CommandLineFlags::ShouldSkip(FLAGS_match, SkOSPath::Basename(path).c_str())) {
692 return nullptr;
693 }
694
695 std::unique_ptr<SkStream> stream = SkStream::MakeFromFile(path);
696 if (!stream) {
697 SkDebugf("Could not read %s.\n", path);
698 return nullptr;
699 }
700
701 return SkPicture::MakeFromStream(stream.get());
702 }
703
    // Renders the SVG at `path` into an SkPicture. Requires SK_XML; returns
    // nullptr when XML support is compiled out or the file can't be read/parsed.
    static sk_sp<SkPicture> ReadSVGPicture(const char* path) {
        sk_sp<SkData> data(SkData::MakeFromFileName(path));
        if (!data) {
            SkDebugf("Could not read %s.\n", path);
            return nullptr;
        }

#ifdef SK_XML
        SkMemoryStream stream(std::move(data));
        sk_sp<SkSVGDOM> svgDom = SkSVGDOM::MakeFromStream(stream);
        if (!svgDom) {
            SkDebugf("Could not parse %s.\n", path);
            return nullptr;
        }

        // Use the intrinsic SVG size if available, otherwise fall back to a default value.
        static const SkSize kDefaultContainerSize = SkSize::Make(128, 128);
        if (svgDom->containerSize().isEmpty()) {
            svgDom->setContainerSize(kDefaultContainerSize);
        }

        SkPictureRecorder recorder;
        svgDom->render(recorder.beginRecording(svgDom->containerSize().width(),
                                               svgDom->containerSize().height()));
        return recorder.finishRecordingAsPicture();
#else
        return nullptr;
#endif // SK_XML
    }
733
next()734 Benchmark* next() {
735 std::unique_ptr<Benchmark> bench;
736 do {
737 bench.reset(this->rawNext());
738 if (!bench) {
739 return nullptr;
740 }
741 } while (CommandLineFlags::ShouldSkip(FLAGS_sourceType, fSourceType) ||
742 CommandLineFlags::ShouldSkip(FLAGS_benchType, fBenchType));
743 return bench.release();
744 }
745
rawNext()746 Benchmark* rawNext() {
747 if (fBenches) {
748 Benchmark* bench = fBenches->get()(nullptr);
749 fBenches = fBenches->next();
750 fSourceType = "bench";
751 fBenchType = "micro";
752 return bench;
753 }
754
755 while (fGMs) {
756 std::unique_ptr<skiagm::GM> gm = fGMs->get()();
757 fGMs = fGMs->next();
758 if (gm->runAsBench()) {
759 fSourceType = "gm";
760 fBenchType = "micro";
761 return new GMBench(std::move(gm));
762 }
763 }
764
765 while (fCurrentTextBlobTrace < fTextBlobTraces.count()) {
766 SkString path = fTextBlobTraces[fCurrentTextBlobTrace++];
767 SkString basename = SkOSPath::Basename(path.c_str());
768 static constexpr char kEnding[] = ".trace";
769 if (basename.endsWith(kEnding)) {
770 basename.remove(basename.size() - strlen(kEnding), strlen(kEnding));
771 }
772 fSourceType = "texttrace";
773 fBenchType = "micro";
774 return CreateDiffCanvasBench(
775 SkStringPrintf("SkDiffBench-%s", basename.c_str()),
776 [path](){ return SkStream::MakeFromFile(path.c_str()); });
777 }
778
779 // First add all .skps as RecordingBenches.
780 while (fCurrentRecording < fSKPs.count()) {
781 const SkString& path = fSKPs[fCurrentRecording++];
782 sk_sp<SkPicture> pic = ReadPicture(path.c_str());
783 if (!pic) {
784 continue;
785 }
786 SkString name = SkOSPath::Basename(path.c_str());
787 fSourceType = "skp";
788 fBenchType = "recording";
789 fSKPBytes = static_cast<double>(pic->approximateBytesUsed());
790 fSKPOps = pic->approximateOpCount();
791 return new RecordingBench(name.c_str(), pic.get(), FLAGS_bbh);
792 }
793
794 // Add all .skps as DeserializePictureBenchs.
795 while (fCurrentDeserialPicture < fSKPs.count()) {
796 const SkString& path = fSKPs[fCurrentDeserialPicture++];
797 sk_sp<SkData> data = SkData::MakeFromFileName(path.c_str());
798 if (!data) {
799 continue;
800 }
801 SkString name = SkOSPath::Basename(path.c_str());
802 fSourceType = "skp";
803 fBenchType = "deserial";
804 fSKPBytes = static_cast<double>(data->size());
805 fSKPOps = 0;
806 return new DeserializePictureBench(name.c_str(), std::move(data));
807 }
808
809 // Then once each for each scale as SKPBenches (playback).
810 while (fCurrentScale < fScales.count()) {
811 while (fCurrentSKP < fSKPs.count()) {
812 const SkString& path = fSKPs[fCurrentSKP++];
813 sk_sp<SkPicture> pic = ReadPicture(path.c_str());
814 if (!pic) {
815 continue;
816 }
817
818 if (FLAGS_bbh) {
819 // The SKP we read off disk doesn't have a BBH. Re-record so it grows one.
820 SkRTreeFactory factory;
821 SkPictureRecorder recorder;
822 pic->playback(recorder.beginRecording(pic->cullRect().width(),
823 pic->cullRect().height(),
824 &factory));
825 pic = recorder.finishRecordingAsPicture();
826 }
827 SkString name = SkOSPath::Basename(path.c_str());
828 fSourceType = "skp";
829 fBenchType = "playback";
830 return new SKPBench(name.c_str(), pic.get(), fClip, fScales[fCurrentScale],
831 FLAGS_loopSKP);
832 }
833
834 while (fCurrentSVG < fSVGs.count()) {
835 const char* path = fSVGs[fCurrentSVG++].c_str();
836 if (sk_sp<SkPicture> pic = ReadSVGPicture(path)) {
837 fSourceType = "svg";
838 fBenchType = "playback";
839 return new SKPBench(SkOSPath::Basename(path).c_str(), pic.get(), fClip,
840 fScales[fCurrentScale], FLAGS_loopSKP);
841 }
842 }
843
844 fCurrentSKP = 0;
845 fCurrentSVG = 0;
846 fCurrentScale++;
847 }
848
849 // Now loop over each skp again if we have an animation
850 if (fZoomMax != 1.0f && fZoomPeriodMs > 0) {
851 while (fCurrentAnimSKP < fSKPs.count()) {
852 const SkString& path = fSKPs[fCurrentAnimSKP];
853 sk_sp<SkPicture> pic = ReadPicture(path.c_str());
854 if (!pic) {
855 fCurrentAnimSKP++;
856 continue;
857 }
858
859 fCurrentAnimSKP++;
860 SkString name = SkOSPath::Basename(path.c_str());
861 sk_sp<SKPAnimationBench::Animation> animation =
862 SKPAnimationBench::MakeZoomAnimation(fZoomMax, fZoomPeriodMs);
863 return new SKPAnimationBench(name.c_str(), pic.get(), fClip, std::move(animation),
864 FLAGS_loopSKP);
865 }
866 }
867
868 for (; fCurrentCodec < fImages.count(); fCurrentCodec++) {
869 fSourceType = "image";
870 fBenchType = "skcodec";
871 const SkString& path = fImages[fCurrentCodec];
872 if (CommandLineFlags::ShouldSkip(FLAGS_match, path.c_str())) {
873 continue;
874 }
875 sk_sp<SkData> encoded(SkData::MakeFromFileName(path.c_str()));
876 std::unique_ptr<SkCodec> codec(SkCodec::MakeFromData(encoded));
877 if (!codec) {
878 // Nothing to time.
879 SkDebugf("Cannot find codec for %s\n", path.c_str());
880 continue;
881 }
882
883 while (fCurrentColorType < fColorTypes.count()) {
884 const SkColorType colorType = fColorTypes[fCurrentColorType];
885
886 SkAlphaType alphaType = codec->getInfo().alphaType();
887 if (FLAGS_simpleCodec) {
888 if (kUnpremul_SkAlphaType == alphaType) {
889 alphaType = kPremul_SkAlphaType;
890 }
891
892 fCurrentColorType++;
893 } else {
894 switch (alphaType) {
895 case kOpaque_SkAlphaType:
896 // We only need to test one alpha type (opaque).
897 fCurrentColorType++;
898 break;
899 case kUnpremul_SkAlphaType:
900 case kPremul_SkAlphaType:
901 if (0 == fCurrentAlphaType) {
902 // Test unpremul first.
903 alphaType = kUnpremul_SkAlphaType;
904 fCurrentAlphaType++;
905 } else {
906 // Test premul.
907 alphaType = kPremul_SkAlphaType;
908 fCurrentAlphaType = 0;
909 fCurrentColorType++;
910 }
911 break;
912 default:
913 SkASSERT(false);
914 fCurrentColorType++;
915 break;
916 }
917 }
918
919 // Make sure we can decode to this color type and alpha type.
920 SkImageInfo info =
921 codec->getInfo().makeColorType(colorType).makeAlphaType(alphaType);
922 const size_t rowBytes = info.minRowBytes();
923 SkAutoMalloc storage(info.computeByteSize(rowBytes));
924
925 const SkCodec::Result result = codec->getPixels(
926 info, storage.get(), rowBytes);
927 switch (result) {
928 case SkCodec::kSuccess:
929 case SkCodec::kIncompleteInput:
930 return new CodecBench(SkOSPath::Basename(path.c_str()),
931 encoded.get(), colorType, alphaType);
932 case SkCodec::kInvalidConversion:
933 // This is okay. Not all conversions are valid.
934 break;
935 default:
936 // This represents some sort of failure.
937 SkASSERT(false);
938 break;
939 }
940 }
941 fCurrentColorType = 0;
942 }
943
944 // Run AndroidCodecBenches
945 const int sampleSizes[] = { 2, 4, 8 };
946 for (; fCurrentAndroidCodec < fImages.count(); fCurrentAndroidCodec++) {
947 fSourceType = "image";
948 fBenchType = "skandroidcodec";
949
950 const SkString& path = fImages[fCurrentAndroidCodec];
951 if (CommandLineFlags::ShouldSkip(FLAGS_match, path.c_str())) {
952 continue;
953 }
954 sk_sp<SkData> encoded(SkData::MakeFromFileName(path.c_str()));
955 std::unique_ptr<SkAndroidCodec> codec(SkAndroidCodec::MakeFromData(encoded));
956 if (!codec) {
957 // Nothing to time.
958 SkDebugf("Cannot find codec for %s\n", path.c_str());
959 continue;
960 }
961
962 while (fCurrentSampleSize < (int) SK_ARRAY_COUNT(sampleSizes)) {
963 int sampleSize = sampleSizes[fCurrentSampleSize];
964 fCurrentSampleSize++;
965 if (10 * sampleSize > std::min(codec->getInfo().width(), codec->getInfo().height())) {
966 // Avoid benchmarking scaled decodes of already small images.
967 break;
968 }
969
970 return new AndroidCodecBench(SkOSPath::Basename(path.c_str()),
971 encoded.get(), sampleSize);
972 }
973 fCurrentSampleSize = 0;
974 }
975
976 #ifdef SK_ENABLE_ANDROID_UTILS
977 // Run the BRDBenches
978 // We intend to create benchmarks that model the use cases in
979 // android/libraries/social/tiledimage. In this library, an image is decoded in 512x512
980 // tiles. The image can be translated freely, so the location of a tile may be anywhere in
981 // the image. For that reason, we will benchmark decodes in five representative locations
982 // in the image. Additionally, this use case utilizes power of two scaling, so we will
983 // test on power of two sample sizes. The output tile is always 512x512, so, when a
984 // sampleSize is used, the size of the subset that is decoded is always
985 // (sampleSize*512)x(sampleSize*512).
986 // There are a few good reasons to only test on power of two sample sizes at this time:
987 // All use cases we are aware of only scale by powers of two.
988 // PNG decodes use the indicated sampling strategy regardless of the sample size, so
989 // these tests are sufficient to provide good coverage of our scaling options.
990 const uint32_t brdSampleSizes[] = { 1, 2, 4, 8, 16 };
991 const uint32_t minOutputSize = 512;
992 for (; fCurrentBRDImage < fImages.count(); fCurrentBRDImage++) {
993 fSourceType = "image";
994 fBenchType = "BRD";
995
996 const SkString& path = fImages[fCurrentBRDImage];
997 if (CommandLineFlags::ShouldSkip(FLAGS_match, path.c_str())) {
998 continue;
999 }
1000
1001 while (fCurrentColorType < fColorTypes.count()) {
1002 while (fCurrentSampleSize < (int) SK_ARRAY_COUNT(brdSampleSizes)) {
1003 while (fCurrentSubsetType <= kLastSingle_SubsetType) {
1004
1005 sk_sp<SkData> encoded(SkData::MakeFromFileName(path.c_str()));
1006 const SkColorType colorType = fColorTypes[fCurrentColorType];
1007 uint32_t sampleSize = brdSampleSizes[fCurrentSampleSize];
1008 int currentSubsetType = fCurrentSubsetType++;
1009
1010 int width = 0;
1011 int height = 0;
1012 if (!valid_brd_bench(encoded, colorType, sampleSize, minOutputSize,
1013 &width, &height)) {
1014 break;
1015 }
1016
1017 SkString basename = SkOSPath::Basename(path.c_str());
1018 SkIRect subset;
1019 const uint32_t subsetSize = sampleSize * minOutputSize;
1020 switch (currentSubsetType) {
1021 case kTopLeft_SubsetType:
1022 basename.append("_TopLeft");
1023 subset = SkIRect::MakeXYWH(0, 0, subsetSize, subsetSize);
1024 break;
1025 case kTopRight_SubsetType:
1026 basename.append("_TopRight");
1027 subset = SkIRect::MakeXYWH(width - subsetSize, 0, subsetSize,
1028 subsetSize);
1029 break;
1030 case kMiddle_SubsetType:
1031 basename.append("_Middle");
1032 subset = SkIRect::MakeXYWH((width - subsetSize) / 2,
1033 (height - subsetSize) / 2, subsetSize, subsetSize);
1034 break;
1035 case kBottomLeft_SubsetType:
1036 basename.append("_BottomLeft");
1037 subset = SkIRect::MakeXYWH(0, height - subsetSize, subsetSize,
1038 subsetSize);
1039 break;
1040 case kBottomRight_SubsetType:
1041 basename.append("_BottomRight");
1042 subset = SkIRect::MakeXYWH(width - subsetSize,
1043 height - subsetSize, subsetSize, subsetSize);
1044 break;
1045 default:
1046 SkASSERT(false);
1047 }
1048
1049 return new BitmapRegionDecoderBench(basename.c_str(), encoded.get(),
1050 colorType, sampleSize, subset);
1051 }
1052 fCurrentSubsetType = 0;
1053 fCurrentSampleSize++;
1054 }
1055 fCurrentSampleSize = 0;
1056 fCurrentColorType++;
1057 }
1058 fCurrentColorType = 0;
1059 }
1060 #endif // SK_ENABLE_ANDROID_UTILS
1061
1062 return nullptr;
1063 }
1064
fillCurrentOptions(NanoJSONResultsWriter & log) const1065 void fillCurrentOptions(NanoJSONResultsWriter& log) const {
1066 log.appendString("source_type", fSourceType);
1067 log.appendString("bench_type", fBenchType);
1068 if (0 == strcmp(fSourceType, "skp")) {
1069 log.appendString("clip",
1070 SkStringPrintf("%d %d %d %d", fClip.fLeft, fClip.fTop,
1071 fClip.fRight, fClip.fBottom).c_str());
1072 SkASSERT_RELEASE(fCurrentScale < fScales.count()); // debugging paranoia
1073 log.appendString("scale", SkStringPrintf("%.2g", fScales[fCurrentScale]).c_str());
1074 }
1075 }
1076
fillCurrentMetrics(NanoJSONResultsWriter & log) const1077 void fillCurrentMetrics(NanoJSONResultsWriter& log) const {
1078 if (0 == strcmp(fBenchType, "recording")) {
1079 log.appendMetric("bytes", fSKPBytes);
1080 log.appendMetric("ops", fSKPOps);
1081 }
1082 }
1083
private:
#ifdef SK_ENABLE_ANDROID_UTILS
    // Which region of an image a BitmapRegionDecoderBench decodes. The first
    // five values pick one fixed tile location; kLastSingle_SubsetType marks
    // the end of the single-tile range iterated by next().
    enum SubsetType {
        kTopLeft_SubsetType     = 0,
        kTopRight_SubsetType    = 1,
        kMiddle_SubsetType      = 2,
        kBottomLeft_SubsetType  = 3,
        kBottomRight_SubsetType = 4,
        kTranslate_SubsetType   = 5,
        kZoom_SubsetType        = 6,
        kLast_SubsetType        = kZoom_SubsetType,
        kLastSingle_SubsetType  = kBottomRight_SubsetType,
    };
#endif

    // Registries of compiled-in micro-benches and GMs to iterate over.
    const BenchRegistry* fBenches;
    const skiagm::GMRegistry* fGMs;
    SkIRect fClip;                            // Clip applied to SKP playback benches.
    SkTArray<SkScalar> fScales;               // Scales at which each SKP/SVG is benched.
    SkTArray<SkString> fSKPs;                 // File paths of SKPs to bench.
    SkTArray<SkString> fSVGs;                 // File paths of SVGs to bench.
    SkTArray<SkString> fTextBlobTraces;       // File paths of text blob traces.
    SkTArray<SkString> fImages;               // File paths of images for codec/BRD benches.
    SkTArray<SkColorType, true> fColorTypes;  // Color types to decode images to.
    SkScalar fZoomMax;                        // Max zoom for SKP animations; 1.0f disables them.
    double fZoomPeriodMs;                     // Zoom animation period, in milliseconds.

    // Size/op-count of the most recent recording bench, reported by
    // fillCurrentMetrics() when fBenchType == "recording".
    double fSKPBytes, fSKPOps;

    const char* fSourceType;  // What we're benching: bench, GM, SKP, ...
    const char* fBenchType;   // How we bench it: micro, recording, playback, ...

    // Cursors recording how far next() has advanced through each source list;
    // they persist across calls so the stream resumes where it left off.
    int fCurrentRecording = 0;
    int fCurrentDeserialPicture = 0;
    int fCurrentScale = 0;
    int fCurrentSKP = 0;
    int fCurrentSVG = 0;
    int fCurrentTextBlobTrace = 0;
    int fCurrentCodec = 0;
    int fCurrentAndroidCodec = 0;
#ifdef SK_ENABLE_ANDROID_UTILS
    int fCurrentBRDImage = 0;
    int fCurrentSubsetType = 0;
#endif
    int fCurrentColorType = 0;
    int fCurrentAlphaType = 0;
    int fCurrentSampleSize = 0;
    int fCurrentAnimSKP = 0;
};
1132
1133 // Some runs (mostly, Valgrind) are so slow that the bot framework thinks we've hung.
1134 // This prints something every once in a while so that it knows we're still working.
start_keepalive()1135 static void start_keepalive() {
1136 static std::thread* intentionallyLeaked = new std::thread([]{
1137 for (;;) {
1138 static const int kSec = 1200;
1139 #if defined(SK_BUILD_FOR_WIN)
1140 Sleep(kSec * 1000);
1141 #else
1142 sleep(kSec);
1143 #endif
1144 SkDebugf("\nBenchmarks still running...\n");
1145 }
1146 });
1147 (void)intentionallyLeaked;
1148 }
1149
// Entry point: parses flags, sets up logging/JSON output, then runs every
// benchmark produced by BenchmarkStream against every enabled config,
// timing each and emitting results to stdout and the JSON log.
int main(int argc, char** argv) {
    CommandLineFlags::Parse(argc, argv);

    initializeEventTracingForTools();

#if defined(SK_BUILD_FOR_IOS)
    cd_Documents();
#endif
    SetupCrashHandler();
    SkAutoGraphics ag;
    SkTaskGroup::Enabler enabled(FLAGS_threads);

    SetCtxOptionsFromCommonFlags(&grContextOpts);

    // With a user-fixed loop count there is no auto-tuning, so extra samples
    // and GPU frame-lag pipelining add nothing.
    if (kAutoTuneLoops != FLAGS_loops) {
        FLAGS_samples = 1;
        FLAGS_gpuFrameLag = 0;
    }

    if (!FLAGS_writePath.isEmpty()) {
        SkDebugf("Writing files to %s.\n", FLAGS_writePath[0]);
        if (!sk_mkdir(FLAGS_writePath[0])) {
            SkDebugf("Could not create %s. Files won't be written.\n", FLAGS_writePath[0]);
            FLAGS_writePath.set(0, nullptr);
        }
    }

    // Results go to a null stream unless --outResultsFile is given (Release only).
    std::unique_ptr<SkWStream> logStream(new SkNullWStream);
    if (!FLAGS_outResultsFile.isEmpty()) {
#if defined(SK_RELEASE)
        // SkJSONWriter uses a 32k in-memory cache, so it only flushes occasionally and is well
        // equipped for a stream that re-opens, appends, and closes the file on every write.
        logStream.reset(new NanoFILEAppendAndCloseStream(FLAGS_outResultsFile[0]));
#else
        SkDebugf("I'm ignoring --outResultsFile because this is a Debug build.");
        return 1;
#endif
    }
    NanoJSONResultsWriter log(logStream.get(), SkJSONWriter::Mode::kPretty);
    log.beginObject(); // root

    // --properties and --key are key/value pair lists; an odd count is a usage error.
    if (1 == FLAGS_properties.count() % 2) {
        SkDebugf("ERROR: --properties must be passed with an even number of arguments.\n");
        return 1;
    }
    for (int i = 1; i < FLAGS_properties.count(); i += 2) {
        log.appendString(FLAGS_properties[i-1], FLAGS_properties[i]);
    }

    if (1 == FLAGS_key.count() % 2) {
        SkDebugf("ERROR: --key must be passed with an even number of arguments.\n");
        return 1;
    }
    if (FLAGS_key.count()) {
        log.beginObject("key");
        for (int i = 1; i < FLAGS_key.count(); i += 2) {
            log.appendString(FLAGS_key[i - 1], FLAGS_key[i]);
        }
        log.endObject(); // key
    }

    // Measured timer overhead is subtracted out of CPU bench loop tuning later.
    const double overhead = estimate_timer_overhead();
    SkDebugf("Timer overhead: %s\n", HUMANIZE(overhead));

    SkTArray<double> samples;

    // Print the table header matching whichever output mode is active.
    if (kAutoTuneLoops != FLAGS_loops) {
        SkDebugf("Fixed number of loops; times would only be misleading so we won't print them.\n");
    } else if (FLAGS_quiet) {
        SkDebugf("! -> high variance, ? -> moderate variance\n");
        SkDebugf("    micros   \tbench\n");
    } else if (FLAGS_ms) {
        SkDebugf("curr/maxrss\tloops\tmin\tmedian\tmean\tmax\tstddev\tsamples\tconfig\tbench\n");
    } else {
        SkDebugf("curr/maxrss\tloops\tmin\tmedian\tmean\tmax\tstddev\t%-*s\tconfig\tbench\n",
                 FLAGS_samples, "samples");
    }

    SkTArray<Config> configs;
    create_configs(&configs);

    if (FLAGS_keepAlive) {
        start_keepalive();
    }

    SetAnalyticAAFromCommonFlags();

    // Propagate blitter/JIT selection flags into Skia's globals.
    gSkForceRasterPipelineBlitter = FLAGS_forceRasterPipeline;
    gUseSkVMBlitter = FLAGS_skvm;
    gSkVMAllowJIT = gSkVMJITViaDylib = FLAGS_jit;

    int runs = 0;
    BenchmarkStream benchStream;
    log.beginObject("results");
    AutoreleasePool pool;
    // Outer loop: one iteration per benchmark the stream produces.
    while (Benchmark* b = benchStream.next()) {
        std::unique_ptr<Benchmark> bench(b);
        if (CommandLineFlags::ShouldSkip(FLAGS_match, bench->getUniqueName())) {
            continue;
        }

        if (!configs.empty()) {
            log.beginBench(bench->getUniqueName(), bench->getSize().fX, bench->getSize().fY);
            bench->delayedSetup();
        }
        // Inner loop: run this bench on every config it supports.
        for (int i = 0; i < configs.count(); ++i) {
            Target* target = is_enabled(b, configs[i]);
            if (!target) {
                continue;
            }

            // During HWUI output this canvas may be nullptr.
            SkCanvas* canvas = target->getCanvas();
            const char* config = target->config.name.c_str();

            if (FLAGS_pre_log || FLAGS_dryRun) {
                SkDebugf("Running %s\t%s\n"
                         , bench->getUniqueName()
                         , config);
                if (FLAGS_dryRun) {
                    continue;
                }
            }

            if (FLAGS_purgeBetweenBenches) {
                SkGraphics::PurgeAllCaches();
            }

            TRACE_EVENT2("skia", "Benchmark", "name", TRACE_STR_COPY(bench->getUniqueName()),
                                              "config", TRACE_STR_COPY(config));

            target->setup();
            bench->perCanvasPreDraw(canvas);

            // Pick a loop count: frame-timed (GPU) targets and CPU targets
            // are tuned by different helpers.
            int maxFrameLag;
            int loops = target->needsFrameTiming(&maxFrameLag)
                ? setup_gpu_bench(target, bench.get(), maxFrameLag)
                : setup_cpu_bench(overhead, target, bench.get());

            if (kFailedLoops == loops) {
                // Can't be timed. A warning note has already been printed.
                cleanup_run(target);
                continue;
            }

            if (runs == 0 && FLAGS_ms < 1000) {
                // Run the first bench for 1000ms to warm up the nanobench if FLAGS_ms < 1000.
                // Otherwise, the first few benches' measurements will be inaccurate.
                auto stop = now_ms() + 1000;
                do {
                    time(loops, bench.get(), target);
                    pool.drain();
                } while (now_ms() < stop);
            }

            // Collect samples: either for a fixed wall-clock duration (--ms)
            // or a fixed sample count (--samples).
            if (FLAGS_ms) {
                samples.reset();
                auto stop = now_ms() + FLAGS_ms;
                do {
                    samples.push_back(time(loops, bench.get(), target) / loops);
                    pool.drain();
                } while (now_ms() < stop);
            } else {
                samples.reset(FLAGS_samples);
                for (int s = 0; s < FLAGS_samples; s++) {
                    samples[s] = time(loops, bench.get(), target) / loops;
                    pool.drain();
                }
            }

            // Scale each result to the benchmark's own units, time/unit.
            for (double& sample : samples) {
                sample *= (1.0 / bench->getUnits());
            }

            SkTArray<SkString> keys;
            SkTArray<double> values;
            bool gpuStatsDump = FLAGS_gpuStatsDump && Benchmark::kGPU_Backend == configs[i].backend;
            if (gpuStatsDump) {
                // TODO cache stats
                bench->getGpuStats(canvas, &keys, &values);
            }

            bench->perCanvasPostDraw(canvas);

            // Optionally dump the rendered output as a PNG for inspection.
            if (Benchmark::kNonRendering_Backend != target->config.backend &&
                !FLAGS_writePath.isEmpty() && FLAGS_writePath[0]) {
                SkString pngFilename = SkOSPath::Join(FLAGS_writePath[0], config);
                pngFilename = SkOSPath::Join(pngFilename.c_str(), bench->getUniqueName());
                pngFilename.append(".png");
                write_canvas_png(target, pngFilename);
            }

            // Building stats.plot often shows up in profiles,
            // so skip building it when we're not going to print it anyway.
            const bool want_plot = !FLAGS_quiet;

            Stats stats(samples, want_plot);
            log.beginObject(config);

            log.beginObject("options");
            log.appendString("name", bench->getName());
            benchStream.fillCurrentOptions(log);
            target->fillOptions(log);
            log.endObject(); // options

            // Metrics
            log.appendMetric("min_ms", stats.min);
            log.beginArray("samples");
            for (double sample : samples) {
                log.appendDoubleDigits(sample, 16);
            }
            log.endArray(); // samples
            benchStream.fillCurrentMetrics(log);
            if (gpuStatsDump) {
                // dump to json, only SKPBench currently returns valid keys / values
                SkASSERT(keys.count() == values.count());
                for (int i = 0; i < keys.count(); i++) {
                    log.appendMetric(keys[i].c_str(), values[i]);
                }
            }

            log.endObject(); // config

            if (runs++ % FLAGS_flushEvery == 0) {
                log.flush();
            }

            // Console output, formatted per the active output mode.
            if (kAutoTuneLoops != FLAGS_loops) {
                if (configs.count() == 1) {
                    config = ""; // Only print the config if we run the same bench on more than one.
                }
                SkDebugf("%4d/%-4dMB\t%s\t%s\n"
                         , sk_tools::getCurrResidentSetSizeMB()
                         , sk_tools::getMaxResidentSetSizeMB()
                         , bench->getUniqueName()
                         , config);
            } else if (FLAGS_quiet) {
                // Flag noisy results: '?' above 5% stddev, '!' above 10%.
                const char* mark = " ";
                const double stddev_percent =
                    sk_ieee_double_divide(100 * sqrt(stats.var), stats.mean);
                if (stddev_percent >  5) mark = "?";
                if (stddev_percent > 10) mark = "!";

                SkDebugf("%10.2f %s\t%s\t%s\n",
                         stats.median*1e3, mark, bench->getUniqueName(), config);
            } else if (FLAGS_csv) {
                const double stddev_percent =
                    sk_ieee_double_divide(100 * sqrt(stats.var), stats.mean);
                SkDebugf("%g,%g,%g,%g,%g,%s,%s\n"
                         , stats.min
                         , stats.median
                         , stats.mean
                         , stats.max
                         , stddev_percent
                         , config
                         , bench->getUniqueName()
                         );
            } else {
                const char* format = "%4d/%-4dMB\t%d\t%s\t%s\t%s\t%s\t%.0f%%\t%s\t%s\t%s\n";
                const double stddev_percent =
                    sk_ieee_double_divide(100 * sqrt(stats.var), stats.mean);
                SkDebugf(format
                         , sk_tools::getCurrResidentSetSizeMB()
                         , sk_tools::getMaxResidentSetSizeMB()
                         , loops
                         , HUMANIZE(stats.min)
                         , HUMANIZE(stats.median)
                         , HUMANIZE(stats.mean)
                         , HUMANIZE(stats.max)
                         , stddev_percent
                         , FLAGS_ms ? to_string(samples.count()).c_str() : stats.plot.c_str()
                         , config
                         , bench->getUniqueName()
                         );
            }

            if (FLAGS_gpuStats && Benchmark::kGPU_Backend == configs[i].backend) {
                target->dumpStats();
            }

            if (FLAGS_verbose) {
                SkDebugf("Samples:  ");
                for (int i = 0; i < samples.count(); i++) {
                    SkDebugf("%s  ", HUMANIZE(samples[i]));
                }
                SkDebugf("%s\n", bench->getUniqueName());
            }
            cleanup_run(target);
            pool.drain();
        }
        if (!configs.empty()) {
            log.endBench();
        }
    }

    SkGraphics::PurgeAllCaches();

    // Report peak memory usage as a pseudo-bench so it lands in the same log.
    log.beginBench("memory_usage", 0, 0);
    log.beginObject("meta"); // config
    log.appendS32("max_rss_mb", sk_tools::getMaxResidentSetSizeMB());
    log.endObject(); // config
    log.endBench();

    RunSkSLMemoryBenchmarks(&log);

    log.endObject(); // results
    log.endObject(); // root
    log.flush();

    return 0;
}
1462