%% ``Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%%     http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
%% AB. All Rights Reserved.''
%%
%% $Id$
%%

%% Driver that compiles all benchmark modules (*_bm.erl) in the current
%% directory, runs them, and renders an html report of the results.
-module(bench).

%% User interface
-export([run/0]).

%% Exported to be used in spawn (see bm_run/3, which spawn_links
%% ?MODULE:measure/4 to time each benchmark in a fresh process).
-export([measure/4]).

%% Internal constants
%% Sentinel larger than any expected benchmark time; used as the initial
%% minimum when folding over measured times (see min_time_and_sort/1).
-define(MAX, 999999999999999).
%% Range argument for erlang:phash/2 when fingerprinting the benchmark
%% setup written to the *.bmres files (see run_benchmarks/1).
-define(RANGE_MAX, 16#7ffffff).

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%% Interface
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

%%---------------------------------------------------------------------------
%% run() -> _
%%
%% Compiles and runs all benchmarks in the current directory,
%% and creates a report (index.html).
%%---------------------------------------------------------------------------
run() ->
    run(compiler_options()).
44 45 46%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 47%%% Generic Benchmark functions 48%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 49 50%%--------------------------------------------------------------------------- 51%% compiler_options() -> OptionsList 52%% OptionsList = list() - See Erlang/OTP module compile 53%%--------------------------------------------------------------------------- 54compiler_options() -> 55 [report_errors, report_warnings]. 56 57%%--------------------------------------------------------------------------- 58%% run(OptionsList) -> 59%% OptionsList = list() - See Erlang/OTP module compile 60%% 61%% Help function to run/0. 62%%--------------------------------------------------------------------------- 63run(OptionsList) -> 64 Bms = compile_benchmarks(OptionsList), 65 run_benchmarks(Bms), 66 report(). 67 68%%--------------------------------------------------------------------------- 69%% compile_benchmarks(OptionsList) -> [BmInfo| _] 70%% OptionsList = list() - See Erlang/OTP module compile 71%% BmInfo = {Module, Iterations, [BmFunctionName| _]} 72%% Module = atom() 73%% Iterations = integer() 74%% BmFunctionName = atom() 75%% 76%% Compiles all benchmark modules in the current directory and 77%% returns info about the benchmarks. 78%%--------------------------------------------------------------------------- 79compile_benchmarks(OptionsList) -> 80 {ok, FilesInCurrentDir} = file:list_dir("."), 81 ErlFiles = [ErlFile || ErlFile <- lists:sort(FilesInCurrentDir), 82 lists:suffix(".erl", ErlFile)], 83 lists:foldr(fun(File, BmInfoAcc) -> 84 case lists:suffix("_bm.erl", File) of 85 true -> 86 BmInfo = bm_compile(File, OptionsList), 87 [BmInfo | BmInfoAcc]; 88 false -> 89 just_compile(File, OptionsList), 90 BmInfoAcc 91 end 92 end, [], ErlFiles). 
93 94%%--------------------------------------------------------------------------- 95%% just_compile(FileName, OptionsList) -> ok 96%% FileName = string() 97%% OptionsList = list() - See Erlang/OTP module compile 98%% 99%% Compiles a support module. 100%%--------------------------------------------------------------------------- 101just_compile(FileName, OptionsList) -> 102 io:format("Compiling ~s...\n", [FileName]), % Progress info to user 103 case c:c(FileName, OptionsList) of 104 {ok, _Mod} -> 105 ok; 106 %% If compilation fails there is no point in trying to continue 107 error -> 108 Reason = 109 lists:flatten( 110 io_lib:format("Could not compile file ~s", [FileName])), 111 exit(self(), Reason) 112 end. 113%%--------------------------------------------------------------------------- 114%% bm_compile(FileName, OptionsList) -> BmInfo 115%% FileName = string() 116%% OptionsList = list() - See Erlang/OTP module compile 117%% BmInfo = {Module, Iterations, [BmFunctionName| _]} 118%% Iterations = integer() 119%% Module = atom() 120%% BmFunctionName = atom() 121%% 122%% Compiles the benchmark module implemented in <FileName> and returns 123%% information about the benchmark tests. 124%%--------------------------------------------------------------------------- 125bm_compile(FileName, OptionsList) -> 126 io:format("Compiling ~s...\n", [FileName]), % Progress info to user 127 case c:c(FileName, OptionsList) of 128 {ok, Mod} -> 129 bm_cases(Mod); 130 %% If compilation fails there is no point in trying to continue 131 error -> 132 Reason = 133 lists:flatten( 134 io_lib:format("Could not compile file ~s", [FileName])), 135 exit(self(), Reason) 136 end. 
%%---------------------------------------------------------------------------
%% bm_cases(Module) -> {Module, Iter, [BmFunctionName |_]}
%%   Module = atom()
%%   Iter = integer()
%%   BmFunctionName = atom()
%%
%% Fetches the number of iterations and the names of the benchmark
%% functions for the module <Module> by calling Module:benchmarks().
%%---------------------------------------------------------------------------
bm_cases(Module) ->
    case catch Module:benchmarks() of
        %% Old-style integer/1 and list/1 guard tests replaced with the
        %% supported is_integer/1 and is_list/1 BIFs.
        {Iter, BmList} when is_integer(Iter), is_list(BmList) ->
            {Module, Iter, BmList};
        %% If the benchmark is incorrectly implemented there is no point
        %% in trying to continue.
        Other ->
            Reason =
                lists:flatten(
                  io_lib:format("Incorrect return value: ~p "
                                "from ~p:benchmarks()",
                                [Other, Module])),
            exit(self(), Reason)
    end.

%%---------------------------------------------------------------------------
%% run_benchmarks(Bms) ->
%%   Bms = [{Module, Iter, [BmFunctionName |_]} | _]
%%   Module = atom()
%%   Iter = integer()
%%   BmFunctionName = atom()
%%
%% Runs all the benchmark tests described in <Bms> and writes the raw
%% results to an intermediate "<Version>.<Machine>.bmres" file.
%%---------------------------------------------------------------------------
run_benchmarks(Bms) ->
    Ver = erlang:system_info(version),
    Machine = erlang:system_info(machine),
    SysInfo = {Ver, Machine},

    Res = [bms_run(Mod, Tests, Iter, SysInfo) || {Mod, Iter, Tests} <- Bms],

    %% Create an intermediate file that is later used to generate a
    %% benchmark report.
    Name = Ver ++ [$. | Machine] ++ ".bmres",
    {ok, IntermediateFile} = file:open(Name, [write]),

    %% Mark that identifies the version of the benchmark modules.
    %% NOTE: erlang:phash/2 is deliberately kept (instead of phash2/2):
    %% changing the hash would make the mark incompatible with any
    %% previously written *.bmres files (see read_bmres_file/2).
    io:format(IntermediateFile, "~p.\n", [erlang:phash(Bms, ?RANGE_MAX)]),

    io:format(IntermediateFile, "~p.\n", [Res]),
    file:close(IntermediateFile).
186 187%%--------------------------------------------------------------------------- 188%% bms_run(Module, BmTests, Iter, Info) -> 189%% Module = atom(), 190%% BmTests = [BmFunctionName|_], 191%% BmFunctionName = atom() 192%% Iter = integer(), 193%% SysInfo = {Ver, Machine} 194%% Ver = string() 195%% Machine = string() 196%% 197%% Runs all benchmark tests in module <Module>. 198%%--------------------------------------------------------------------------- 199bms_run(Module, BmTests, Iter, SysInfo) -> 200 io:format("Running ~s:", [Module]), % Progress info to user 201 Res = 202 {Module,{SysInfo,[{Bm, bm_run(Module, Bm, Iter)} || Bm <- BmTests]}}, 203 io:nl(), 204 Res. 205%%--------------------------------------------------------------------------- 206%% bm_run(Module, BmTest, Iter) -> Elapsed 207%% Module = atom(), 208%% BmTest = atom(), 209%% Iter = integer() 210%% Elapsed = integer() - elapsed time in milliseconds. 211%% 212%% Runs the benchmark Module:BmTest(Iter) 213%%--------------------------------------------------------------------------- 214bm_run(Module, BmTest, Iter) -> 215 io:format(" ~s", [BmTest]), % Progress info to user 216 spawn_link(?MODULE, measure, [self(), Module, BmTest, Iter]), 217 receive 218 {Elapsed, ok} -> 219 Elapsed; 220 {_Elapsed, Fault} -> 221 io:nl(), 222 Reason = 223 lists:flatten( 224 io_lib:format("~w", [Fault])), 225 exit(self(), Reason) 226 end. 227%%--------------------------------------------------------------------------- 228%% measure(Parent, Module, BmTest, Iter) -> _ 229%% Parent = pid(), 230%% Module = atom(), 231%% BmTest = atom(), 232%% Iter = integer() 233%% 234%% Measures the time it take to execute Module:Bm(Iter) 235%% and send the result to <Parent>. 
%%---------------------------------------------------------------------------
measure(Parent, Module, BmTest, Iter) ->
    statistics(runtime),
    %% Old-style catch kept on purpose: on a crash the {'EXIT', _} (or a
    %% thrown term) is delivered to the parent, which reports it in
    %% bm_run/3 instead of the whole run dying silently.
    Outcome = (catch apply(Module, BmTest, [Iter])),
    {_TotalRunTime, TimeSinceLastCall} = statistics(runtime),
    Parent ! {TimeSinceLastCall, Outcome}.


%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%% Report functions
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

%%---------------------------------------------------------------------------
%% report() -> _
%%
%% Creates a report of the benchmarking tests that appeals to a human.
%% Currently this means creating an html-file. (Other formats could be
%% added.)
%%---------------------------------------------------------------------------
report() ->
    {ok, AllFiles} = file:list_dir("."),
    ResultFiles = [File || File <- AllFiles, lists:suffix(".bmres", File)],
    create_report(fetch_bmres_data(ResultFiles)).

%%---------------------------------------------------------------------------
%% fetch_bmres_data(BmResultFiles) -> Results
%%   BmResultFiles = [FileName | _]
%%   FileName = string()
%%   Results = [[{Bm, Res} | _]]
%%   Bm = atom() - Name of benchmark module
%%   Res = [{VersionInfo, [{Test, Time} | _]}]
%%   VersionInfo = {Ver, Machine}
%%   Ver = string()
%%   Machine = string()
%%   Test = atom()
%%   Time = integer()
%%
%% Reads result data from the intermediate *.bmres files.
%%---------------------------------------------------------------------------
fetch_bmres_data(BmResultFiles) ->
    fetch_bmres_data(BmResultFiles, [], undefined).
278 279%%--------------------------------------------------------------------------- 280%% fetch_bmres_data(BmResultFiles, AccResData, Check) -> Results 281%% BmResultFiles = [FileName | _] 282%% FileName = string() 283%% AccResData = see Results fetch_bmres_data/1 284%% Check = integer() | undefined (first time) 285%% 286%% Help function to fetch_bmres_data/1 287%%--------------------------------------------------------------------------- 288fetch_bmres_data([], AccResData, _Check) -> 289 AccResData; 290 291fetch_bmres_data([Name | BmResultFiles], AccResData, Check) -> 292 {DataList, NewCheck} = read_bmres_file(Name, Check), 293 fetch_bmres_data(BmResultFiles, [DataList| AccResData], NewCheck). 294 295%%--------------------------------------------------------------------------- 296%% read_bmres_file(Name, Check) -> 297%% Name = string() 298%% Check = integer() | undefined 299%% 300%% Reads the data from the result files. Checks that all result 301%% files where created with the same set of tests. 302%%--------------------------------------------------------------------------- 303read_bmres_file(Name, Check) -> 304 case file:consult(Name) of 305 {ok, [Check1, List]} when Check =:= undefined, integer(Check1) -> 306 {List, Check1}; 307 {ok, [Check, List]} when integer(Check) -> 308 {List, Check}; 309 {ok, [Check1, _List]} when integer(Check1) -> 310 Reason = 311 lists:flatten( 312 io_lib:format("Different test setup, remove old setup " 313 "result by removing *.bmres files and " 314 "try again", [])), 315 exit(self(), Reason); 316 {error, Reason} when atom(Reason) -> 317 exit(self(), Reason); 318 {error, Reason} -> 319 exit(self(), file:format(Reason)) 320 end. 321 322%%--------------------------------------------------------------------------- 323%% create_report(Results) -> 324%% Results = see Results fetch_bmres_data/1 325%% 326%% Organizes <Result> so it will be right for create_html_report/1 327%% i.e. 
%% group results for the same benchmark test, run on different versions
%% of erlang.
%%---------------------------------------------------------------------------
create_report(Results) ->
    %% Append every {Bm, VerResult} pair into a dict keyed on the
    %% benchmark module, so each module ends up with one entry per
    %% Erlang version it was run on.
    ByBenchmark =
        lists:foldl(fun(BmResultList, DictAcc) ->
                            lists:foldl(fun({Bm, VerResult}, Dict) ->
                                                dict:append(Bm, VerResult,
                                                            Dict)
                                        end, DictAcc, BmResultList)
                    end,
                    dict:new(), Results),

    create_html_report(dict:to_list(ByBenchmark)).
%%---------------------------------------------------------------------------
%% create_html_report(ResultList) -> _
%%   ResultList = [{Bm, Res} | _]
%%   Bm = atom() - Name of benchmark module
%%   Res = [{VersionInfo, [{Test, Time} | _]} | _]
%%   VersionInfo = {Ver, Machine}
%%   Ver = string()
%%   Machine = string()
%%   Test = atom()
%%   Time = integer()
%%
%% Writes the result to an html-file ("index.html").
%%---------------------------------------------------------------------------
create_html_report(ResultList) ->
    {ok, OutputFile} = file:open("index.html", [write]),

    %% Document head.
    Title = "Benchmark Results",
    io:put_chars(OutputFile, "<html>\n"),
    io:put_chars(OutputFile, "<head>\n"),
    io:format(OutputFile, "<title>~s</title>\n", [Title]),
    io:put_chars(OutputFile, "</head>\n"),
    io:put_chars(OutputFile, "<body bgcolor=\"#FFFFFF\" text=\"#000000\"" ++
                 " link=\"#0000FF\" vlink=\"#800080\" alink=\"#FF0000\">\n"),
    io:format(OutputFile, "<h1>~s</h1>\n", [Title]),

    %% One result table per benchmark module.
    lists:foreach(fun(Element) ->
                          create_html_table(OutputFile, Element)
                  end,
                  ResultList),

    %% Closing tags.
    io:put_chars(OutputFile, "</body>\n"),
    io:put_chars(OutputFile, "</html>\n"),

    file:close(OutputFile).
378 379%%--------------------------------------------------------------------------- 380%% create_html_table(File, {Bm, Res}) -> _ 381%% File = file() - html file to write data to. 382%% Bm = atom() - Name of benchmark module 383%% Res = [{VersionInfo, [{Test, Time} | _]}] 384%% VersionInfo = {Ver, Machine} 385%% Ver = string() 386%% Machine = string() 387%% Test = atom() 388%% Time = integer() 389%% 390%% Creates a html table that displays the result of the benchmark <Bm>. 391%%--------------------------------------------------------------------------- 392create_html_table(File, {Bm, Res}) -> 393 394 {MinTime, Order} = min_time_and_sort(Res), 395 396 io:format(File, "<h2>~s</h2>\n" , [Bm]), 397 398 %% Fun that calculates relative measure values and puts them in 399 %% a dictionary 400 RelativeMesureFun = fun({TestName, Time}, Dict1) -> 401 dict:append(TestName, Time/MinTime, Dict1) 402 end, 403 404 %% For all erlang versions that the benchmark tests has been run, 405 %% calculate the relative measure values and put them in a dictionary. 
406 ResultDict = 407 lists:foldl(fun({_VerInfo, Bms}, Dict0) -> 408 lists:foldl(RelativeMesureFun, Dict0, Bms) end, 409 dict:new(), Res), 410 411 %% Create the table and its headings 412 io:put_chars(File, "<table border=0 cellpadding=1><tr>" 413 "<td bgcolor=\"#000000\">\n"), 414 io:put_chars(File, "<table cellpadding=3 border=0 cellspacing=1>\n"), 415 io:put_chars(File, "<tr bgcolor=white>"), 416 io:put_chars(File, "<td>Test</td>"), 417 Heads = table_headers(Res), 418 lists:foreach(fun({Ver,Machine}) -> io:format(File, "<td>~s<br>~s</td>", 419 [Ver,Machine]) end, Heads), 420 io:put_chars(File, "</tr>\n"), 421 422 %% Create table rows 423 lists:foreach(fun(Name) -> 424 create_html_row(File, Name, ResultDict) 425 end, Order), 426 427 %% Tabel end-tags 428 io:put_chars(File, "</table></td></tr></table>\n"), 429 430 %% Create link to benchmark source code 431 io:format(File, "<p><a href=\"~s.erl\">Source for ~s.erl</a>\n", 432 [Bm,Bm]). 433 434%%--------------------------------------------------------------------------- 435%% create_html_row(File, Name, Dict) -> _ 436%% File = file() - html file to write data to. 437%% Name = atom() - Name of benchmark test 438%% Dict = dict() - Dictonary where the relative time measures for 439%% the test can be found. 440%% 441%% Creates an actual html table-row. 442%%--------------------------------------------------------------------------- 443create_html_row(File, Name, Dict) -> 444 ReletiveTimes = dict:fetch(Name, Dict), 445 io:put_chars(File, "<tr bgcolor=white>\n"), 446 io:format(File, "<td>~s</td>", [Name]), 447 lists:foreach(fun(Time) -> 448 io:format(File, "<td>~-8.2f</td>", [Time]) end, 449 ReletiveTimes), 450 io:put_chars(File, "</tr>\n"). 
451 452%%--------------------------------------------------------------------------- 453%% min_time_and_sort(ResultList) -> {MinTime, Order} 454%% ResultList = [{VersionInfo, [{Test, Time} | _]}] 455%% MinTime = integer() - The execution time of the fastes test 456%% Order = [BmFunctionName|_] - the order of the testcases in 457%% increasing execution time. 458%% BmFunctionName = atom() 459%%--------------------------------------------------------------------------- 460min_time_and_sort(ResultList) -> 461 462 %% Use the results from the run on the highest version 463 %% of Erlang as norm. 464 {_, TestRes} = 465 lists:foldl(fun ({{Ver, _}, ResList}, 466 {CurrentVer, _}) when Ver > CurrentVer -> 467 {Ver, ResList}; 468 (_, VerAndRes) -> 469 VerAndRes 470 end, {"0", []}, ResultList), 471 472 {lists:foldl(fun ({_, Time0}, Min1) when Time0 < Min1 -> 473 Time0; 474 (_, Min1) -> 475 Min1 476 end, ?MAX, TestRes), 477 [Name || {Name, _} <- lists:keysort(2, TestRes)]}. 478 479%%--------------------------------------------------------------------------- 480%% table_headers(VerResultList) -> SysInfo 481%% VerResultList = [{{Ver, Machine},[{BmFunctionName, Time}]} | _] 482%% Ver = string() 483%% Machine = string() 484%% BmFunctionName = atom() 485%% Time = integer() 486%% SysInfo = {Ver, Machine} 487%%--------------------------------------------------------------------------- 488table_headers(VerResultList) -> 489 [SysInfo || {SysInfo, _} <- VerResultList]. 490