%%
%% %CopyrightBegin%
%%
%% Copyright Ericsson AB 1997-2020. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%%     http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%% %CopyrightEnd%
%%

-module(statistics_SUITE).

%% Tests the statistics/1 BIF.

-export([all/0, suite/0, groups/0,
         wall_clock_sanity/1,
         wall_clock_zero_diff/1, wall_clock_update/1,
         runtime_sanity/1,
         runtime_zero_diff/1,
         runtime_update/1, runtime_diff/1,
         run_queue_one/1,
         scheduler_wall_time/1,
         scheduler_wall_time_all/1,
         msb_scheduler_wall_time/1,
         reductions/1, reductions_big/1, garbage_collection/1, io/1,
         badarg/1, run_queues_lengths_active_tasks/1, msacc/1]).

%% Internal exports.

-export([hog/1]).

-include_lib("common_test/include/ct.hrl").

suite() ->
    [{ct_hooks,[ts_install_cth]},
     {timetrap, {minutes, 4}}].

all() ->
    [{group, wall_clock}, {group, runtime}, reductions,
     reductions_big, {group, run_queue},
     scheduler_wall_time, scheduler_wall_time_all,
     msb_scheduler_wall_time,
     garbage_collection, io, badarg,
     run_queues_lengths_active_tasks,
     msacc].

groups() ->
    [{wall_clock, [],
      [wall_clock_sanity, wall_clock_zero_diff, wall_clock_update]},
     {runtime, [],
      [runtime_sanity, runtime_zero_diff, runtime_update, runtime_diff]},
     {run_queue, [], [run_queue_one]}].

wall_clock_sanity(Config) when is_list(Config) ->
    erlang:yield(),
    {WallClock, _} = statistics(wall_clock),
    MT = erlang:monotonic_time(),
    Time = erlang:convert_time_unit(MT - erlang:system_info(start_time),
                                    native, millisecond),
    io:format("Time=~p WallClock=~p~n",
              [Time, WallClock]),
    true = WallClock =< Time,
    true = Time - 100 =< WallClock,
    ok.

%%% Testing statistics(wall_clock).

%% Tests that the 'Wall clock since last call' element of the result
%% is zero when statistics(wall_clock) is called twice in succession.
wall_clock_zero_diff(Config) when is_list(Config) ->
    wall_clock_zero_diff1(16).

wall_clock_zero_diff1(N) when N > 0 ->
    {Time, _} = statistics(wall_clock),
    case statistics(wall_clock) of
        {Time, 0} -> ok;
        _ -> wall_clock_zero_diff1(N-1)
    end;
wall_clock_zero_diff1(0) ->
    ct:fail("Difference never zero.").

%% Test that the time differences returned by two calls to
%% statistics(wall_clock) are consistent, and are within a small number
%% of ms of the amount of real time we waited for.
wall_clock_update(Config) when is_list(Config) ->
    N = 10,
    Inc = 200,
    TotalTime = wall_clock_update1(N, Inc, 0),
    Overhead = TotalTime - N * Inc,
    IsDebug = test_server:is_debug(),

    %% Check that the average overhead is reasonable.
    if
        Overhead < N * 100 ->
            ok;
        IsDebug, Overhead < N * 1000 ->
            ok;
        true ->
            io:format("There was an overhead of ~p ms during ~p rounds.",
                      [Overhead,N]),
            ct:fail(too_much_overhead)
    end.

wall_clock_update1(N, Inc, Total) when N > 0 ->
    {Time1, _} = statistics(wall_clock),
    receive after Inc -> ok end,
    {Time2, WcDiff} = statistics(wall_clock),
    WcDiff = Time2 - Time1,
    io:format("Wall clock diff = ~p (expected at least ~p)\n", [WcDiff,Inc]),
    true = WcDiff >= Inc,
    wall_clock_update1(N-1, Inc, Total + WcDiff);
wall_clock_update1(0, _, Total) ->
    Total.
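
%% A minimal usage sketch (not part of the suite): statistics(wall_clock)
%% returns {TotalWallClockTime, WallClockTimeSinceLastCall}, both in
%% milliseconds. The helper name below is hypothetical.
%%
%% elapsed_ms(Fun) ->
%%     {_, _} = statistics(wall_clock),   % discard time since previous call
%%     Fun(),
%%     {_Total, SinceLast} = statistics(wall_clock),
%%     SinceLast.                         % wall-clock ms spent in Fun()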

%%% Test statistics(runtime).

runtime_sanity(Config) when is_list(Config) ->
    case erlang:system_info(logical_processors_available) of
        unknown ->
            {skipped, "Don't know the number of available logical processors"};
        LP when is_integer(LP) ->
            erlang:yield(),
            {RunTime, _} = statistics(runtime),
            MT = erlang:monotonic_time(),
            Time = erlang:convert_time_unit(MT - erlang:system_info(start_time),
                                            native, millisecond),
            io:format("Time=~p RunTime=~p~n",
                      [Time, RunTime]),
            true = RunTime =< Time*LP
    end.

%% Tests that the difference between the times returned from two consecutive
%% calls to statistics(runtime) is zero.
runtime_zero_diff(Config) when is_list(Config) ->
    runtime_zero_diff1(16).

runtime_zero_diff1(N) when N > 0 ->
    {T1, _} = statistics(runtime),
    case statistics(runtime) of
        {T1, 0} -> ok;
        _ -> runtime_zero_diff1(N-1)
    end;
runtime_zero_diff1(0) ->
    ct:fail("statistics(runtime) never returned zero difference").

%% Test that statistics(runtime) returns a substantially
%% updated difference after running a process that hogs all CPU
%% time of the Erlang VM for a second.
runtime_update(Config) when is_list(Config) ->
    case test_server:is_cover() of
        false ->
            process_flag(priority, high),
            do_runtime_update(10);
        true ->
            {skip,"Cover-compiled"}
    end.

do_runtime_update(0) ->
    {comment,"Never close enough"};
do_runtime_update(N) ->
    {T1,Diff0} = statistics(runtime),
    {CPUHog, CPUHogMon} = spawn_opt(fun cpu_heavy/0,[link,monitor]),
    receive after 1000 -> ok end,
    {T2,Diff} = statistics(runtime),
    unlink(CPUHog),
    exit(CPUHog, kill),

    true = is_integer(T1+T2+Diff0+Diff),
    io:format("T1 = ~p, T2 = ~p, Diff = ~p, T2-T1 = ~p", [T1,T2,Diff,T2-T1]),
    receive {'DOWN',CPUHogMon,process,CPUHog,_} -> ok end,
    if
        T2 - T1 =:= Diff, 900 =< Diff, Diff =< 1500 -> ok;
        true -> do_runtime_update(N-1)
    end.

cpu_heavy() ->
    cpu_heavy().

%% Test that the difference between two consecutive absolute runtimes is
%% equal to the last relative runtime. The loop runs many times since
%% the bug that this test case checks for only showed up rarely.
runtime_diff(Config) when is_list(Config) ->
    runtime_diff1(1000).

runtime_diff1(N) when N > 0 ->
    {T1_wc_time, _} = statistics(runtime),
    do_much(),
    {T2_wc_time, Wc_Diff} = statistics(runtime),
    Wc_Diff = T2_wc_time - T1_wc_time,
    runtime_diff1(N-1);
runtime_diff1(0) ->
    ok.

%%% do_much(100000) takes about 760 ms on boromir.
%%% do_much(1000) takes about 8 ms on boromir.

do_much() ->
    do_much(1000).

do_much(0) ->
    ok;
do_much(N) ->
    _ = 4784728478274827 * 72874284728472,
    do_much(N-1).
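
%% A minimal usage sketch (not part of the suite): statistics(runtime)
%% has the same {Total, SinceLastCall} shape as statistics(wall_clock),
%% but measures CPU time consumed by the runtime system rather than
%% elapsed real time. The helper name below is hypothetical, and the
%% result includes CPU time spent by other processes while Fun() runs.
%%
%% cpu_ms_around(Fun) ->
%%     {_, _} = statistics(runtime),      % discard time since previous call
%%     Fun(),
%%     {_Total, SinceLast} = statistics(runtime),
%%     SinceLast.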

%% Test that statistics(reductions) is callable, and that
%% Total_Reductions and Reductions_Since_Last_Call make sense.
%% This used to fail on pre-R3A versions of JAM.
reductions(Config) when is_list(Config) ->
    {Reductions, _} = statistics(reductions),

    %% Each iteration of reductions/3 takes 4 reductions, and on top of
    %% that the garbage built outside the heap in the BIF calls will bump
    %% the reduction count. 300 * 4 is more than CONTEXT_REDS (1000).
    %% Thus, there will be one or more context switches.

    Mask = (1 bsl (erlang:system_info(wordsize)*8)) - 1,
    reductions(300, Reductions, Mask).

reductions(N, Previous, Mask) when N > 0 ->
    {Reductions, Diff} = statistics(reductions),
    build_some_garbage(),
    if Reductions > 0 -> ok end,
    if Diff >= 0 -> ok end,
    io:format("Previous = ~p, Reductions = ~p, Diff = ~p, DiffShouldBe = ~p",
              [Previous, Reductions, Diff, (Reductions-Previous) band Mask]),
    if Reductions == ((Previous+Diff) band Mask) -> reductions(N-1, Reductions, Mask) end;
reductions(0, _, _) ->
    ok.

build_some_garbage() ->
    %% This will build garbage outside the process heap, which will cause
    %% a garbage collection in the scheduler.
    processes().

%% Test that the number of reductions can be returned as a big number.
reductions_big(Config) when is_list(Config) ->
    reductions_big_loop(),
    ok.

reductions_big_loop() ->
    erlang:yield(),
    case statistics(reductions) of
        {Red, Diff} when Red >= 16#7ffFFFF ->
            ok = io:format("Reductions = ~w, Diff = ~w", [Red, Diff]);
        _ ->
            reductions_big_loop()
    end.


%%% Tests of statistics(run_queue).


%% Tests that statistics(run_queue) returns at least 1 if we start a
%% CPU-bound process.
run_queue_one(Config) when is_list(Config) ->
    MS = erlang:system_flag(multi_scheduling, block),
    run_queue_one_test(Config),
    erlang:system_flag(multi_scheduling, unblock),
    case MS of
        blocked ->
            {comment,
             "Multi-scheduling blocked during test. This test case "
             "was not written to work with multiple schedulers."};
        _ -> ok
    end.


run_queue_one_test(Config) when is_list(Config) ->
    _Hog = spawn_link(?MODULE, hog, [self()]),
    receive
        hog_started -> ok
    end,
    receive after 100 -> ok end, % Give the hog a head start.
    case statistics(run_queue) of
        N when N >= 1 -> ok;
        Other -> ct:fail({unexpected,Other})
    end,
    ok.

%% CPU-bound process, running at low priority. It will always be ready
%% to run.

hog(Pid) ->
    process_flag(priority, low),
    Pid ! hog_started,
    Mon = erlang:monitor(process, Pid),
    hog_iter(0, Mon).

hog_iter(N, Mon) when N > 0 ->
    receive
        {'DOWN', Mon, _, _, _} -> ok
    after 0 ->
            hog_iter(N-1, Mon)
    end;
hog_iter(0, Mon) ->
    hog_iter(10000, Mon).

%%% Tests of statistics(scheduler_wall_time).

%% Tests that statistics(scheduler_wall_time) works as intended.
scheduler_wall_time(Config) when is_list(Config) ->
    scheduler_wall_time_test(scheduler_wall_time).

%% Tests that statistics(scheduler_wall_time_all) works as intended.
scheduler_wall_time_all(Config) when is_list(Config) ->
    scheduler_wall_time_test(scheduler_wall_time_all).

scheduler_wall_time_test(Type) ->
    case string:find(erlang:system_info(system_version),
                     "dirty-schedulers-TEST") == nomatch of
        true -> run_scheduler_wall_time_test(Type);
        false -> {skip, "Cannot be run with a dirty-schedulers-TEST build"}
    end.
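
%% For reference, per-scheduler utilization is computed by diffing two
%% snapshots, as described in the erlang:statistics(scheduler_wall_time)
%% documentation; a sketch (variable names ours):
%%
%% erlang:system_flag(scheduler_wall_time, true),
%% Ts0 = lists:sort(erlang:statistics(scheduler_wall_time)),
%% %% ... run some load ...
%% Ts1 = lists:sort(erlang:statistics(scheduler_wall_time)),
%% [{Id, (A1 - A0) / (T1 - T0)}
%%  || {{Id, A0, T0}, {Id, A1, T1}} <- lists:zip(Ts0, Ts1)].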

run_scheduler_wall_time_test(Type) ->
    %% Should return undefined if the system_flag has not been turned on yet.
    undefined = statistics(Type),
    %% Turn on statistics.
    false = erlang:system_flag(scheduler_wall_time, true),
    try
        Schedulers = erlang:system_info(schedulers_online),
        DirtyCPUSchedulers = erlang:system_info(dirty_cpu_schedulers_online),
        DirtyIOSchedulers = erlang:system_info(dirty_io_schedulers),
        TotLoadSchedulers = case Type of
                                scheduler_wall_time_all ->
                                    Schedulers + DirtyCPUSchedulers + DirtyIOSchedulers;
                                scheduler_wall_time ->
                                    Schedulers + DirtyCPUSchedulers
                            end,

        Env = [io_lib:format("~s~n",[KV]) || KV <- os:getenv()],

        ct:log("Env:~n~s",[Env]),

        ct:log("Schedulers: ~p~n"
               "SchedulersOnline: ~p~n"
               "DirtyCPUSchedulers: ~p~n"
               "DirtyCPUSchedulersOnline: ~p~n"
               "DirtyIOSchedulersOnline: ~p~n",
               [erlang:system_info(schedulers),
                Schedulers,
                erlang:system_info(dirty_cpu_schedulers),
                DirtyCPUSchedulers,
                DirtyIOSchedulers]),

        %% Let the test server and everyone else finish their work.
        timer:sleep(1500),
        %% Empty load.
        EmptyLoad = get_load(Type),
        {false, _} = {lists:any(fun(Load) -> Load > 50 end, EmptyLoad),EmptyLoad},
        MeMySelfAndI = self(),
        StartHog = fun() ->
                           Pid = spawn_link(?MODULE, hog, [self()]),
                           receive hog_started -> MeMySelfAndI ! go end,
                           Pid
                   end,
        StartDirtyHog = fun(Func) ->
                                F = fun () ->
                                            erts_debug:Func(alive_waitexiting,
                                                            MeMySelfAndI)
                                    end,
                                Pid = spawn_link(F),
                                receive {alive, Pid} -> ok end,
                                Pid
                        end,
        P1 = StartHog(),
        %% Max load on one scheduler, the other schedulers empty (hopefully).
        %% Be generous: the process can jump between schedulers, which is ok,
        %% and we don't want the test to fail for the wrong reasons.
        _L1 = [S1Load|EmptyScheds1] = get_load(Type),
        {true,_} = {S1Load > 50,S1Load},
        {false,_} = {lists:any(fun(Load) -> Load > 50 end, EmptyScheds1),EmptyScheds1},
        {true,_} = {lists:sum(EmptyScheds1) < 60,EmptyScheds1},

        %% 50% load.
        HalfHogs = [StartHog() || _ <- lists:seq(1, (Schedulers-1) div 2)],
        HalfDirtyCPUHogs = [StartDirtyHog(dirty_cpu)
                            || _ <- lists:seq(1, lists:max([1,DirtyCPUSchedulers div 2]))],
        HalfDirtyIOHogs = [StartDirtyHog(dirty_io)
                           || _ <- lists:seq(1, lists:max([1,DirtyIOSchedulers div 2]))],
        HalfScheds = get_load(Type),
        ct:log("HalfScheds: ~w",[HalfScheds]),
        HalfLoad = lists:sum(HalfScheds) div TotLoadSchedulers,
        if Schedulers =:= 1, HalfLoad > 80 -> ok; %% Ok; only one scheduler online and one hog.
           %% We want roughly 50% load.
           HalfLoad > 40, HalfLoad < 60 -> ok;
           true -> exit({halfload, HalfLoad})
        end,

        %% 100% load. We need to take an odd number of schedulers into
        %% consideration, and also give special consideration to the
        %% case of only 1 scheduler.
        LastHogs = [StartHog() || _ <- lists:seq(1, (Schedulers+1) div 2),
                                  Schedulers =/= 1],
        LastDirtyCPUHogs = [StartDirtyHog(dirty_cpu)
                            || _ <- lists:seq(1, (DirtyCPUSchedulers+1) div 2),
                               DirtyCPUSchedulers =/= 1],
        LastDirtyIOHogs = [StartDirtyHog(dirty_io)
                           || _ <- lists:seq(1, (DirtyIOSchedulers+1) div 2),
                              DirtyIOSchedulers =/= 1],
        FullScheds = get_load(Type),
        ct:log("FullScheds: ~w",[FullScheds]),
        {false,_} = {lists:any(fun(Load) -> Load < 80 end, FullScheds),FullScheds},
        FullLoad = lists:sum(FullScheds) div TotLoadSchedulers,
        if FullLoad > 90 -> ok;
           true -> exit({fullload, FullLoad})
        end,

        KillHog = fun (HP) ->
                          HPM = erlang:monitor(process, HP),
                          unlink(HP),
                          exit(HP, kill),
                          receive
                              {'DOWN', HPM, process, HP, killed} ->
                                  ok
                          end
                  end,
        [KillHog(Pid) || Pid <- [P1|HalfHogs++HalfDirtyCPUHogs++HalfDirtyIOHogs
                                 ++LastHogs++LastDirtyCPUHogs++LastDirtyIOHogs]],
        receive after 2000 -> ok end, %% Give the dirty schedulers time to complete...
        AfterLoad = get_load(Type),
        io:format("AfterLoad=~p~n", [AfterLoad]),
        {false,_} = {lists:any(fun(Load) -> Load > 25 end, AfterLoad),AfterLoad},
        true = erlang:system_flag(scheduler_wall_time, false)
    after
        erlang:system_flag(scheduler_wall_time, false)
    end.
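
%% A worked example (invented numbers) for load_percentage/2 below: if a
%% scheduler reports {1, 400, 1000} in one snapshot and {1, 900, 2000} in
%% the next (that is, {Id, ActiveTime, TotalTime}), its load is
%% 100 * (900 - 400) div (2000 - 1000) = 50 percent.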

get_load(Type) ->
    Start = erlang:statistics(Type),
    timer:sleep(1500),
    End = erlang:statistics(Type),

    lists:reverse(
      lists:sort(load_percentage(online_statistics(Start),online_statistics(End)))).

%% We are only interested in schedulers that are online, so remove all
%% offline normal and dirty CPU schedulers (dirty IO schedulers cannot
%% be taken offline).
online_statistics(Stats) ->
    Schedulers = erlang:system_info(schedulers),
    SchedulersOnline = erlang:system_info(schedulers_online),
    DirtyCPUSchedulers = erlang:system_info(dirty_cpu_schedulers),
    DirtyCPUSchedulersOnline = erlang:system_info(dirty_cpu_schedulers_online),
    DirtyIOSchedulersOnline = erlang:system_info(dirty_io_schedulers),
    SortedStats = lists:sort(Stats),
    ct:pal("Stats: ~p~n", [SortedStats]),
    SchedulersStats =
        lists:sublist(SortedStats, 1, SchedulersOnline),
    DirtyCPUSchedulersStats =
        lists:sublist(SortedStats, Schedulers+1, DirtyCPUSchedulersOnline),
    DirtyIOSchedulersStats =
        lists:sublist(SortedStats, Schedulers + DirtyCPUSchedulers+1, DirtyIOSchedulersOnline),
    SchedulersStats ++ DirtyCPUSchedulersStats ++ DirtyIOSchedulersStats.

load_percentage([{Id, WN, TN}|Ss], [{Id, WP, TP}|Ps]) ->
    [100*(WN-WP) div (TN-TP)|load_percentage(Ss, Ps)];
load_percentage([], []) -> [].

count(0) ->
    ok;
count(N) ->
    count(N-1).

msb_swt_hog(true) ->
    count(1000000),
    erts_debug:dirty_cpu(wait, 10),
    erts_debug:dirty_io(wait, 10),
    msb_swt_hog(true);
msb_swt_hog(false) ->
    count(1000000),
    msb_swt_hog(false).
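
%% The next test blocks multi-scheduling and checks weighted scheduler
%% utilization. A worked example (invented numbers): with total
%% utilization 0.125 over 4 normal + 3 dirty CPU + 1 dirty IO scheduler
%% threads, the weighted utilization is 0.125 * (4 + 3 + 1) = 1.0, i.e.,
%% the equivalent of exactly one thread running non-stop, which is all
%% that blocked multi-scheduling allows.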

msb_scheduler_wall_time(_Config) ->
    erlang:system_flag(scheduler_wall_time, true),
    Dirty = erlang:system_info(dirty_cpu_schedulers) /= 0,
    Hogs = lists:map(fun (_) ->
                             spawn_opt(fun () ->
                                               msb_swt_hog(Dirty)
                                       end, [{priority,low}, link, monitor])
                     end, lists:seq(1,10)),
    erlang:system_flag(multi_scheduling, block),
    try
        SWT1 = lists:sort(statistics(scheduler_wall_time_all)),
        %% io:format("SWT1 = ~p~n", [SWT1]),
        receive after 4000 -> ok end,
        SWT2 = lists:sort(statistics(scheduler_wall_time_all)),
        %% io:format("SWT2 = ~p~n", [SWT2]),
        SWT = lists:zip(SWT1, SWT2),
        io:format("SU = ~p~n", [lists:map(fun({{I, A0, T0}, {I, A1, T1}}) ->
                                                  {I, (A1 - A0)/(T1 - T0)} end,
                                          SWT)]),
        {A, T} = lists:foldl(fun({{_, A0, T0}, {_, A1, T1}}, {Ai,Ti}) ->
                                     {Ai + (A1 - A0), Ti + (T1 - T0)}
                             end,
                             {0, 0},
                             SWT),
        TSU = A/T,
        WSU = TSU * (erlang:system_info(schedulers)
                     + erlang:system_info(dirty_cpu_schedulers)
                     + erlang:system_info(dirty_io_schedulers)),
        %% The weighted scheduler utilization should be very close to
        %% 1.0, i.e., we execute for the same amount of time as one
        %% thread running all the time...
        io:format("WSU = ~p~n", [WSU]),
        true = 0.9 < WSU andalso WSU < 1.1,
        ok
    after
        erlang:system_flag(multi_scheduling, unblock),
        erlang:system_flag(scheduler_wall_time, false),
        lists:foreach(fun ({HP, _HM}) ->
                              unlink(HP),
                              exit(HP, kill)
                      end, Hogs),
        lists:foreach(fun ({HP, HM}) ->
                              receive
                                  {'DOWN', HM, process, HP, _} ->
                                      ok
                              end
                      end, Hogs),
        ok
    end.

%% Tests that statistics(garbage_collection) is callable.
%% It is not clear how to test anything more.
garbage_collection(Config) when is_list(Config) ->
    Bin = list_to_binary(lists:duplicate(19999, 42)),
    case statistics(garbage_collection) of
        {Gcs0,R,0} when is_integer(Gcs0), is_integer(R) ->
            io:format("Reclaimed: ~p", [R]),
            Gcs = garbage_collection_1(Gcs0, Bin),
            io:format("Reclaimed: ~p",
                      [element(2, statistics(garbage_collection))]),
            {comment,integer_to_list(Gcs-Gcs0)++" GCs"}
    end.

garbage_collection_1(Gcs0, Bin) ->
    case statistics(garbage_collection) of
        {Gcs,Reclaimed,0} when Gcs >= Gcs0 ->
            if
                Reclaimed > 16#7ffffff ->
                    Gcs;
                true ->
                    _ = binary_to_list(Bin),
                    erlang:garbage_collect(),
                    garbage_collection_1(Gcs, Bin)
            end
    end.

%% Tests that statistics(io) is callable.
%% This could be improved to test something more.
io(Config) when is_list(Config) ->
    case statistics(io) of
        {{input,In},{output,Out}} when is_integer(In), is_integer(Out) -> ok
    end.

%% Tests that some illegal arguments to statistics/1 fail with badarg.
badarg(Config) when is_list(Config) ->
    case catch statistics(1) of
        {'EXIT', {badarg, _}} -> ok
    end,
    case catch statistics(bad_atom) of
        {'EXIT', {badarg, _}} -> ok
    end.

tok_loop() ->
    tok_loop().
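
%% The next test spawns ten always-runnable processes, so it expects at
%% least 11 active tasks: the ten loops plus the test process itself.
%% For reference, the two flavours of these statistics differ in shape:
%%
%% statistics(total_run_queue_lengths) -> an integer summed over all run
%%                                        queues, e.g. 10;
%% statistics(run_queue_lengths)       -> a list with one length per run
%%                                        queue, e.g. [4,3,2,1].
%%
%% (Example values invented.)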

run_queues_lengths_active_tasks(_Config) ->
    TokLoops = lists:map(fun (_) ->
                                 spawn_opt(fun () ->
                                                   tok_loop()
                                           end,
                                           [link, {priority, low}])
                         end,
                         lists:seq(1,10)),

    TRQLs0 = statistics(total_run_queue_lengths),
    TRQLAs0 = statistics(total_run_queue_lengths_all),
    TATs0 = statistics(total_active_tasks),
    TATAs0 = statistics(total_active_tasks_all),
    true = is_integer(TRQLs0),
    true = is_integer(TATs0),
    true = TRQLs0 >= 0,
    true = TRQLAs0 >= 0,
    true = TATs0 >= 11,
    true = TATAs0 >= 11,

    NoScheds = erlang:system_info(schedulers),
    {DefRqs,
     AllRqs} = case erlang:system_info(dirty_cpu_schedulers) of
                   0 -> {NoScheds, NoScheds};
                   _ -> {NoScheds+1, NoScheds+2}
               end,
    RQLs0 = statistics(run_queue_lengths),
    RQLAs0 = statistics(run_queue_lengths_all),
    ATs0 = statistics(active_tasks),
    ATAs0 = statistics(active_tasks_all),
    DefRqs = length(RQLs0),
    AllRqs = length(RQLAs0),
    DefRqs = length(ATs0),
    AllRqs = length(ATAs0),
    true = lists:sum(RQLs0) >= 0,
    true = lists:sum(RQLAs0) >= 0,
    true = lists:sum(ATs0) >= 11,
    true = lists:sum(ATAs0) >= 11,

    SO = erlang:system_flag(schedulers_online, 1),

    %% Give the newly suspended schedulers some time to
    %% migrate work away from their run queues...
    receive after 1000 -> ok end,

    TRQLs1 = statistics(total_run_queue_lengths),
    TATs1 = statistics(total_active_tasks),
    true = TRQLs1 >= 10,
    true = TATs1 >= 11,
    NoScheds = erlang:system_info(schedulers),

    RQLs1 = statistics(run_queue_lengths),
    ATs1 = statistics(active_tasks),
    DefRqs = length(RQLs1),
    DefRqs = length(ATs1),
    TRQLs2 = lists:sum(RQLs1),
    TATs2 = lists:sum(ATs1),
    true = TRQLs2 >= 10,
    true = TATs2 >= 11,
    [TRQLs2|_] = RQLs1,
    [TATs2|_] = ATs1,

    erlang:system_flag(schedulers_online, SO),

    lists:foreach(fun (P) ->
                          unlink(P),
                          exit(P, bang)
                  end,
                  TokLoops),

    ok.
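
%% A minimal sketch (not part of the suite) of the usual microstate
%% accounting workflow; the entry shape follows the
%% erlang:statistics(microstate_accounting) documentation:
%%
%% erlang:system_flag(microstate_accounting, true),
%% run_workload(),                      % hypothetical
%% erlang:system_flag(microstate_accounting, false),
%% [#{type := _Type, id := _Id, counters := Counters}|_] =
%%     erlang:statistics(microstate_accounting),
%% %% Counters maps states such as emulator, gc, and sleep to the time
%% %% one thread spent in that state.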

%% Tests that statistics(microstate_accounting) works.
msacc(Config) ->

    %% Test if the crypto NIF is available.
    Niff = try crypto:strong_rand_bytes(1), ok catch _:_ -> nok end,
    TmpFile = filename:join(proplists:get_value(priv_dir,Config),"file.tmp"),

    false = erlang:system_flag(microstate_accounting, true),

    msacc_test(TmpFile),

    true = erlang:system_flag(microstate_accounting, false),

    MsaccStats = erlang:statistics(microstate_accounting),

    case os:type() of
        {win32, _} ->
            %% Some Windows systems have very poor accuracy in their
            %% timing primitives, so we just make sure that some state
            %% besides sleep has been triggered.
            Sum = lists:sum(
                    lists:map(fun({sleep, _V}) -> 0;
                                 ({_, V}) -> V
                              end, maps:to_list(msacc_sum_states()))
                   ),
            if Sum > 0 ->
                    ok;
               true ->
                    ct:fail({no_states_triggered, MsaccStats})
            end;
        _ ->

            %% Make sure that all states were triggered at least once.
            maps:map(fun(nif, 0) ->
                             case Niff of
                                 ok ->
                                     ct:fail({zero_state,nif});
                                 nok ->
                                     ok
                             end;
                        (aux, 0) ->
                             %% aux will be zero if we do not have SMP
                             %% support or there are no async threads.
                             case erlang:system_info(thread_pool_size) > 0 of
                                 false ->
                                     ok;
                                 true ->
                                     ct:log("msacc: ~p",[MsaccStats]),
                                     ct:fail({zero_state,aux})
                             end;
                        (Key, 0) ->
                             ct:log("msacc: ~p",[MsaccStats]),
                             ct:fail({zero_state,Key});
                        (_,_) -> ok
                     end, msacc_sum_states())
    end,

    erlang:system_flag(microstate_accounting, reset),

    msacc_test(TmpFile),

    %% Make sure all counters are zero after stopping and resetting.
    maps:map(fun(_Key, 0) -> ok;
                (Key,_) ->
                     ct:log("msacc: ~p",[erlang:statistics(microstate_accounting)]),
                     ct:fail({non_zero_state,Key})
             end,msacc_sum_states()).

%% This function tries to trigger all of the different available states.
msacc_test(TmpFile) ->

    %% Write some data.
    [file:write_file(TmpFile,<<0:(1024*1024*8)>>,[raw]) || _ <- lists:seq(1,100)],

    %% Do some ETS operations.
    Tid = ets:new(table, []),
    ets:insert(Tid, {1, hello}),
    ets:delete(Tid),

    %% Check some IO.
    {ok, L} = gen_tcp:listen(0, [{active, true},{reuseaddr,true}]),
    {ok, Port} = inet:port(L),
    _Pid = spawn(fun() ->
                         {ok, _S} = gen_tcp:accept(L),
                         (fun F() -> receive _M -> F() end end)()
                 end),
    {ok, C} = gen_tcp:connect("localhost", Port, []),
    [begin gen_tcp:send(C,"hello"),timer:sleep(1) end || _ <- lists:seq(1,100)],

    %% Collect some garbage.
    [erlang:garbage_collect() || _ <- lists:seq(1,100)],

    %% Send some messages.
    [begin self() ! {hello},receive _ -> ok end end || _ <- lists:seq(1,100)],

    %% Set up some timers.
    Refs = [erlang:send_after(10000,self(),ok) || _ <- lists:seq(1,100)],

    %% Do some NIF work.
    catch [crypto:strong_rand_bytes(128) || _ <- lists:seq(1,100)],

    %% Cancel some timers.
    [erlang:cancel_timer(R) || R <- Refs],

    %% Wait for a while.
    timer:sleep(100).

msacc_sum_states() ->
    Stats = erlang:statistics(microstate_accounting),
    [#{ counters := C }|_] = Stats,
    InitialCounters = maps:map(fun(_,_) -> 0 end,C),
    lists:foldl(fun(#{ counters := Counters }, Cnt) ->
                        maps:fold(fun(Key, Value, Acc) ->
                                          NewValue = Value+maps:get(Key,Acc),
                                          maps:update(Key, NewValue, Acc)
                                  end, Cnt, Counters)
                end,InitialCounters,Stats).
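
%% A usage sketch (hypothetical; not called by any test case): the summed
%% map returned by msacc_sum_states/0 can be queried per state, e.g.:
%%
%% Sums = msacc_sum_states(),
%% Emulator = maps:get(emulator, Sums, 0),   % total across all threads
%% Sleep = maps:get(sleep, Sums, 0).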