%%
%% %CopyrightBegin%
%%
%% Copyright Ericsson AB 2006-2020. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%%     http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%% %CopyrightEnd%
%%

%%%-------------------------------------------------------------------
%%% File    : z_SUITE.erl
%%% Author  : Rickard Green <rickard.s.green@ericsson.com>
%%% Description : Misc tests that should be run last
%%%
%%% Created : 15 Jul 2005 by Rickard Green <rickard.s.green@ericsson.com>
%%%-------------------------------------------------------------------
-module(z_SUITE).
-author('rickard.s.green@ericsson.com').

%-define(line_trace, 1).

-include_lib("common_test/include/ct.hrl").

-export([all/0, suite/0, init_per_testcase/2, end_per_testcase/2]).

-export([used_thread_specific_events/1, schedulers_alive/1,
         node_container_refc_check/1,
         long_timers/1, pollset_size/1,
         check_io_debug/1, get_check_io_info/0,
         lc_graph/1,
         leaked_processes/1,
         literal_area_collector/1]).

suite() ->
    [{ct_hooks,[ts_install_cth]},
     {timetrap, {minutes, 5}}].

all() ->
    [used_thread_specific_events, schedulers_alive,
     node_container_refc_check,
     long_timers, pollset_size, check_io_debug,
     lc_graph,
     %% Make sure that leaked_processes/1 is run after all other
     %% test cases (only literal_area_collector/1 follows it).
     leaked_processes,
     literal_area_collector].

init_per_testcase(schedulers_alive, Config) ->
    case erlang:system_info(schedulers) of
        1 ->
            {skip, "Needs more schedulers to run"};
        _ ->
            Config
    end;
init_per_testcase(_, Config) ->
    Config.


end_per_testcase(_Name, Config) ->
    Config.

%%%
%%% The test cases -------------------------------------------------------------
%%%

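%% used_thread_specific_events/1 compares the number of thread
%% specific events in use now with the number recorded earlier by
%% used_thread_specific_events_holder, a process assumed to have been
%% registered by a suite that ran before this one. A minimal sketch of
%% such a holder (hypothetical; the real one is set up elsewhere in
%% the test run):
%%
%%   Serve = fun Loop(UsedTSE) ->
%%                   receive
%%                       {get_used_tse, From} ->
%%                           From ! {used_tse, UsedTSE},
%%                           Loop(UsedTSE)
%%                   end
%%           end,
%%   spawn(fun () ->
%%                 register(used_thread_specific_events_holder, self()),
%%                 Serve(erlang:system_info(ethread_used_tse))
%%         end).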
used_thread_specific_events(Config) when is_list(Config) ->
    Pid = whereis(used_thread_specific_events_holder),
    Mon = monitor(process, Pid),
    Pid ! {get_used_tse, self()},
    UsedTSE = erlang:system_info(ethread_used_tse),
    receive
        {used_tse, InitialUsedTSE} ->
            io:format("InitialUsedTSE=~p UsedTSE=~p~n", [InitialUsedTSE, UsedTSE]),
            case os:type() of
                {win32, _} ->
                    %% The windows poll implementation creates threads on demand,
                    %% which in turn get thread specific events allocated.
                    %% We don't know how many such threads are created, so we
                    %% just have to guess and test that the number of events is
                    %% not huge.
                    Extra = 100, %% Value taken out of the blue...
                    if UsedTSE =< InitialUsedTSE + Extra -> ok;
                       true -> ct:fail("An unexpectedly large number of thread specific events used!")
                    end;
                _ ->
                    if UsedTSE =< InitialUsedTSE -> ok;
                       true -> ct:fail("An increased number of thread specific events used!")
                    end
            end,
            exit(Pid, kill),
            receive {'DOWN', Mon, process, Pid, _} -> ok end;
        {'DOWN', Mon, process, Pid, Reason} ->
            ct:fail({used_thread_specific_events_holder, Reason})
    end.

%% Tests that all schedulers are actually used.
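%% The strategy: spawn one looping process per scheduler, each
%% repeatedly yielding and reporting every new scheduler id
%% (erlang:system_info(scheduler_id)) it is seen running on, then wait
%% until the expected number of distinct scheduler ids has been
%% reported. This is done both while normal multi scheduling is
%% blocked (only one scheduler id expected) and while it is unblocked
%% (all scheduler ids expected).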
schedulers_alive(Config) when is_list(Config) ->
    Master = self(),
    NoSchedulersOnline = erlang:system_flag(
                           schedulers_online,
                           erlang:system_info(schedulers)),
    NoSchedulers = erlang:system_info(schedulers),
    UsedScheds =
      try
          io:format("Number of schedulers configured: ~p~n", [NoSchedulers]),
          case erlang:system_info(multi_scheduling) of
              blocked ->
                  ct:fail(multi_scheduling_blocked);
              disabled ->
                  ok;
              enabled ->
                  io:format("Testing blocking process exit~n"),
                  BF = fun () ->
                               blocked_normal = erlang:system_flag(multi_scheduling,
                                                                   block_normal),
                               Master ! {self(), blocking},
                               receive after infinity -> ok end
                       end,
                  Blocker = spawn_link(BF),
                  Mon = erlang:monitor(process, Blocker),
                  receive {Blocker, blocking} -> ok end,
                  [Blocker]
                  = erlang:system_info(normal_multi_scheduling_blockers),
                  unlink(Blocker),
                  exit(Blocker, kill),
                  receive {'DOWN', Mon, _, _, _} -> ok end,
                  enabled = erlang:system_info(multi_scheduling),
                  [] = erlang:system_info(normal_multi_scheduling_blockers),
                  ok
          end,
          io:format("Testing blocked~n"),
          erlang:system_flag(multi_scheduling, block_normal),
          case erlang:system_info(multi_scheduling) of
              enabled ->
                  ct:fail(multi_scheduling_enabled);
              blocked_normal ->
                  [Master] = erlang:system_info(normal_multi_scheduling_blockers);
              disabled -> ok
          end,
          Ps = lists:map(
                 fun (_) ->
                         spawn_link(fun () ->
                                            run_on_schedulers(none,
                                                              [],
                                                              Master)
                                    end)
                 end,
                 lists:seq(1, NoSchedulers)),
          receive after 1000 -> ok end,
          {_, 1} = verify_all_schedulers_used({[], 0}, 1),
          lists:foreach(fun (P) ->
                                unlink(P),
                                exit(P, bang)
                        end, Ps),
          case erlang:system_flag(multi_scheduling, unblock_normal) of
              blocked_normal -> ct:fail(multi_scheduling_blocked);
              disabled -> ok;
              enabled -> ok
          end,
          erts_debug:set_internal_state(available_internal_state, true),
          %% node_and_dist_references will use emulator internal thread blocking...
          erts_debug:get_internal_state(node_and_dist_references),
          erts_debug:set_internal_state(available_internal_state, false),
          io:format("Testing not blocked~n"),
          Ps2 = lists:map(
                  fun (_) ->
                          spawn_link(fun () ->
                                             run_on_schedulers(none,
                                                               [],
                                                               Master)
                                     end)
                  end,
                  lists:seq(1, NoSchedulers)),
          receive after 1000 -> ok end,
          {_, NoSIDs} = verify_all_schedulers_used({[], 0}, NoSchedulers),
          lists:foreach(fun (P) ->
                                unlink(P),
                                exit(P, bang)
                        end, Ps2),
          NoSIDs
      after
          NoSchedulers = erlang:system_flag(schedulers_online,
                                            NoSchedulersOnline),
          NoSchedulersOnline = erlang:system_info(schedulers_online)
      end,
    {comment, "Number of schedulers " ++ integer_to_list(UsedScheds)}.


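%% Busy loop run by the test processes: yields repeatedly and sends a
%% {scheduler_used, SID} message to ReportTo for every scheduler id it
%% has not reported before.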
run_on_schedulers(LastSID, SIDs, ReportTo) ->
    case erlang:system_info(scheduler_id) of
        LastSID ->
            erlang:yield(),
            run_on_schedulers(LastSID, SIDs, ReportTo);
        SID ->
            NewSIDs = case lists:member(SID, SIDs) of
                          true ->
                              SIDs;
                          false ->
                              ReportTo ! {scheduler_used, SID},
                              [SID | SIDs]
                      end,
            erlang:yield(),
            run_on_schedulers(SID, NewSIDs, ReportTo)
    end.

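%% Blocks until a {scheduler_used, SID} message carrying a previously
%% unseen scheduler id arrives; returns the state updated with it.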
wait_on_used_scheduler({SIDs, SIDsLen} = State) ->
    receive
        {scheduler_used, SID} ->
            case lists:member(SID, SIDs) of
                true ->
                    wait_on_used_scheduler(State);
                false ->
                    io:format("Scheduler ~p used~n", [SID]),
                    {[SID|SIDs], SIDsLen+1}
            end
    end.

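%% Waits until NoSchedulers distinct scheduler ids have been reported,
%% and fails if more scheduler ids than existing schedulers show up.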
verify_all_schedulers_used({UsedSIDs, UsedSIDsLen} = State, NoSchedulers) ->
    case NoSchedulers of
        UsedSIDsLen ->
            State;
        NoSchdlrs when NoSchdlrs < UsedSIDsLen ->
            ct:fail({more_schedulers_used_than_exist,
                     {existing_schedulers, NoSchdlrs},
                     {used_schedulers, UsedSIDsLen},
                     {used_scheduler_ids, UsedSIDs}});
        _ ->
            NewState = wait_on_used_scheduler(State),
            verify_all_schedulers_used(NewState, NoSchedulers)
    end.

node_container_refc_check(Config) when is_list(Config) ->
    node_container_SUITE:node_container_refc_check(node()),
    ok.

long_timers(Config) when is_list(Config) ->
    case long_timers_test:check_result() of
        ok -> ok;
        high_cpu -> {comment, "Ignored failures due to high CPU utilization"};
        missing_cpu_info -> {comment, "Ignored failures due to missing CPU utilization information"};
        Fail -> ct:fail(Fail)
    end.

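%% pollset_size/1 compares the current pollset size with the one
%% recorded at the start of the test run by
%% pollset_size_testcase_initial_state_holder, a process assumed to be
%% registered by a suite that ran earlier. A minimal sketch of such a
%% holder (hypothetical; the real one is set up elsewhere):
%%
%%   Serve = fun Loop(ICIO) ->
%%                   receive
%%                       {get_initial_check_io_result, From} ->
%%                           From ! {initial_check_io_result, ICIO},
%%                           Loop(ICIO)
%%                   end
%%           end,
%%   spawn(fun () ->
%%                 register(pollset_size_testcase_initial_state_holder,
%%                          self()),
%%                 Serve(z_SUITE:get_check_io_info())
%%         end).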
pollset_size(Config) when is_list(Config) ->
    Name = pollset_size_testcase_initial_state_holder,
    Mon = erlang:monitor(process, Name),
    (catch Name ! {get_initial_check_io_result, self()}),
    InitChkIo = receive
                    {initial_check_io_result, ICIO} ->
                        erlang:demonitor(Mon, [flush]),
                        ICIO;
                    {'DOWN', Mon, _, _, Reason} ->
                        ct:fail({non_existing, Name, Reason})
                end,
    FinChkIo = get_check_io_info(),
    io:format("Initial: ~p~nFinal: ~p~n", [InitChkIo, FinChkIo]),
    InitPollsetSize = lists:keysearch(total_poll_set_size, 1, InitChkIo),
    FinPollsetSize = lists:keysearch(total_poll_set_size, 1, FinChkIo),
    HasGethost = case has_gethost() of true -> 1; _ -> 0 end,
    case InitPollsetSize =:= FinPollsetSize of
        true ->
            case InitPollsetSize of
                {value, {total_poll_set_size, Size}} ->
                    {comment,
                     "Pollset size: " ++ integer_to_list(Size)};
                _ ->
                    {skipped,
                     "Pollset size information not available"}
            end;
        false ->
            %% Sometimes we have fewer descriptors in the
            %% pollset at the end than when we started, but
            %% that is ok as long as there are at least 2
            %% descriptors (dist listen socket and
            %% epmd socket) in the pollset.
            {value, {total_poll_set_size, InitSize}}
                = InitPollsetSize,
            {value, {total_poll_set_size, FinSize}}
                = FinPollsetSize,
            true = FinSize < (InitSize + HasGethost),
            true = 2 =< FinSize,
            {comment,
             "Start pollset size: "
             ++ integer_to_list(InitSize)
             ++ " End pollset size: "
             ++ integer_to_list(FinSize)}
    end.

check_io_debug(Config) when is_list(Config) ->
    case lists:keysearch(name, 1, hd(erlang:system_info(check_io))) of
        {value, {name, erts_poll}} -> check_io_debug_test();
        _ -> {skipped, "Not implemented in this emulator"}
    end.

check_io_debug_test() ->
    erlang:display(get_check_io_info()),
    erts_debug:set_internal_state(available_internal_state, true),
    {NoErrorFds, NoUsedFds, NoDrvSelStructs, NoDrvEvStructs} = CheckIoDebug
        = erts_debug:get_internal_state(check_io_debug),
    erts_debug:set_internal_state(available_internal_state, false),
    HasGetHost = has_gethost(),
    ct:log("check_io_debug: ~p~n"
           "HasGetHost: ~p", [CheckIoDebug, HasGetHost]),
    0 = NoErrorFds,
    if
        NoUsedFds == NoDrvSelStructs ->
            ok;
        HasGetHost andalso (NoUsedFds == (NoDrvSelStructs - 1)) ->
            %% If the inet_gethost port is alive, we may have
            %% one extra used fd that is not selected on.
            %% This happens when the initial setup of the
            %% port returns an EAGAIN.
            ok
    end,
    0 = NoDrvEvStructs,
    ok.

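%% Returns true if an inet_gethost port program is currently running.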
has_gethost() ->
    has_gethost(erlang:ports()).
has_gethost([P|T]) ->
    case erlang:port_info(P, name) of
        {name, "inet_gethost" ++ _} ->
            true;
        _ ->
            has_gethost(T)
    end;
has_gethost([]) ->
    false.

lc_graph(Config) when is_list(Config) ->
    %% Create an "lc_graph" file in the current working directory
    %% if the lock checker is enabled.
    erts_debug:lc_graph(),
    ok.

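%% leaked_processes/1 compares the currently running processes with
%% the set recorded by leaked_processes__process_holder, a process
%% assumed to be registered before the other suites run. A minimal
%% sketch of such a holder (hypothetical; the real one is set up
%% elsewhere):
%%
%%   Serve = fun Loop(Initial) ->
%%                   receive
%%                       {get_initial_processes, From} ->
%%                           From ! {initial_processes, Initial},
%%                           Loop(Initial)
%%                   end
%%           end,
%%   spawn(fun () ->
%%                 register(leaked_processes__process_holder, self()),
%%                 Serve(processes())
%%         end).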
leaked_processes(Config) when is_list(Config) ->
    %% Replace the default timetrap with a timetrap with a
    %% known pid.
    test_server:timetrap_cancel(),
    Dog = test_server:timetrap(test_server:minutes(5)),

    Name = leaked_processes__process_holder,
    Name ! {get_initial_processes, self()},
    receive
        {initial_processes, Initial0} -> ok
    end,
    Initial = ordsets:from_list(Initial0),

    KnownPids = ordsets:from_list([self(), Dog]),
    Now0 = ordsets:from_list(processes()),
    Now = ordsets:subtract(Now0, KnownPids),
    Leaked = ordsets:subtract(Now, Initial),

    _ = [begin
             Info = process_info(P) ++ process_info(P, [current_stacktrace]),
             io:format("~p: ~p\n", [P, Info])
         end || P <- Leaked],
    Comment = lists:flatten(io_lib:format("~p process(es)",
                                          [length(Leaked)])),
    {comment, Comment}.

literal_area_collector(Config) when is_list(Config) ->
    literal_area_collector_test:check_idle(10000).

%%
%% Internal functions...
%%

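%% Displays check io info together with the current number of error
%% fds; every operation is wrapped in catch so this never crashes the
%% caller.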
display_check_io(ChkIo) ->
    catch erlang:display('--- CHECK IO INFO ---'),
    catch erlang:display(ChkIo),
    catch erts_debug:set_internal_state(available_internal_state, true),
    NoOfErrorFds = (catch element(1, erts_debug:get_internal_state(check_io_debug))),
    catch erlang:display({'NoOfErrorFds', NoOfErrorFds}),
    catch erts_debug:set_internal_state(available_internal_state, false),
    catch erlang:display('--- CHECK IO INFO ---'),
    ok.

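%% Fetches the check io totals, retrying every 100 ms until there are
%% no pending updates and no active fds, so that a stable snapshot is
%% returned.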
get_check_io_info() ->
    ChkIo = driver_SUITE:get_check_io_total(erlang:system_info(check_io)),
    PendUpdNo = case lists:keysearch(pending_updates, 1, ChkIo) of
                    {value, {pending_updates, PendNo}} ->
                        PendNo;
                    false ->
                        0
                end,
    {value, {active_fds, ActFds}} = lists:keysearch(active_fds, 1, ChkIo),
    case {PendUpdNo, ActFds} of
        {0, 0} ->
            display_check_io(ChkIo),
            ChkIo;
        _ ->
            receive after 100 -> ok end,
            get_check_io_info()
    end.