%%
%% %CopyrightBegin%
%%
%% Copyright Ericsson AB 2006-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%%     http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%% %CopyrightEnd%
%%

%%%-------------------------------------------------------------------
%%% File    : z_SUITE.erl
%%% Author  : Rickard Green <rickard.s.green@ericsson.com>
%%% Description : Misc tests that should be run last
%%%
%%% Created : 15 Jul 2005 by Rickard Green <rickard.s.green@ericsson.com>
%%%-------------------------------------------------------------------
-module(z_SUITE).
-author('rickard.s.green@ericsson.com').

%-define(line_trace, 1).

-include_lib("common_test/include/ct.hrl").

-export([all/0, suite/0]).

-export([used_thread_specific_events/1, schedulers_alive/1,
         node_container_refc_check/1,
         long_timers/1, pollset_size/1,
         check_io_debug/1, get_check_io_info/0,
         lc_graph/1,
         leaked_processes/1,
         literal_area_collector/1]).

suite() ->
    [{ct_hooks,[ts_install_cth]},
     {timetrap, {minutes, 5}}].

all() ->
    [used_thread_specific_events, schedulers_alive,
     node_container_refc_check,
     long_timers, pollset_size, check_io_debug,
     lc_graph,
     %% Make sure that leaked_processes/1 is run after all
     %% other test cases that might leave processes behind.
     leaked_processes,
     literal_area_collector].

%%%
%%% The test cases -------------------------------------------------------------
%%%

used_thread_specific_events(Config) when is_list(Config) ->
    Pid = whereis(used_thread_specific_events_holder),
    Mon = monitor(process, Pid),
    Pid ! {get_used_tse, self()},
    UsedTSE = erlang:system_info(ethread_used_tse),
    receive
        {used_tse, InitialUsedTSE} ->
            io:format("InitialUsedTSE=~p UsedTSE=~p~n", [InitialUsedTSE, UsedTSE]),
            case os:type() of
                {win32,_} ->
                    %% The Windows poll implementation creates threads on demand,
                    %% which in turn get thread-specific events allocated.
                    %% We don't know how many such threads are created, so we
                    %% just have to guess and test that the number of events
                    %% is not huge.
                    Extra = 100, %% Value taken out of the blue...
                    if UsedTSE =< InitialUsedTSE+Extra -> ok;
                       true -> ct:fail("An unexpectedly large number of thread-specific events used!")
                    end;
                _ ->
                    if UsedTSE =< InitialUsedTSE -> ok;
                       true -> ct:fail("An increased number of thread-specific events used!")
                    end
            end,
            exit(Pid, kill),
            receive {'DOWN', Mon, process, Pid, _} -> ok end;
        {'DOWN', Mon, process, Pid, Reason} ->
            ct:fail({used_thread_specific_events_holder, Reason})
    end.
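
%% A minimal sketch (commented out, as it is not used by the suite) of the
%% sampling pattern above: ask the holder process for the count it recorded
%% at suite start and compare it with the current value of
%% erlang:system_info(ethread_used_tse). The holder name and message
%% protocol are the ones used by used_thread_specific_events/1; the
%% function name is hypothetical.
%%
%% sample_tse_growth() ->
%%     used_thread_specific_events_holder ! {get_used_tse, self()},
%%     Current = erlang:system_info(ethread_used_tse),
%%     receive
%%         {used_tse, Initial} -> Current - Initial
%%     end.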

%% Tests that all schedulers are actually used
schedulers_alive(Config) when is_list(Config) ->
    Master = self(),
    NoSchedulersOnline = erlang:system_flag(
                           schedulers_online,
                           erlang:system_info(schedulers)),
    NoSchedulers = erlang:system_info(schedulers),
    UsedScheds =
      try
          io:format("Number of schedulers configured: ~p~n", [NoSchedulers]),
          case erlang:system_info(multi_scheduling) of
              blocked ->
                  ct:fail(multi_scheduling_blocked);
              disabled ->
                  ok;
              enabled ->
                  io:format("Testing blocking process exit~n"),
                  BF = fun () ->
                               blocked_normal = erlang:system_flag(multi_scheduling,
                                                                   block_normal),
                               Master ! {self(), blocking},
                               receive after infinity -> ok end
                       end,
                  Blocker = spawn_link(BF),
                  Mon = erlang:monitor(process, Blocker),
                  receive {Blocker, blocking} -> ok end,
                  [Blocker] =
                      erlang:system_info(normal_multi_scheduling_blockers),
                  unlink(Blocker),
                  exit(Blocker, kill),
                  receive {'DOWN', Mon, _, _, _} -> ok end,
                  enabled = erlang:system_info(multi_scheduling),
                  [] = erlang:system_info(normal_multi_scheduling_blockers),
                  ok
          end,
          io:format("Testing blocked~n"),
          erlang:system_flag(multi_scheduling, block_normal),
          case erlang:system_info(multi_scheduling) of
              enabled ->
                  ct:fail(multi_scheduling_enabled);
              blocked_normal ->
                  [Master] = erlang:system_info(normal_multi_scheduling_blockers);
              disabled -> ok
          end,
          Ps = lists:map(
                 fun (_) ->
                         spawn_link(fun () ->
                                            run_on_schedulers(none,
                                                              [],
                                                              Master)
                                    end)
                 end,
                 lists:seq(1, NoSchedulers)),
          receive after 1000 -> ok end,
          {_, 1} = verify_all_schedulers_used({[], 0}, 1),
          lists:foreach(fun (P) ->
                                unlink(P),
                                exit(P, bang)
                        end, Ps),
          case erlang:system_flag(multi_scheduling, unblock_normal) of
              blocked_normal -> ct:fail(multi_scheduling_blocked);
              disabled -> ok;
              enabled -> ok
          end,
          erts_debug:set_internal_state(available_internal_state, true),
          %% node_and_dist_references will use emulator internal thread blocking...
          erts_debug:get_internal_state(node_and_dist_references),
          erts_debug:set_internal_state(available_internal_state, false),
          io:format("Testing not blocked~n"),
          Ps2 = lists:map(
                  fun (_) ->
                          spawn_link(fun () ->
                                             run_on_schedulers(none,
                                                               [],
                                                               Master)
                                     end)
                  end,
                  lists:seq(1, NoSchedulers)),
          receive after 1000 -> ok end,
          {_, NoSIDs} = verify_all_schedulers_used({[], 0}, NoSchedulers),
          lists:foreach(fun (P) ->
                                unlink(P),
                                exit(P, bang)
                        end, Ps2),
          NoSIDs
      after
          NoSchedulers = erlang:system_flag(schedulers_online,
                                            NoSchedulersOnline),
          NoSchedulersOnline = erlang:system_info(schedulers_online)
      end,
    {comment, "Number of schedulers " ++ integer_to_list(UsedScheds)}.
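
%% A minimal sketch (commented out; not used by the suite) of the
%% block/unblock pattern exercised above: block normal multi-scheduling,
%% run a fun while scheduling is restricted, and always unblock again.
%% The function name is hypothetical.
%%
%% with_blocked_normal_scheduling(Fun) ->
%%     %% Returns blocked_normal, or disabled on a single-scheduler system.
%%     _ = erlang:system_flag(multi_scheduling, block_normal),
%%     try
%%         Fun()
%%     after
%%         erlang:system_flag(multi_scheduling, unblock_normal)
%%     end.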


run_on_schedulers(LastSID, SIDs, ReportTo) ->
    case erlang:system_info(scheduler_id) of
        LastSID ->
            erlang:yield(),
            run_on_schedulers(LastSID, SIDs, ReportTo);
        SID ->
            NewSIDs = case lists:member(SID, SIDs) of
                          true ->
                              SIDs;
                          false ->
                              ReportTo ! {scheduler_used, SID},
                              [SID | SIDs]
                      end,
            erlang:yield(),
            run_on_schedulers(SID, NewSIDs, ReportTo)
    end.

wait_on_used_scheduler({SIDs, SIDsLen} = State) ->
    receive
        {scheduler_used, SID} ->
            case lists:member(SID, SIDs) of
                true ->
                    wait_on_used_scheduler(State);
                false ->
                    io:format("Scheduler ~p used~n", [SID]),
                    {[SID|SIDs], SIDsLen+1}
            end
    end.

verify_all_schedulers_used({UsedSIDs, UsedSIDsLen} = State, NoSchedulers) ->
    case NoSchedulers of
        UsedSIDsLen ->
            State;
        NoSchdlrs when NoSchdlrs < UsedSIDsLen ->
            ct:fail({more_schedulers_used_than_exist,
                     {existing_schedulers, NoSchdlrs},
                     {used_schedulers, UsedSIDsLen},
                     {used_scheduler_ids, UsedSIDs}});
        _ ->
            NewState = wait_on_used_scheduler(State),
            verify_all_schedulers_used(NewState, NoSchedulers)
    end.

node_container_refc_check(Config) when is_list(Config) ->
    node_container_SUITE:node_container_refc_check(node()),
    ok.

long_timers(Config) when is_list(Config) ->
    case long_timers_test:check_result() of
        ok -> ok;
        high_cpu -> {comment, "Ignored failures due to high CPU utilization"};
        missing_cpu_info -> {comment, "Ignored failures due to missing CPU utilization information"};
        Fail -> ct:fail(Fail)
    end.


pollset_size(Config) when is_list(Config) ->
    Name = pollset_size_testcase_initial_state_holder,
    Mon = erlang:monitor(process, Name),
    (catch Name ! {get_initial_check_io_result, self()}),
    InitChkIo = receive
                    {initial_check_io_result, ICIO} ->
                        erlang:demonitor(Mon, [flush]),
                        ICIO;
                    {'DOWN', Mon, _, _, Reason} ->
                        ct:fail({non_existing, Name, Reason})
                end,
    FinChkIo = get_check_io_info(),
    io:format("Initial: ~p~nFinal: ~p~n", [InitChkIo, FinChkIo]),
    InitPollsetSize = lists:keysearch(total_poll_set_size, 1, InitChkIo),
    FinPollsetSize = lists:keysearch(total_poll_set_size, 1, FinChkIo),
    HasGethost = case has_gethost() of true -> 1; _ -> 0 end,
    case InitPollsetSize =:= FinPollsetSize of
        true ->
            case InitPollsetSize of
                {value, {total_poll_set_size, Size}} ->
                    {comment,
                     "Pollset size: " ++ integer_to_list(Size)};
                _ ->
                    {skipped,
                     "Pollset size information not available"}
            end;
        false ->
            %% Sometimes we have fewer descriptors in the
            %% pollset at the end than when we started, but
            %% that is ok as long as there are at least 2
            %% descriptors (the dist listen socket and the
            %% epmd socket) in the pollset.
            {value, {total_poll_set_size, InitSize}} = InitPollsetSize,
            {value, {total_poll_set_size, FinSize}} = FinPollsetSize,
            true = FinSize < (InitSize + HasGethost),
            true = 2 =< FinSize,
            {comment,
             "Start pollset size: " ++ integer_to_list(InitSize)
             ++ " End pollset size: " ++ integer_to_list(FinSize)}
    end.
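
%% A minimal sketch (commented out; not used by the suite) of how the
%% pollset size is picked out of the check_io info list used above; the
%% list comes from get_check_io_info/0 further down in this module. The
%% function name is hypothetical.
%%
%% pollset_size_of(ChkIo) ->
%%     case lists:keysearch(total_poll_set_size, 1, ChkIo) of
%%         {value, {total_poll_set_size, Size}} -> {ok, Size};
%%         false -> unavailable
%%     end.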

check_io_debug(Config) when is_list(Config) ->
    case lists:keysearch(name, 1, hd(erlang:system_info(check_io))) of
        {value, {name, erts_poll}} -> check_io_debug_test();
        _ -> {skipped, "Not implemented in this emulator"}
    end.

check_io_debug_test() ->
    erlang:display(get_check_io_info()),
    erts_debug:set_internal_state(available_internal_state, true),
    {NoErrorFds, NoUsedFds, NoDrvSelStructs, NoDrvEvStructs} = CheckIoDebug
        = erts_debug:get_internal_state(check_io_debug),
    erts_debug:set_internal_state(available_internal_state, false),
    HasGetHost = has_gethost(),
    ct:log("check_io_debug: ~p~n"
           "HasGetHost: ~p", [CheckIoDebug, HasGetHost]),
    0 = NoErrorFds,
    if
        NoUsedFds == NoDrvSelStructs ->
            ok;
        HasGetHost andalso (NoUsedFds == (NoDrvSelStructs - 1)) ->
            %% If the inet_gethost port is alive, we may have
            %% one extra used fd that is not selected on.
            %% This happens when the initial setup of the
            %% port returns EAGAIN.
            ok
    end,
    0 = NoDrvEvStructs,
    ok.

has_gethost() ->
    has_gethost(erlang:ports()).

has_gethost([P|T]) ->
    case erlang:port_info(P, name) of
        {name, "inet_gethost" ++ _} ->
            true;
        _ ->
            has_gethost(T)
    end;
has_gethost([]) ->
    false.
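
%% The recursion above could equivalently be expressed with lists:any/2;
%% a sketch (commented out; behavior would be unchanged, function name
%% hypothetical):
%%
%% has_gethost_any() ->
%%     lists:any(fun (P) ->
%%                       case erlang:port_info(P, name) of
%%                           {name, "inet_gethost" ++ _} -> true;
%%                           _ -> false
%%                       end
%%               end, erlang:ports()).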

lc_graph(Config) when is_list(Config) ->
    %% Create an "lc_graph" file in the current working directory
    %% if the lock checker is enabled.
    erts_debug:lc_graph(),
    ok.

leaked_processes(Config) when is_list(Config) ->
    %% Replace the default timetrap with a timetrap with
    %% a known pid.
    test_server:timetrap_cancel(),
    Dog = test_server:timetrap(test_server:minutes(5)),

    Name = leaked_processes__process_holder,
    Name ! {get_initial_processes, self()},
    receive
        {initial_processes, Initial0} -> ok
    end,
    Initial = ordsets:from_list(Initial0),

    KnownPids = ordsets:from_list([self(), Dog]),
    Now0 = ordsets:from_list(processes()),
    Now = ordsets:subtract(Now0, KnownPids),
    Leaked = ordsets:subtract(Now, Initial),

    _ = [begin
             Info = process_info(P) ++ process_info(P, [current_stacktrace]),
             io:format("~p: ~p\n", [P, Info])
         end || P <- Leaked],
    Comment = lists:flatten(io_lib:format("~p process(es)",
                                          [length(Leaked)])),
    {comment, Comment}.

literal_area_collector(Config) when is_list(Config) ->
    literal_area_collector_test:check_idle(10000).

%%
%% Internal functions...
%%

display_check_io(ChkIo) ->
    catch erlang:display('--- CHECK IO INFO ---'),
    catch erlang:display(ChkIo),
    catch erts_debug:set_internal_state(available_internal_state, true),
    NoOfErrorFds = (catch element(1, erts_debug:get_internal_state(check_io_debug))),
    catch erlang:display({'NoOfErrorFds', NoOfErrorFds}),
    catch erts_debug:set_internal_state(available_internal_state, false),
    catch erlang:display('--- CHECK IO INFO ---'),
    ok.

get_check_io_info() ->
    ChkIo = driver_SUITE:get_check_io_total(erlang:system_info(check_io)),
    PendUpdNo = case lists:keysearch(pending_updates, 1, ChkIo) of
                    {value, {pending_updates, PendNo}} ->
                        PendNo;
                    false ->
                        0
                end,
    {value, {active_fds, ActFds}} = lists:keysearch(active_fds, 1, ChkIo),
    case {PendUpdNo, ActFds} of
        {0, 0} ->
            display_check_io(ChkIo),
            ChkIo;
        _ ->
            receive after 100 -> ok end,
            get_check_io_info()
    end.
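
%% A minimal usage sketch (commented out; function name hypothetical):
%% sample the check_io info once it has become quiescent and pick out the
%% pollset size, much as pollset_size/1 does above.
%%
%% sample_pollset_size() ->
%%     ChkIo = get_check_io_info(),
%%     lists:keysearch(total_poll_set_size, 1, ChkIo).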