@ -1,22 +0,0 @@ | |||
-module(fmtTest). | |||
-compile([export_all, nowarn_export_all]). | |||
%% @doc Split a format binary once on the first "~" marker, using an
%% inline (uncompiled) pattern.
tt1(FmtBinStr) ->
    Marker = <<"~">>,
    binary:split(FmtBinStr, Marker).
%% @doc Split a format binary once, using a pre-compiled match pattern
%% cached in persistent_term under the key `eFmtPtMc'.
%% NOTE: crashes with badarg if the key was never stored.
tt2(FmtBinStr) ->
    Pattern = persistent_term:get(eFmtPtMc),
    binary:split(FmtBinStr, Pattern).
%% @doc Split a format string (chardata) once on the "~" marker.
tt5(F) ->
    string:split(F, [$~]).
%% @doc Walk a binary one UTF-8 codepoint at a time; returns `ok' when
%% the whole binary has been consumed (crashes on invalid UTF-8).
tt3(<<>>) ->
    ok;
tt3(<<_Cp/utf8, Rest/binary>>) ->
    tt3(Rest).
%% @doc Walk a list one element at a time; returns `ok' at the end.
tt4([]) ->
    ok;
tt4([_Head | Rest]) ->
    tt4(Rest).
@ -1,713 +0,0 @@ | |||
%%% @author Fred Hebert <mononcqc@ferd.ca> | |||
%%% [http://ferd.ca/] | |||
%%% @doc Recon, as a module, provides access to the high-level functionality | |||
%%% contained in the Recon application. | |||
%%% | |||
%%% It has functions in five main categories: | |||
%%% | |||
%%% <dl> | |||
%%% <dt>1. State information</dt> | |||
%%% <dd>Process information is everything that has to do with the | |||
%%% general state of the node. Functions such as {@link info/1} | |||
%%% and {@link info/3} are wrappers to provide more details than | |||
%%% `erlang:process_info/1', while providing it in a production-safe | |||
%%% manner. They have equivalents to `erlang:process_info/2' in | |||
%%% the functions {@link info/2} and {@link info/4}, respectively.</dd> | |||
%%% <dd>{@link proc_count/2} and {@link proc_window/3} are to be used | |||
%%% when you require information about processes in a larger sense: | |||
%%% biggest consumers of given process information (say memory or | |||
%%% reductions), either absolutely or over a sliding time window, | |||
%%% respectively.</dd> | |||
%%% <dd>{@link bin_leak/1} is a function that can be used to try and | |||
%%% see if your Erlang node is leaking refc binaries. See the function | |||
%%% itself for more details.</dd> | |||
%%% <dd>Functions to access node statistics, in a manner somewhat similar | |||
%%% to what <a href="https://github.com/ferd/vmstats">vmstats</a> | |||
%%% provides as a library. There are 3 of them: | |||
%%% {@link node_stats_print/2}, which displays them, | |||
%%% {@link node_stats_list/2}, which returns them in a list, and | |||
%%% {@link node_stats/4}, which provides a fold-like interface | |||
%%% for stats gathering. For CPU usage specifically, see | |||
%%% {@link scheduler_usage/1}.</dd> | |||
%%% | |||
%%% <dt>2. OTP tools</dt> | |||
%%% <dd>This category provides tools to interact with pieces of OTP | |||
%%% more easily. At this point, the functions included are
%%% {@link get_state/1} and {@link get_state/2}, which work as wrappers
%%% around `sys:get_state/1' in R16B01, and provide the required
%%% functionality for older versions of Erlang.</dd>
%%% | |||
%%% <dt>3. Code Handling</dt> | |||
%%% <dd>Specific functions are in `recon' for the sole purpose | |||
%%% of interacting with source and compiled code. | |||
%%% {@link remote_load/1} and {@link remote_load/2} will allow | |||
%%% to take a local module, and load it remotely (in a diskless | |||
%%% manner) on another Erlang node you're connected to.</dd> | |||
%%% <dd>{@link source/1} allows to print the source of a loaded module, | |||
%%% in case it's not available in the currently running node.</dd> | |||
%%% | |||
%%% <dt>4. Ports and Sockets</dt> | |||
%%% <dd>To make it simpler to debug some network-related issues, | |||
%%% recon contains functions to deal with Erlang ports (raw, file | |||
%%% handles, or inet). Functions {@link tcp/0}, {@link udp/0}, | |||
%%% {@link sctp/0}, {@link files/0}, and {@link port_types/0} will | |||
%%% list all the Erlang ports of a given type. The latter function | |||
%%% prints counts of all individual types.</dd> | |||
%%% <dd>Port state information can be useful to figure out why certain | |||
%%% parts of the system misbehave. Functions such as | |||
%%% {@link port_info/1} and {@link port_info/2} are wrappers to provide | |||
%%% more details than `erlang:port_info/1-2', and, for
%%% inet ports, statistics and options for each socket.</dd> | |||
%%% <dd>Finally, the functions {@link inet_count/2} and {@link inet_window/3} | |||
%%% provide the absolute or sliding window functionality of | |||
%%% {@link proc_count/2} and {@link proc_window/3} to inet ports
%%% and connections currently on the node.</dd> | |||
%%% | |||
%%% <dt>5. RPC</dt> | |||
%%% <dd>These are wrappers to make RPC work simpler with clusters of | |||
%%% Erlang nodes. Default RPC mechanisms (from the `rpc' module) | |||
%%% make it somewhat painful to call shell-defined funs over node | |||
%%% boundaries. The functions {@link rpc/1}, {@link rpc/2}, and | |||
%%% {@link rpc/3} will do it with a simpler interface.</dd> | |||
%%% <dd>Additionally, when you're running diagnostic code on remote | |||
%%% nodes and want to know which node evaluated what result, using | |||
%%% {@link named_rpc/1}, {@link named_rpc/2}, and {@link named_rpc/3} | |||
%%% will wrap the results in a tuple that tells you which node it's | |||
%%% coming from, making it easier to identify bad nodes.</dd> | |||
%%% </dl> | |||
%%% @end | |||
-module(recon). | |||
-export([info/1, info/2, info/3, info/4, | |||
proc_count/2, proc_window/3, | |||
bin_leak/1, | |||
node_stats_print/2, node_stats_list/2, node_stats/4, | |||
scheduler_usage/1]). | |||
-export([get_state/1, get_state/2]). | |||
-export([remote_load/1, remote_load/2, | |||
source/1]). | |||
-export([tcp/0, udp/0, sctp/0, files/0, port_types/0, | |||
inet_count/2, inet_window/3, | |||
port_info/1, port_info/2]). | |||
-export([rpc/1, rpc/2, rpc/3, | |||
named_rpc/1, named_rpc/2, named_rpc/3]). | |||
%%%%%%%%%%%%% | |||
%%% TYPES %%% | |||
%%%%%%%%%%%%% | |||
-type proc_attrs() :: {pid(), | |||
Attr :: _, | |||
[Name :: atom() | |||
|{current_function, mfa()} | |||
|{initial_call, mfa()}, ...]}. | |||
-type inet_attrs() :: {port(), | |||
Attr :: _, | |||
[{atom(), term()}]}. | |||
-type pid_term() :: pid() | atom() | string() | |||
| {global, term()} | {via, module(), term()} | |||
| {non_neg_integer(), non_neg_integer(), non_neg_integer()}. | |||
-type info_type() :: meta | signals | location | memory_used | work. | |||
-type info_meta_key() :: registered_name | dictionary | group_leader | status. | |||
-type info_signals_key() :: links | monitors | monitored_by | trap_exit. | |||
-type info_location_key() :: initial_call | current_stacktrace. | |||
-type info_memory_key() :: memory | message_queue_len | heap_size | |||
| total_heap_size | garbage_collection. | |||
-type info_work_key() :: reductions. | |||
-type info_key() :: info_meta_key() | info_signals_key() | info_location_key() | |||
| info_memory_key() | info_work_key(). | |||
-type port_term() :: port() | string() | atom() | pos_integer(). | |||
-type port_info_type() :: meta | signals | io | memory_used | specific. | |||
-type port_info_meta_key() :: registered_name | id | name | os_pid. | |||
-type port_info_signals_key() :: connected | links | monitors. | |||
-type port_info_io_key() :: input | output. | |||
-type port_info_memory_key() :: memory | queue_size. | |||
-type port_info_specific_key() :: atom(). | |||
-type port_info_key() :: port_info_meta_key() | port_info_signals_key() | |||
| port_info_io_key() | port_info_memory_key() | |||
| port_info_specific_key(). | |||
-export_type([proc_attrs/0, inet_attrs/0, pid_term/0, port_term/0]). | |||
-export_type([info_type/0, info_key/0, | |||
info_meta_key/0, info_signals_key/0, info_location_key/0, | |||
info_memory_key/0, info_work_key/0]). | |||
-export_type([port_info_type/0, port_info_key/0, | |||
port_info_meta_key/0, port_info_signals_key/0, port_info_io_key/0, | |||
port_info_memory_key/0, port_info_specific_key/0]). | |||
%%%%%%%%%%%%%%%%%% | |||
%%% PUBLIC API %%% | |||
%%%%%%%%%%%%%%%%%% | |||
%%% Process Info %%% | |||
%% @doc Equivalent to `info(<A.B.C>)' where `A', `B', and `C' are the
%% integer parts of a pid.
-spec info(N, N, N) -> [{info_type(), [{info_key(), term()}]}, ...] when
    N :: non_neg_integer().
info(A, B, C) ->
    Pid = recon_lib:triple_to_pid(A, B, C),
    info(Pid).

%% @doc Equivalent to `info(<A.B.C>, Key)' where `A', `B', and `C' are the
%% integer parts of a pid.
-spec info(N, N, N, Key) -> term() when
    N :: non_neg_integer(),
    Key :: info_type() | [atom()] | atom().
info(A, B, C, Key) ->
    Pid = recon_lib:triple_to_pid(A, B, C),
    info(Pid, Key).
%% @doc Allows to be similar to `erlang:process_info/1', but excludes fields | |||
%% such as the mailbox, which have a tendency to grow and be unsafe when called | |||
%% in production systems. Also includes a few more fields than what is usually | |||
%% given (`monitors', `monitored_by', etc.), and separates the fields in a more | |||
%% readable format based on the type of information contained. | |||
%% | |||
%% Moreover, it will fetch and read information on local processes that were | |||
%% registered locally (an atom), globally (`{global, Name}'), or through | |||
%% another registry supported in the `{via, Module, Name}' syntax (must have a | |||
%% `Module:whereis_name/1' function). Pids can also be passed in as a string | |||
%% (`"<0.39.0>"') or a triple (`{0,39,0}') and will be converted to be used. | |||
-spec info(pid_term()) -> [{info_type(), [{info_key(), Value}]}, ...] when
    Value :: term().
info(PidTerm) ->
    %% Resolve any accepted pid representation, then gather every safe
    %% info category for it.
    Pid = recon_lib:term_to_pid(PidTerm),
    Categories = [meta, signals, location, memory_used, work],
    [info(Pid, Category) || Category <- Categories].
%% @doc Allows to be similar to `erlang:process_info/2', but allows to | |||
%% sort fields by safe categories and pre-selections, avoiding items such | |||
%% as the mailbox, which may have a tendency to grow and be unsafe when | |||
%% called in production systems. | |||
%% | |||
%% Moreover, it will fetch and read information on local processes that were | |||
%% registered locally (an atom), globally (`{global, Name}'), or through | |||
%% another registry supported in the `{via, Module, Name}' syntax (must have a | |||
%% `Module:whereis_name/1' function). Pids can also be passed in as a string | |||
%% (`"<0.39.0>"') or a triple (`{0,39,0}') and will be converted to be used. | |||
%% | |||
%% Although the type signature doesn't show it in generated documentation,
%% this function also accepts a list of arguments or individual arguments
%% allowed by `erlang:process_info/2' and returns them as that function would.
%% | |||
%% A fake attribute `binary_memory' is also available to return the | |||
%% amount of memory used by refc binaries for a process. | |||
-spec info(pid_term(), info_type()) -> {info_type(), [{info_key(), term()}]}
    ; (pid_term(), [atom()]) -> [{atom(), term()}]
    ; (pid_term(), atom()) -> {atom(), term()}.
%% Category clauses: each maps a safe category atom to its fixed key list
%% and delegates to info_type/3.
info(PidTerm, meta) ->
    info_type(PidTerm, meta, [registered_name, dictionary, group_leader,
                              status]);
info(PidTerm, signals) ->
    info_type(PidTerm, signals, [links, monitors, monitored_by, trap_exit]);
info(PidTerm, location) ->
    info_type(PidTerm, location, [initial_call, current_stacktrace]);
info(PidTerm, memory_used) ->
    info_type(PidTerm, memory_used, [memory, message_queue_len, heap_size,
                                     total_heap_size, garbage_collection]);
info(PidTerm, work) ->
    info_type(PidTerm, work, [reductions]);
%% Fallback: `Keys' is a raw atom or list of atoms accepted by
%% `erlang:process_info/2' (plus the fake `binary_memory' attribute),
%% passed straight through to proc_info/2.
info(PidTerm, Keys) ->
    proc_info(recon_lib:term_to_pid(PidTerm), Keys).
%% @private makes access to `info_type()' calls simpler: resolves the pid
%% term and tags the fetched key list with its category.
-spec info_type(pid_term(), info_type(), [info_key()]) ->
    {info_type(), [{info_key(), term()}]}.
info_type(PidTerm, Type, Keys) ->
    {Type, proc_info(recon_lib:term_to_pid(PidTerm), Keys)}.
%% @private wrapper around `erlang:process_info/2' that allows special
%% attribute handling for items like `binary_memory'.
proc_info(Pid, binary_memory) ->
    %% Fake attribute: derived from the real `binary' info item by summing
    %% the memory of the process's refc binaries via recon_lib.
    {binary, Bins} = erlang:process_info(Pid, binary),
    {binary_memory, recon_lib:binary_memory(Bins)};
proc_info(Pid, Term) when is_atom(Term) ->
    erlang:process_info(Pid, Term);
proc_info(Pid, List) when is_list(List) ->
    case lists:member(binary_memory, List) of
        false ->
            erlang:process_info(Pid, List);
        true ->
            %% Swap the fake key for the real `binary' key before the call,
            %% then rebuild the result under the requested fake name.
            Res = erlang:process_info(Pid, replace(binary_memory, binary, List)),
            proc_fake(List, Res)
    end.
%% @private Replace every occurrence of `Key' in `List' with `Val',
%% preserving order and all other elements.
replace(Key, Val, List) ->
    [case Elem of
         Key -> Val;
         _ -> Elem
     end || Elem <- List].
%% @private Pairwise rebuild of a process_info result: walks the requested
%% key list and the returned entries in lockstep; wherever the caller asked
%% for the fake `binary_memory' key, the matching real `{binary, Bins}'
%% entry is converted, and every other entry is kept as returned.
proc_fake([], []) ->
    [];
proc_fake([binary_memory | T1], [{binary, Bins} | T2]) ->
    [{binary_memory, recon_lib:binary_memory(Bins)}
     | proc_fake(T1, T2)];
proc_fake([_ | T1], [H | T2]) ->
    [H | proc_fake(T1, T2)].
%% @doc Fetches a given attribute from all processes (except the
%% caller) and returns the biggest `Num' consumers.
-spec proc_count(AttributeName, Num) -> [proc_attrs()] when
    AttributeName :: atom(),
    Num :: non_neg_integer().
proc_count(AttrName, Num) ->
    Attrs = recon_lib:proc_attrs(AttrName),
    recon_lib:sublist_top_n_attrs(Attrs, Num).
%% @doc Fetches a given attribute from all processes (except the | |||
%% caller) and returns the biggest entries, over a sliding time window. | |||
%% | |||
%% This function is particularly useful when processes on the node | |||
%% are mostly short-lived, usually too short to inspect through other | |||
%% tools, in order to figure out what kind of processes are eating | |||
%% through a lot of resources on a given node.
%% | |||
%% It is important to see this function as a snapshot over a sliding | |||
%% window. A program's timeline during sampling might look like this: | |||
%% | |||
%% `--w---- [Sample1] ---x-------------y----- [Sample2] ---z--->' | |||
%% | |||
%% Some processes will live between `w' and die at `x', some between `y' and | |||
%% `z', and some between `x' and `y'. These samples will not be too significant | |||
%% as they're incomplete. If the majority of your processes run between a time | |||
%% interval `x'...`y' (in absolute terms), you should make sure that your | |||
%% sampling time is smaller than this so that for many processes, their | |||
%% lifetime spans the equivalent of `w' and `z'. Not doing this can skew the | |||
%% results: long-lived processes, that have 10 times the time to accumulate | |||
%% data (say reductions) will look like bottlenecks when they're not one. | |||
%% | |||
%% Warning: this function depends on data gathered at two snapshots, and then | |||
%% building a dictionary with entries to differentiate them. This can take a | |||
%% heavy toll on memory when you have many dozens of thousands of processes. | |||
-spec proc_window(AttributeName, Num, Milliseconds) -> [proc_attrs()] when
    AttributeName :: atom(),
    Num :: non_neg_integer(),
    Milliseconds :: pos_integer().
proc_window(AttrName, Num, Time) ->
    %% Take two snapshots `Time' ms apart; what gets ranked is the delta
    %% between them, not the absolute values.
    Snapshot = fun() -> recon_lib:proc_attrs(AttrName) end,
    {First, Last} = recon_lib:sample(Time, Snapshot),
    Deltas = recon_lib:sliding_window(First, Last),
    recon_lib:sublist_top_n_attrs(Deltas, Num).
%% @doc Refc binaries can be leaking when barely-busy processes route them | |||
%% around and do little else, or when extremely busy processes reach a stable | |||
%% amount of memory allocated and do the vast majority of their work with refc | |||
%% binaries. When this happens, it may take a very long while before references | |||
%% get deallocated and refc binaries get to be garbage collected, leading to | |||
%% Out Of Memory crashes. | |||
%% This function fetches the number of refc binary references in each process | |||
%% of the node, garbage collects them, and compares the resulting number of | |||
%% references in each of them. The function then returns the `N' processes | |||
%% that freed the biggest amount of binaries, potentially highlighting leaks. | |||
%% | |||
%% See <a href="http://www.erlang.org/doc/efficiency_guide/binaryhandling.html#id65722">The efficiency guide</a> | |||
%% for more details on refc binaries | |||
-spec bin_leak(pos_integer()) -> [proc_attrs()].
bin_leak(N) ->
    %% For every process: count refc binary references before and after a
    %% forced GC. The score `length(Pre) - length(Post)' is the number of
    %% references freed; the top-N scorers are the likeliest leakers.
    %% Processes that die mid-inspection score 0 via the catch-all.
    Procs = recon_lib:sublist_top_n_attrs([
        try
            {ok, {_, Pre, Id}} = recon_lib:proc_attrs(binary, Pid),
            erlang:garbage_collect(Pid),
            {ok, {_, Post, _}} = recon_lib:proc_attrs(binary, Pid),
            {Pid, length(Pre) - length(Post), Id}
        catch
            _:_ -> {Pid, 0, []}
        end || Pid <- processes()
    ], N),
    %% Present the counts negated, i.e. as the number of references lost.
    [{Pid, -Val, Id} || {Pid, Val, Id} <- Procs].
%% @doc Shorthand for `node_stats(N, Interval, fun(X,_) -> io:format("~p~n",[X]) end, nostate)'.
-spec node_stats_print(Repeat, Interval) -> term() when
    Repeat :: non_neg_integer(),
    Interval :: pos_integer().
node_stats_print(Repeat, Interval) ->
    Print = fun(Stats, _Acc) -> io:format("~p~n", [Stats]) end,
    node_stats(Repeat, Interval, Print, ok).
%% @doc Because Erlang CPU usage as reported from `top' isn't the most | |||
%% reliable value (due to schedulers doing idle spinning to avoid going | |||
%% to sleep and impacting latency), a metric exists that is based on | |||
%% scheduler wall time. | |||
%% | |||
%% For any time interval, Scheduler wall time can be used as a measure | |||
%% of how 'busy' a scheduler is. A scheduler is busy when: | |||
%% | |||
%% <ul> | |||
%% <li>executing process code</li> | |||
%% <li>executing driver code</li> | |||
%% <li>executing NIF code</li> | |||
%% <li>executing BIFs</li> | |||
%% <li>garbage collecting</li> | |||
%% <li>doing memory management</li> | |||
%% </ul> | |||
%% | |||
%% A scheduler isn't busy when doing anything else. | |||
-spec scheduler_usage(Millisecs) -> undefined | [{SchedulerId, Usage}] when
    Millisecs :: non_neg_integer(),
    SchedulerId :: pos_integer(),
    Usage :: number().
scheduler_usage(Interval) when is_integer(Interval) ->
    %% We start and stop the scheduler_wall_time system flag if
    %% it wasn't in place already. Usually setting the flag should
    %% have a CPU impact (making it higher) only when under low usage.
    FormerFlag = erlang:system_flag(scheduler_wall_time, true),
    %% Two snapshots bracketing the sleep; their diff yields per-scheduler
    %% busy ratios over exactly that window.
    First = erlang:statistics(scheduler_wall_time),
    timer:sleep(Interval),
    Last = erlang:statistics(scheduler_wall_time),
    %% Restore whatever the flag was before we touched it.
    erlang:system_flag(scheduler_wall_time, FormerFlag),
    recon_lib:scheduler_usage_diff(First, Last).
%% @doc Shorthand for `node_stats(N, Interval, fun(X,Acc) -> [X|Acc] end, [])' | |||
%% with the results reversed to be in the right temporal order. | |||
-spec node_stats_list(Repeat, Interval) -> [Stats] when
    Repeat :: non_neg_integer(),
    Interval :: pos_integer(),
    Stats :: {[Absolutes :: {atom(), term()}],
              [Increments :: {atom(), term()}]}.
node_stats_list(N, Interval) ->
    %% The fold accumulates newest-first; reverse into temporal order.
    Collect = fun(Stat, Acc) -> [Stat | Acc] end,
    lists:reverse(node_stats(N, Interval, Collect, [])).
%% @doc Gathers statistics `N' time, waiting `Interval' milliseconds between | |||
%% each run, and accumulates results using a folding function `FoldFun'. | |||
%% The function will gather statistics in two forms: Absolutes and Increments. | |||
%% | |||
%% Absolutes are values that keep changing with time, and are useful to know | |||
%% about as a datapoint: process count, size of the run queue, error_logger | |||
%% queue length in versions before OTP-21 or those that run it explicitly,
%% and the memory of the node (total, processes, atoms, binaries, | |||
%% and ets tables). | |||
%% | |||
%% Increments are values that are mostly useful when compared to a previous | |||
%% one to have an idea what they're doing, because otherwise they'd never | |||
%% stop increasing: bytes in and out of the node, number of garbage collector
%% runs, words of memory that were garbage collected, and the global reductions | |||
%% count for the node. | |||
-spec node_stats(N, Interval, FoldFun, Acc) -> Acc when
    N :: non_neg_integer(),
    Interval :: pos_integer(),
    FoldFun :: fun((Stats, Acc) -> Acc),
    Acc :: term(),
    Stats :: {[Absolutes :: {atom(), term()}],
              [Increments :: {atom(), term()}]}.
node_stats(N, Interval, FoldFun, Init) ->
    %% If a registered `error_logger' process exists (pre-OTP-21, or nodes
    %% running it explicitly), its queue length is worth sampling;
    %% otherwise logging goes through `logger' and no queue is reported.
    Logger = case whereis(error_logger) of
        undefined -> logger;
        _ -> error_logger
    end,
    %% Turn on scheduler wall time if it wasn't there already
    FormerFlag = erlang:system_flag(scheduler_wall_time, true),
    %% Stats is an ugly fun, but it does its thing.
    %% Its argument is the state threaded between samples — previous I/O
    %% totals, GC stats, and scheduler wall time — so that the
    %% "Increments" can be computed as deltas against the last sample.
    Stats = fun({{OldIn, OldOut}, {OldGCs, OldWords, _}, SchedWall}) ->
        %% Absolutes
        ProcC = erlang:system_info(process_count),
        RunQ = erlang:statistics(run_queue),
        LogQ = case Logger of
            error_logger ->
                %% NOTE(review): whereis/1 could return `undefined' here if
                %% error_logger died after the check above — confirm this
                %% race is acceptable.
                {_, LogQLen} = process_info(whereis(error_logger),
                                            message_queue_len),
                LogQLen;
            _ ->
                undefined
        end,
        %% Mem (Absolutes)
        Mem = erlang:memory(),
        Tot = proplists:get_value(total, Mem),
        ProcM = proplists:get_value(processes_used, Mem),
        Atom = proplists:get_value(atom_used, Mem),
        Bin = proplists:get_value(binary, Mem),
        Ets = proplists:get_value(ets, Mem),
        %% Incremental
        {{input, In}, {output, Out}} = erlang:statistics(io),
        GC = {GCs, Words, _} = erlang:statistics(garbage_collection),
        BytesIn = In - OldIn,
        BytesOut = Out - OldOut,
        GCCount = GCs - OldGCs,
        GCWords = Words - OldWords,
        {_, Reds} = erlang:statistics(reductions),
        SchedWallNew = erlang:statistics(scheduler_wall_time),
        SchedUsage = recon_lib:scheduler_usage_diff(SchedWall, SchedWallNew),
        %% Stats Results: {Absolutes, Increments} pair handed to FoldFun.
        %% The error_logger queue entry is only included when measured.
        {{[{process_count, ProcC}, {run_queue, RunQ}] ++
          [{error_logger_queue_len, LogQ} || LogQ =/= undefined] ++
          [{memory_total, Tot},
           {memory_procs, ProcM}, {memory_atoms, Atom},
           {memory_bin, Bin}, {memory_ets, Ets}],
          [{bytes_in, BytesIn}, {bytes_out, BytesOut},
           {gc_count, GCCount}, {gc_words_reclaimed, GCWords},
           {reductions, Reds}, {scheduler_usage, SchedUsage}]},
         %% New State
         {{In, Out}, GC, SchedWallNew}}
    end,
    %% Seed the fold with a baseline sample taken right now.
    {{input, In}, {output, Out}} = erlang:statistics(io),
    Gc = erlang:statistics(garbage_collection),
    SchedWall = erlang:statistics(scheduler_wall_time),
    Result = recon_lib:time_fold(
        N, Interval, Stats,
        {{In, Out}, Gc, SchedWall},
        FoldFun, Init),
    %% Set scheduler wall time back to what it was
    erlang:system_flag(scheduler_wall_time, FormerFlag),
    Result.
%%% OTP & Manipulations %%% | |||
%% @doc Shorthand call to `recon:get_state(PidTerm, 5000)'
-spec get_state(pid_term()) -> term().
get_state(PidTerm) ->
    get_state(PidTerm, 5000).
%% @doc Fetch the internal state of an OTP process. | |||
%% Calls `sys:get_state/2' directly in R16B01+, and fetches | |||
%% it dynamically on older versions of OTP. | |||
-spec get_state(pid_term(), Ms :: non_neg_integer() | 'infinity') -> term().
get_state(PidTerm, Timeout) ->
    Proc = recon_lib:term_to_pid(PidTerm),
    try
        sys:get_state(Proc, Timeout)
    catch
        %% Pre-R16B01 OTP has no sys:get_state/2; dig the state out of the
        %% generic sys:get_status/2 format instead.
        error:undef ->
            case sys:get_status(Proc, Timeout) of
                {status, _Pid, {module, gen_server}, Data} ->
                    {data, Props} = lists:last(lists:nth(5, Data)),
                    proplists:get_value("State", Props);
                %% NOTE(review): `_Pod' below is presumably a typo for
                %% `_Pid'; harmless since the binding is unused.
                {status, _Pod, {module, gen_fsm}, Data} ->
                    {data, Props} = lists:last(lists:nth(5, Data)),
                    proplists:get_value("StateData", Props)
            end
    end.
%%% Code & Stuff %%% | |||
%% @equiv remote_load(nodes(), Mod)
-spec remote_load(module()) -> term().
remote_load(Mod) ->
    remote_load(nodes(), Mod).
%% @doc Loads one or more modules remotely, in a diskless manner. Allows to | |||
%% share code loaded locally with a remote node that doesn't have it | |||
-spec remote_load(Nodes, module()) -> term() when
    Nodes :: [node(), ...] | node().
remote_load([_ | _] = Nodes, Mod) when is_atom(Mod) ->
    %% Ship the locally-loaded object code so the targets need no copy
    %% of the beam file on disk.
    {Mod, Bin, File} = code:get_object_code(Mod),
    erpc:multicall(Nodes, code, load_binary, [Mod, File, Bin]);
remote_load([_ | _] = Nodes, Modules) when is_list(Modules) ->
    [remote_load(Nodes, M) || M <- Modules];
remote_load(Node, Mod) ->
    %% Single node given outside a list: normalize and retry.
    remote_load([Node], Mod).
%% @doc Obtain the source code of a module compiled with `debug_info'. | |||
%% The returned list sadly does not allow to format the types and typed | |||
%% records the way they look in the original module, but instead goes to | |||
%% an intermediary form used in the AST. They will still be placed | |||
%% in the right module attributes, however. | |||
%% @todo Figure out a way to pretty-print typespecs and records. | |||
-spec source(module()) -> iolist().
source(Module) ->
    %% Requires the module to have been compiled with `debug_info' so the
    %% beam carries an abstract_code chunk.
    BeamPath = code:which(Module),
    {ok, {_, [{abstract_code, {_, Forms}}]}} =
        beam_lib:chunks(BeamPath, [abstract_code]),
    erl_prettypr:format(erl_syntax:form_list(Forms)).
%%% Ports Info %%% | |||
%% @doc returns a list of all TCP ports (the data type) open on the node.
-spec tcp() -> [port()].
tcp() -> ports_by_driver("tcp_inet").

%% @doc returns a list of all UDP ports (the data type) open on the node.
-spec udp() -> [port()].
udp() -> ports_by_driver("udp_inet").

%% @doc returns a list of all SCTP ports (the data type) open on the node.
-spec sctp() -> [port()].
sctp() -> ports_by_driver("sctp_inet").

%% @doc returns a list of all file handles open on the node.
%% @deprecated Starting with OTP-21, files are implemented as NIFs
%% and can no longer be listed. This function returns an empty list
%% in such a case.
-spec files() -> [port()].
files() -> ports_by_driver("efile").

%% @private Shared lookup of ports by their driver name.
ports_by_driver(Driver) -> recon_lib:port_list(name, Driver).
%% @doc Shows a list of all different ports on the node with their respective | |||
%% types. | |||
-spec port_types() -> [{Type :: string(), Count :: pos_integer()}].
port_types() ->
    lists:usort(
        %% sorts by biggest count, smallest type: comparing {VA, KB} against
        %% {VB, KA} (note the swapped keys) makes higher counts sort first
        %% while ties fall back to ascending type name.
        fun({KA, VA}, {KB, VB}) -> {VA, KB} > {VB, KA} end,
        recon_lib:count([Name || {_, Name} <- recon_lib:port_list(name)])
    ).
%% @doc Fetches a given attribute from all inet ports (TCP, UDP, SCTP) | |||
%% and returns the biggest `Num' consumers. | |||
%% | |||
%% The values to be used can be the number of octets (bytes) sent, received, | |||
%% or both (`send_oct', `recv_oct', `oct', respectively), or the number | |||
%% of packets sent, received, or both (`send_cnt', `recv_cnt', `cnt', | |||
%% respectively). Individual absolute values for each metric will be returned | |||
%% in the 3rd position of the resulting tuple. | |||
-spec inet_count(AttributeName, Num) -> [inet_attrs()] when
    AttributeName :: 'recv_cnt' | 'recv_oct' | 'send_cnt' | 'send_oct'
                   | 'cnt' | 'oct',
    Num :: non_neg_integer().
inet_count(Attr, Num) ->
    Attrs = recon_lib:inet_attrs(Attr),
    recon_lib:sublist_top_n_attrs(Attrs, Num).
%% @doc Fetches a given attribute from all inet ports (TCP, UDP, SCTP) | |||
%% and returns the biggest entries, over a sliding time window. | |||
%% | |||
%% Warning: this function depends on data gathered at two snapshots, and then | |||
%% building a dictionary with entries to differentiate them. This can take a | |||
%% heavy toll on memory when you have many dozens of thousands of ports open. | |||
%% | |||
%% The values to be used can be the number of octets (bytes) sent, received, | |||
%% or both (`send_oct', `recv_oct', `oct', respectively), or the number | |||
%% of packets sent, received, or both (`send_cnt', `recv_cnt', `cnt', | |||
%% respectively). Individual absolute values for each metric will be returned | |||
%% in the 3rd position of the resulting tuple. | |||
-spec inet_window(AttributeName, Num, Milliseconds) -> [inet_attrs()] when
    AttributeName :: 'recv_cnt' | 'recv_oct' | 'send_cnt' | 'send_oct'
                   | 'cnt' | 'oct',
    Num :: non_neg_integer(),
    Milliseconds :: pos_integer().
inet_window(Attr, Num, Time) when is_atom(Attr) ->
    %% Two snapshots `Time' ms apart; the ranked values are the deltas.
    Snapshot = fun() -> recon_lib:inet_attrs(Attr) end,
    {First, Last} = recon_lib:sample(Time, Snapshot),
    Deltas = recon_lib:sliding_window(First, Last),
    recon_lib:sublist_top_n_attrs(Deltas, Num).
%% @doc Allows to be similar to `erlang:port_info/1', but allows | |||
%% more flexible port usage: usual ports, ports that were registered | |||
%% locally (an atom), ports represented as strings (`"#Port<0.2013>"'), | |||
%% or through an index lookup (`2013', for the same result as | |||
%% `"#Port<0.2013>"'). | |||
%% | |||
%% Moreover, the function will try to fetch implementation-specific | |||
%% details based on the port type (only inet ports have this feature | |||
%% so far). For example, TCP ports will include information about the | |||
%% remote peer, transfer statistics, and socket options being used. | |||
%% | |||
%% The information-specific and the basic port info are sorted and | |||
%% categorized in broader categories ({@link port_info_type()}). | |||
-spec port_info(port_term()) -> [{port_info_type(),
                                  [{port_info_key(), term()}]}, ...].
port_info(PortTerm) ->
    %% Resolve any accepted port representation, then gather every
    %% category of port information for it.
    Port = recon_lib:term_to_port(PortTerm),
    Categories = [meta, signals, io, memory_used, specific],
    [port_info(Port, Category) || Category <- Categories].
%% @doc Allows to be similar to `erlang:port_info/2', but allows | |||
%% more flexible port usage: usual ports, ports that were registered | |||
%% locally (an atom), ports represented as strings (`"#Port<0.2013>"'), | |||
%% or through an index lookup (`2013', for the same result as | |||
%% `"#Port<0.2013>"'). | |||
%% | |||
%% Moreover, the function allows one to fetch information by category
%% as defined in {@link port_info_type()}, and although the type signature | |||
%% doesn't show it in the generated documentation, individual items | |||
%% accepted by `erlang:port_info/2' are accepted, and lists of them too. | |||
-spec port_info(port_term(), port_info_type()) -> {port_info_type(),
                                                   [{port_info_key(), _}]}
    ; (port_term(), [atom()]) -> [{atom(), term()}]
    ; (port_term(), atom()) -> {atom(), term()}.
port_info(PortTerm, meta) ->
    {meta, List} = port_info_type(PortTerm, meta, [id, name, os_pid]),
    %% registered_name is fetched separately: erlang:port_info/2 returns []
    %% (not a tuple) for unregistered ports, so it can't go in the key list.
    case port_info(PortTerm, registered_name) of
        [] -> {meta, List};
        Name -> {meta, [Name | List]}
    end;
port_info(PortTerm, signals) ->
    port_info_type(PortTerm, signals, [connected, links, monitors]);
port_info(PortTerm, io) ->
    port_info_type(PortTerm, io, [input, output]);
port_info(PortTerm, memory_used) ->
    port_info_type(PortTerm, memory_used, [memory, queue_size]);
port_info(PortTerm, specific) ->
    Port = recon_lib:term_to_port(PortTerm),
    %% Implementation-specific details are only available for inet drivers;
    %% every other port type yields an empty property list.
    Props = case erlang:port_info(Port, name) of
        {_, Type} when Type =:= "udp_inet";
                       Type =:= "tcp_inet";
                       Type =:= "sctp_inet" ->
            %% Each inet query is best-effort: failures simply drop the
            %% corresponding entry instead of crashing the whole lookup.
            case inet:getstat(Port) of
                {ok, Stats} -> [{statistics, Stats}];
                _ -> []
            end ++
            case inet:peername(Port) of
                {ok, Peer} -> [{peername, Peer}];
                {error, _} -> []
            end ++
            case inet:sockname(Port) of
                {ok, Local} -> [{sockname, Local}];
                {error, _} -> []
            end ++
            case inet:getopts(Port, [active, broadcast, buffer, delay_send,
                                     dontroute, exit_on_close, header,
                                     high_watermark, ipv6_v6only, keepalive,
                                     linger, low_watermark, mode, nodelay,
                                     packet, packet_size, priority,
                                     read_packets, recbuf, reuseaddr,
                                     send_timeout, sndbuf]) of
                {ok, Opts} -> [{options, Opts}];
                {error, _} -> []
            end;
        {_, "efile"} ->
            %% would be nice to support file-specific info, but things
            %% are too vague with the file_server and how it works in
            %% order to make this work efficiently
            [];
        _ ->
            []
    end,
    %% NOTE(review): this clause tags the result `type' rather than
    %% `specific', which does not match the `port_info_type()' spec above —
    %% confirm whether callers depend on the `type' tag before changing it.
    {type, Props};
port_info(PortTerm, Keys) when is_list(Keys) ->
    Port = recon_lib:term_to_port(PortTerm),
    [erlang:port_info(Port, Key) || Key <- Keys];
port_info(PortTerm, Key) when is_atom(Key) ->
    erlang:port_info(recon_lib:term_to_port(PortTerm), Key).
%% @private makes access to `port_info_type()' calls simpler: resolves the
%% port term and tags the fetched key list with its category.
%% (Spec kept commented out as in the original; the argument is a port
%% term, not a pid term.)
%-spec port_info_type(port_term(), port_info_type(), [port_info_key()]) ->
%    {port_info_type(), [{port_info_key(), term()}]}.
port_info_type(PortTerm, Type, Keys) ->
    Port = recon_lib:term_to_port(PortTerm),
    {Type, [erlang:port_info(Port, Key) || Key <- Keys]}.
%%% RPC Utils %%% | |||
%% @doc Shorthand for `rpc([node()|nodes()], Fun)'.
-spec rpc(fun(() -> term())) -> {[Success :: _], [Fail :: _]}.
rpc(Fun) ->
    rpc([node() | nodes()], Fun).

%% @doc Shorthand for `rpc(Nodes, Fun, infinity)'.
-spec rpc(node()|[node(), ...], fun(() -> term())) -> {[Success :: _], [Fail :: _]}.
rpc(Nodes, Fun) ->
    rpc(Nodes, Fun, infinity).

%% @doc Runs an arbitrary fun (of arity 0) over one or more nodes.
-spec rpc(node()|[node(), ...], fun(() -> term()), timeout()) -> {[Success :: _], [Fail :: _]}.
rpc([_ | _] = Nodes, Fun, Timeout) when is_function(Fun, 0) ->
    %% erpc can ship anonymous funs across node boundaries, unlike the
    %% older rpc module's usual call style.
    erpc:multicall(Nodes, erlang, apply, [Fun, []], Timeout);
rpc(Node, Fun, Timeout) when is_atom(Node) ->
    rpc([Node], Fun, Timeout).
%% @doc Shorthand for `named_rpc([node()|nodes()], Fun)'.
-spec named_rpc(fun(() -> term())) -> {[Success :: _], [Fail :: _]}.
named_rpc(Fun) ->
    named_rpc([node() | nodes()], Fun).

%% @doc Shorthand for `named_rpc(Nodes, Fun, infinity)'.
-spec named_rpc(node()|[node(), ...], fun(() -> term())) -> {[Success :: _], [Fail :: _]}.
named_rpc(Nodes, Fun) ->
    named_rpc(Nodes, Fun, infinity).

%% @doc Runs an arbitrary fun (of arity 0) over one or more nodes, and returns the
%% name of the node that computed a given result along with it, in a tuple.
-spec named_rpc(node()|[node(), ...], fun(() -> term()), timeout()) -> {[Success :: _], [Fail :: _]}.
named_rpc([_ | _] = Nodes, Fun, Timeout) when is_function(Fun, 0) ->
    %% Wrap the user fun so every result is tagged with the node that
    %% produced it, which makes bad nodes easy to spot.
    Tagged = fun() -> {node(), Fun()} end,
    erpc:multicall(Nodes, erlang, apply, [Tagged, []], Timeout);
named_rpc(Node, Fun, Timeout) when is_atom(Node) ->
    named_rpc([Node], Fun, Timeout).
@ -1,778 +0,0 @@ | |||
%%% @author Fred Hebert <mononcqc@ferd.ca> | |||
%%% [http://ferd.ca/] | |||
%%% @author Lukas Larsson <lukas@erlang.org> | |||
%%% @doc Functions to deal with | |||
%%% <a href="http://www.erlang.org/doc/man/erts_alloc.html">Erlang's memory | |||
%%% allocators</a>, or particularly, to try to present the allocator data | |||
%%% in a way that makes it simpler to discover possible problems. | |||
%%% | |||
%%% Tweaking Erlang memory allocators and their behaviour is a very tricky | |||
%%% ordeal whenever you have to give up the default settings. This module | |||
%%% (and its documentation) will try and provide helpful pointers to help | |||
%%% in this task. | |||
%%% | |||
%%% This module should mostly be helpful to figure out <em>if</em> there is | |||
%%% a problem, but will offer little help to figure out <em>what</em> is wrong. | |||
%%% | |||
%%% To figure this out, you need to dig deeper into the allocator data | |||
%%% (obtainable with {@link allocators/0}), and/or have some precise knowledge | |||
%%% about the type of load and work done by the VM to be able to assess what | |||
%%% each reaction to individual tweak should be. | |||
%%% | |||
%%% A lot of trial and error might be required to figure out if tweaks have | |||
%%% helped or not, ultimately. | |||
%%% | |||
%%% In order to help do offline debugging of memory allocator problems | |||
%%% recon_alloc also has a few functions that store snapshots of the | |||
%%% memory statistics. | |||
%%% These snapshots can be used to freeze the current allocation values so that | |||
%%% they do not change during analysis while using the regular functionality of | |||
%%% this module, so that the allocator values can be saved, or that | |||
%%% they can be shared, dumped, and reloaded for further analysis using files. | |||
%%% See {@link snapshot_load/1} for a simple use-case. | |||
%%% | |||
%%% Glossary: | |||
%%% <dl> | |||
%%% <dt>sys_alloc</dt> | |||
%%% <dd>System allocator, usually just malloc</dd> | |||
%%% | |||
%%% <dt>mseg_alloc</dt> | |||
%%% <dd>Used by other allocators, can do mmap. Caches allocations</dd> | |||
%%% | |||
%%% <dt>temp_alloc</dt> | |||
%%% <dd>Used for temporary allocations</dd> | |||
%%% | |||
%%% <dt>eheap_alloc</dt> | |||
%%% <dd>Heap data (i.e. process heaps) allocator</dd> | |||
%%% | |||
%%% <dt>binary_alloc</dt> | |||
%%% <dd>Global binary heap allocator</dd> | |||
%%% | |||
%%% <dt>ets_alloc</dt> | |||
%%% <dd>ETS data allocator</dd> | |||
%%% | |||
%%% <dt>driver_alloc</dt> | |||
%%% <dd>Driver data allocator</dd> | |||
%%% | |||
%%% <dt>sl_alloc</dt> | |||
%%% <dd>Short-lived memory blocks allocator</dd> | |||
%%% | |||
%%% <dt>ll_alloc</dt> | |||
%%% <dd>Long-lived data (i.e. Erlang code itself) allocator</dd> | |||
%%% | |||
%%% <dt>fix_alloc</dt> | |||
%%% <dd>Frequently used fixed-size data allocator</dd> | |||
%%% | |||
%%% <dt>std_alloc</dt> | |||
%%% <dd>Allocator for other memory blocks</dd> | |||
%%% | |||
%%% <dt>carrier</dt> | |||
%%% <dd>When a given area of memory is allocated by the OS to the | |||
%%% VM (through sys_alloc or mseg_alloc), it is put into a 'carrier'. There | |||
%%% are two kinds of carriers: multiblock and single block. The default | |||
%%% carriers data is sent to are multiblock carriers, owned by a specific | |||
%%% allocator (ets_alloc, binary_alloc, etc.). The specific allocator can | |||
%%% thus do allocation for specific Erlang requirements within bits of | |||
%%% memory that has been preallocated before. This allows more reuse, | |||
%%% and we can even measure the cache hit rates {@link cache_hit_rates/0}. | |||
%%% | |||
%%% There is however a threshold above which an item in memory won't fit | |||
%%% a multiblock carrier. When that happens, the specific allocator does | |||
%%% a special allocation to a single block carrier. This is done by the | |||
%%% allocator basically asking for space directly from sys_alloc or | |||
%%% mseg_alloc rather than a previously multiblock area already obtained | |||
%%% before. | |||
%%% | |||
%%% This leads to various allocation strategies where you decide to | |||
%%% choose: | |||
%%% <ol> | |||
%%% <li>which multiblock carrier you're going to (if at all)</li> | |||
%%% <li>which block in that carrier you're going to</li> | |||
%%% </ol> | |||
%%% | |||
%%% See <a href="http://www.erlang.org/doc/man/erts_alloc.html">the official | |||
%%% documentation on erts_alloc</a> for more details. | |||
%%% </dd> | |||
%%% | |||
%%% <dt>mbcs</dt> | |||
%%% <dd>Multiblock carriers.</dd> | |||
%%% | |||
%%% <dt>sbcs</dt> | |||
%%% <dd>Single block carriers.</dd> | |||
%%% | |||
%%% <dt>lmbcs</dt> | |||
%%% <dd>Largest multiblock carrier size</dd> | |||
%%% | |||
%%% <dt>smbcs</dt> | |||
%%% <dd>Smallest multiblock carrier size</dd> | |||
%%% | |||
%%% <dt>sbct</dt> | |||
%%% <dd>Single block carrier threshold</dd> | |||
%%% </dl> | |||
%%% | |||
%%% By default all sizes returned by this module are in bytes. You can change | |||
%%% this by calling {@link set_unit/1}. | |||
%%% | |||
-module(recon_alloc). | |||
-define(UTIL_ALLOCATORS, [temp_alloc, | |||
eheap_alloc, | |||
binary_alloc, | |||
ets_alloc, | |||
driver_alloc, | |||
sl_alloc, | |||
ll_alloc, | |||
fix_alloc, | |||
std_alloc | |||
]). | |||
-type allocator() :: temp_alloc | eheap_alloc | binary_alloc | ets_alloc | |||
| driver_alloc | sl_alloc | ll_alloc | fix_alloc | |||
| std_alloc. | |||
-type instance() :: non_neg_integer(). | |||
-type allocdata(T) :: {{allocator(), instance()}, T}. | |||
-type allocdata_types(T) :: {{allocator(), [instance()]}, T}. | |||
-export_type([allocator/0, instance/0, allocdata/1]). | |||
-define(CURRENT_POS, 2). % pos in sizes tuples for current value | |||
-define(MAX_POS, 4). % pos in sizes tuples for max value | |||
-export([memory/1, memory/2, fragmentation/1, cache_hit_rates/0, | |||
average_block_sizes/1, sbcs_to_mbcs/1, allocators/0, | |||
allocators/1]). | |||
%% Snapshot handling | |||
-type memory() :: [{atom(), atom()}]. | |||
-type snapshot() :: {memory(), [allocdata(term())]}. | |||
-export_type([memory/0, snapshot/0]). | |||
-export([snapshot/0, snapshot_clear/0, | |||
snapshot_print/0, snapshot_get/0, | |||
snapshot_save/1, snapshot_load/1]). | |||
%% Unit handling | |||
-export([set_unit/1]). | |||
%%%%%%%%%%%%%% | |||
%%% Public %%% | |||
%%%%%%%%%%%%%% | |||
%% @doc Equivalent to `memory(Key, current)'.
-spec memory(used | allocated | unused) -> pos_integer()
    ; (usage) -> number()
    ; (allocated_types | allocated_instances) ->
    [{allocator(), pos_integer()}].
%% Defaults to reporting present (non-peak) values.
memory(Key) -> memory(Key, current).
%% @doc reports one of multiple possible memory values for the entire | |||
%% node depending on what is to be reported: | |||
%% | |||
%% <ul> | |||
%% <li>`used' reports the memory that is actively used for allocated | |||
%% Erlang data;</li> | |||
%% <li>`allocated' reports the memory that is reserved by the VM. It | |||
%% includes the memory used, but also the memory yet-to-be-used but still | |||
%% given by the OS. This is the amount you want if you're dealing with | |||
%% ulimit and OS-reported values. </li> | |||
%% <li>`allocated_types' report the memory that is reserved by the | |||
%% VM grouped into the different util allocators.</li> | |||
%% <li>`allocated_instances' report the memory that is reserved | |||
%% by the VM grouped into the different schedulers. Note that | |||
%% instance id 0 is the global allocator used to allocate data from | |||
%% non-managed threads, i.e. async and driver threads.</li> | |||
%% <li>`unused' reports the amount of memory reserved by the VM that | |||
%% is not being allocated. | |||
%% Equivalent to `allocated - used'.</li> | |||
%% <li>`usage' returns a percentage (0.0 .. 1.0) of `used/allocated' | |||
%% memory ratios.</li> | |||
%% </ul> | |||
%% | |||
%% The memory reported by `allocated' should roughly | |||
%% match what the OS reports. If this amount is different by a large margin, | |||
%% it may be the sign that someone is allocating memory in C directly, outside | |||
%% of Erlang's own allocator -- a big warning sign. There are currently | |||
%% three sources of memory allocation that are not counted towards this value:
%% The cached segments in the mseg allocator, any memory allocated as a | |||
%% super carrier, and small pieces of memory allocated during startup | |||
%% before the memory allocators are initialized. | |||
%% | |||
%% Also note that low memory usages can be the sign of fragmentation in | |||
%% memory, in which case exploring which specific allocator is at fault | |||
%% is recommended (see {@link fragmentation/1}) | |||
-spec memory(used | allocated | unused, current | max) -> pos_integer()
    ; (usage, current | max) -> number()
    ; (allocated_types|allocated_instances, current | max) ->
    [{allocator(), pos_integer()}].
memory(used, When) ->
    %% Sum of all block sizes across util allocators: memory actively in use.
    lists:sum([container_size(Props, When, blocks_size)
               || {_Key, Props} <- util_alloc()]);
memory(allocated, When) ->
    %% Sum of all carrier sizes: memory reserved from the OS.
    lists:sum([container_size(Props, When, carriers_size)
               || {_Key, Props} <- util_alloc()]);
memory(allocated_types, When) ->
    %% Carrier sizes grouped per allocator type.
    lists:foldl(fun({{Alloc, _N}, Props}, Acc) ->
                    Size = container_size(Props, When, carriers_size),
                    orddict:update_counter(Alloc, Size, Acc)
                end, orddict:new(), util_alloc());
memory(allocated_instances, When) ->
    %% Carrier sizes grouped per scheduler instance (0 = global instance).
    lists:foldl(fun({{_Alloc, N}, Props}, Acc) ->
                    Size = container_size(Props, When, carriers_size),
                    orddict:update_counter(N, Size, Acc)
                end, orddict:new(), util_alloc());
memory(unused, When) ->
    memory(allocated, When) - memory(used, When);
memory(usage, When) ->
    memory(used, When) / memory(allocated, When).
%% @doc Compares the block sizes to the carrier sizes, both for | |||
%% single block (`sbcs') and multiblock (`mbcs') carriers. | |||
%% | |||
%% The returned results are sorted by a weight system that is | |||
%% somewhat likely to return the most fragmented allocators first, | |||
%% based on their percentage of use and the total size of the carriers, | |||
%% for both `sbcs' and `mbcs'. | |||
%% | |||
%% The values can both be returned for `current' allocator values, and | |||
%% for `max' allocator values. The current values hold the present allocation | |||
%% numbers, and max values, the values at the peak. Comparing both together | |||
%% can give an idea of whether the node is currently being at its memory peak | |||
%% when possibly leaky, or if it isn't. This information can in turn | |||
%% influence the tuning of allocators to better fit sizes of blocks and/or | |||
%% carriers. | |||
-spec fragmentation(current | max) -> [allocdata([{atom(), term()}])].
fragmentation(Keyword) ->
    %% Weigh every allocator instance, then report the heaviest (most
    %% likely fragmented) ones first.
    Weighed = [frag_weight(Key, Props, Keyword)
               || {Key, Props} <- util_alloc()],
    [{Key, Vals} || {_Weight, Key, Vals} <- lists:reverse(lists:sort(Weighed))].

%% Weigh one allocator instance by comparing block sizes to carrier
%% sizes for both single-block and multiblock carriers.
frag_weight(Key, Props, Keyword) ->
    BlockSbcs = container_value(Props, Keyword, sbcs, blocks_size),
    CarSbcs = container_value(Props, Keyword, sbcs, carriers_size),
    BlockMbcs = container_value(Props, Keyword, mbcs, blocks_size),
    CarMbcs = container_value(Props, Keyword, mbcs, carriers_size),
    {Weight, Vals} = weighed_values({BlockSbcs, CarSbcs},
                                    {BlockMbcs, CarMbcs}),
    {Weight, Key, Vals}.
%% @doc looks at the `mseg_alloc' allocator (allocator used by all the | |||
%% allocators in {@link allocator()}) and returns information relative to | |||
%% the cache hit rates. Unless memory has expected spiky behaviour, it should | |||
%% usually be above 0.80 (80%). | |||
%% | |||
%% Cache can be tweaked using three VM flags: `+MMmcs', `+MMrmcbf', and | |||
%% `+MMamcbf'. | |||
%% | |||
%% `+MMmcs' stands for the maximum amount of cached memory segments. Its | |||
%% default value is '10' and can be anything from 0 to 30. Increasing | |||
%% it first and verifying if cache hits get better should be the first | |||
%% step taken. | |||
%% | |||
%% The two other options specify what are the maximal values of a segment | |||
%% to cache, in relative (in percent) and absolute terms (in kilobytes), | |||
%% respectively. Increasing these may allow more segments to be cached, but | |||
%% should also add overheads to memory allocation. An Erlang node that has | |||
%% limited memory and increases these values may make things worse on | |||
%% that point. | |||
%% | |||
%% The values returned by this function are sorted by a weight combining | |||
%% the lower cache hit joined to the largest memory values allocated. | |||
-spec cache_hit_rates() -> [{{instance, instance()}, [{Key, Val}]}] when
    Key :: hit_rate | hits | calls,
    Val :: term().
cache_hit_rates() ->
    %% Rate each mseg_alloc instance, then list the ones with the most
    %% cache misses (weighted by call volume) first.
    Rated = [mseg_hit_rate(N, Props) || {{_, N}, Props} <- alloc([mseg_alloc])],
    [{Key, Vals} || {_Weight, Key, Vals} <- lists:reverse(lists:sort(Rated))].

%% Compute cache hit statistics for one mseg_alloc instance.
mseg_hit_rate(N, Props) ->
    Mem = proplists:get_value(memkind, Props),
    {_, Hits} = lists:keyfind(cache_hits, 1, proplists:get_value(status, Mem)),
    {_, Giga, Ones} = lists:keyfind(mseg_alloc, 1, proplists:get_value(calls, Mem)),
    %% Call counters are split into {GigaCount, Count} pairs.
    Calls = 1000000000 * Giga + Ones,
    HitRate = usage(Hits, Calls),
    %% Many calls with a poor hit rate weigh the heaviest.
    Weight = (1.00 - HitRate) * Calls,
    {Weight, {instance, N}, [{hit_rate, HitRate}, {hits, Hits}, {calls, Calls}]}.
%% @doc Checks all allocators in {@link allocator()} and returns the average | |||
%% block sizes being used for `mbcs' and `sbcs'. This value is interesting | |||
%% to use because it will tell us how large most blocks are. | |||
%% This can be related to the VM's largest multiblock carrier size | |||
%% (`lmbcs') and smallest multiblock carrier size (`smbcs') to specify | |||
%% allocation strategies regarding the carrier sizes to be used. | |||
%% | |||
%% This function isn't exceptionally useful unless you know you have some | |||
%% specific problem, say with sbcs/mbcs ratios (see {@link sbcs_to_mbcs/0}) | |||
%% or fragmentation for a specific allocator, and want to figure out what | |||
%% values to pick to increase or decrease sizes compared to the currently | |||
%% configured value. | |||
%% | |||
%% Do note that values for `lmbcs' and `smbcs' are going to be rounded up | |||
%% to the next power of two when configuring them. | |||
-spec average_block_sizes(current | max) -> [{allocator(), [{Key, Val}]}] when
    Key :: mbcs | sbcs,
    Val :: number().
average_block_sizes(Keyword) ->
    %% Accumulate block counts and total block sizes per allocator and
    %% carrier type, then turn the sums into per-type averages.
    Totals = lists:foldl(
        fun({{Instance, _}, Props}, D0) ->
            CountS = container_value(Props, Keyword, sbcs, blocks),
            SizeS = container_value(Props, Keyword, sbcs, blocks_size),
            CountM = container_value(Props, Keyword, mbcs, blocks),
            SizeM = container_value(Props, Keyword, mbcs, blocks_size),
            D1 = dict:update_counter({Instance, sbcs, count}, CountS, D0),
            D2 = dict:update_counter({Instance, sbcs, size}, SizeS, D1),
            D3 = dict:update_counter({Instance, mbcs, count}, CountM, D2),
            dict:update_counter({Instance, mbcs, size}, SizeM, D3)
        end,
        dict:new(),
        util_alloc()),
    average_group(average_calc(lists:sort(dict:to_list(Totals)))).
%% @doc compares the amount of single block carriers (`sbcs') vs the | |||
%% number of multiblock carriers (`mbcs') for each individual allocator in | |||
%% {@link allocator()}. | |||
%% | |||
%% When a specific piece of data is allocated, it is compared to a threshold, | |||
%% called the 'single block carrier threshold' (`sbct'). When the data is | |||
%% larger than the `sbct', it gets sent to a single block carrier. When the | |||
%% data is smaller than the `sbct', it gets placed into a multiblock carrier. | |||
%% | |||
%% mbcs are to be preferred to sbcs because they basically represent pre- | |||
%% allocated memory, whereas sbcs will map to one call to sys_alloc | |||
%% or mseg_alloc, which is more expensive than redistributing | |||
%% data that was obtained for multiblock carriers. Moreover, the VM is able to | |||
%% do specific work with mbcs that should help reduce fragmentation in ways | |||
%% sys_alloc or mmap usually won't. | |||
%% | |||
%% Ideally, most of the data should fit inside multiblock carriers. If | |||
%% most of the data ends up in `sbcs', you may need to adjust the multiblock | |||
%% carrier sizes, specifically the maximal value (`lmbcs') and the threshold | |||
%% (`sbct'). On 32 bit VMs, `sbct' is limited to 8MBs, but 64 bit VMs can go | |||
%% to pretty much any practical size. | |||
%% | |||
%% Given the value returned is a ratio of sbcs/mbcs, the higher the value, | |||
%% the worse the condition. The list is sorted accordingly. | |||
-spec sbcs_to_mbcs(max | current) -> [allocdata(term())].
sbcs_to_mbcs(Keyword) ->
    %% Ratio of single-block to multiblock carriers per allocator
    %% instance; higher is worse, so sort in descending ratio order.
    Rated = [begin
                 Sbcs = container_value(Props, Keyword, sbcs, blocks),
                 Mbcs = container_value(Props, Keyword, mbcs, blocks),
                 {sbc_mbc_ratio(Sbcs, Mbcs), Key}
             end || {Key, Props} <- util_alloc()],
    [{Alloc, Ratio} || {Ratio, Alloc} <- lists:reverse(lists:sort(Rated))].

%% No carriers at all is a clean 0; sbcs without any mbcs is as bad as
%% it gets (`infinity' sorts after every number).
sbc_mbc_ratio(0, 0) -> 0;
sbc_mbc_ratio(_, 0) -> infinity;
sbc_mbc_ratio(Sbcs, Mbcs) -> Sbcs / Mbcs.
%% @doc returns a dump of all allocator settings and values
-spec allocators() -> [allocdata(term())].
allocators() ->
    %% sys_alloc and mseg_alloc are not alloc_util allocators, so they
    %% must be added to the list explicitly. Disabled allocators report
    %% `false' from system_info/1 and are skipped.
    Allocators = [sys_alloc, mseg_alloc | erlang:system_info(alloc_util_allocators)],
    lists:append(
        [[{{A, N}, format_alloc(A, Props)} || {_, N, Props} <- Instances]
         || A <- Allocators,
            Instances <- [erlang:system_info({allocator, A})],
            Instances =/= false]).
%% Sort and normalize one allocator instance's property list.
%% The `{versions,_,_}' tuple is implicitly dropped downstream to allow
%% use of the orddict API, and has never proven useful to keep.
format_alloc(Alloc, Props) ->
    [{Key, format_blocks(Alloc, Key, Val)} || {Key, Val} <- lists:sort(Props)].
%% Walk one key's value list and rewrite any `blocks' entry into the
%% legacy shape the rest of this module computes with; every other
%% entry passes through untouched.
format_blocks(_, _, []) ->
    [];
format_blocks(Alloc, Key, [{blocks, L} | List]) when is_list(L) ->
    %% OTP-22 introduces carrier migrations across types, and OTP-23 changes the
    %% format of data reported to be a bit richer; however it's not compatible
    %% with most calculations made for this library.
    %% So what we do here for `blocks' is merge all the info into the one the
    %% library expects (`blocks' and `blocks_size'), then keep the original
    %% one in case it is further needed.
    %% There were further changes to `mbcs_pool' changing `foreign_blocks',
    %% `blocks' and `blocks_size' into just `blocks' with a proplist, so we're breaking
    %% up to use that one too.
    %% In the end we go from `{blocks, [{Alloc, [...]}]}' to:
    %% - `{blocks, ...}' (4-tuple in mbcs and sbcs, 2-tuple in mbcs_pool)
    %% - `{blocks_size, ...}' (4-tuple in mbcs and sbcs, 2-tuple in mbcs_pool)
    %% - `{foreign_blocks, [...]}' (just append lists =/= `Alloc')
    %% - `{raw_blocks, [...]}' (original value)
    Foreign = lists:filter(fun({A, _Props}) -> A =/= Alloc end, L),
    %% mbcs_pool reports plain counters; mbcs/sbcs report
    %% {Current, Last, Max} style quadruples.
    Type = case Key of
        mbcs_pool -> int;
        _ -> quadruple
    end,
    %% Builds a fold fun that extracts metric K from one sub-proplist:
    %% quadruples replace the accumulator, plain integers add to it.
    MergeF = fun(K) ->
        fun({_A, Props}, Acc) ->
            case lists:keyfind(K, 1, Props) of
                {K, Cur, Last, Max} -> {Cur, Last, Max};
                {K, V} -> Acc + V
            end
        end
    end,
    %% Since tuple sizes change, hack around it using tuple_to_list conversion
    %% and set the accumulator to a list so it defaults to not putting anything
    {Blocks, BlocksSize} = case Type of
        int ->
            {{blocks, lists:foldl(MergeF(count), 0, L)},
             {blocks_size, lists:foldl(MergeF(size), 0, L)}};
        quadruple ->
            {list_to_tuple([blocks | tuple_to_list(lists:foldl(MergeF(count), {0, 0, 0}, L))]),
             list_to_tuple([blocks_size | tuple_to_list(lists:foldl(MergeF(size), {0, 0, 0}, L))])}
    end,
    [Blocks, BlocksSize, {foreign_blocks, Foreign}, {raw_blocks, L}
     | format_blocks(Alloc, Key, List)];
format_blocks(Alloc, Key, [H | T]) ->
    [H | format_blocks(Alloc, Key, T)].
%% @doc returns a dump of all allocator settings and values modified
%% depending on the argument.
%% <ul>
%% <li>`types' report the settings and accumulated values for each
%%     allocator type. This is useful when looking for anomalies
%%     in the system as a whole and not specific instances.</li>
%% </ul>
-spec allocators(types) -> [allocdata_types(term())].
allocators(types) ->
    %% Collapse the per-instance dump into one entry per allocator type.
    allocators_types(alloc(), []).
%% Fold per-instance allocator data into per-type entries, merging
%% values whenever another instance of an already-seen type shows up.
allocators_types([{{Type, No}, Vs} | Rest], Acc) ->
    case lists:keytake(Type, 1, Acc) of
        false ->
            Entry = {Type, [No], sort_values(Type, Vs)},
            allocators_types(Rest, [Entry | Acc]);
        {value, {Type, Nos, OldVs}, Others} ->
            Merged = merge_values(sort_values(Type, Vs), OldVs),
            allocators_types(Rest, [{Type, [No | Nos], Merged} | Others])
    end;
allocators_types([], Acc) ->
    [{{Type, Nos}, Vs} || {Type, Nos, Vs} <- Acc].
%% Merge the (sorted) value lists of two allocator instances of the
%% same type, pairing entries positionally. Both inputs must have gone
%% through sort_values/2 so their keys line up.
merge_values([{Key, Vs} | T1], [{Key, OVs} | T2]) when Key =:= memkind ->
    %% memkind holds a nested proplist; merge it recursively.
    [{Key, merge_values(Vs, OVs)} | merge_values(T1, T2)];
merge_values([{Key, Vs} | T1], [{Key, OVs} | T2]) when Key =:= calls;
                                                       Key =:= fix_types;
                                                       Key =:= sbmbcs;
                                                       Key =:= mbcs;
                                                       Key =:= mbcs_pool;
                                                       Key =:= sbcs;
                                                       Key =:= status ->
    [{Key, lists:map(
        fun({{K, MV1, V1}, {K, MV2, V2}}) ->
            %% Merge the MegaVs + Vs into one
            V = MV1 * 1000000 + V1 + MV2 * 1000000 + V2,
            {K, V div 1000000, V rem 1000000};
           ({{K, V1}, {K, V2}}) when K =:= segments_watermark ->
            %% We take the maximum watermark as that is
            %% a value that we can use somewhat. Ideally
            %% maybe the average should be used, but the
            %% value is very rarely important so leave it
            %% like this for now.
            {K, lists:max([V1, V2])};
           ({{K, V1}, {K, V2}}) when K =:= foreign_blocks; K =:= raw_blocks ->
            %% foreign blocks are just merged as a bigger list.
            {K, V1 ++ V2};
           ({{K, V1}, {K, V2}}) ->
            %% Plain counters are summed.
            {K, V1 + V2};
           ({{K, C1, L1, M1}, {K, C2, L2, M2}}) ->
            %% Merge the Curr, Last, Max into one
            {K, C1 + C2, L1 + L2, M1 + M2}
        end, lists:zip(Vs, OVs))} | merge_values(T1, T2)];
merge_values([{Type, _Vs} = E | T1], T2) when Type =:= mbcs_pool ->
    %% For values never showing up in instance 0 but in all others.
    [E | merge_values(T1, T2)];
merge_values(T1, [{Type, _Vs} = E | T2]) when Type =:= fix_types ->
    %% For values only showing up in instance 0.
    [E | merge_values(T1, T2)];
merge_values([E | T1], [E | T2]) ->
    %% For values that are constant across instances.
    [E | merge_values(T1, T2)];
merge_values([{options, _Vs1} | T1], [{options, _Vs2} = E | T2]) ->
    %% Options change a bit between instance 0 and the others;
    %% we show the others as they are the most interesting.
    [E | merge_values(T1, T2)];
merge_values([], []) ->
    [].
%% Sort an instance's value list so merge_values/2 can pair entries
%% positionally. mseg_alloc nests a `memkind' proplist that must be
%% sorted itself before the outer list is.
sort_values(mseg_alloc, Vs) ->
    {value, {memkind, MemKind}, Others} = lists:keytake(memkind, 1, Vs),
    lists:sort([{memkind, lists:sort(MemKind)} | Others]);
sort_values(_Type, Vs) ->
    lists:sort(Vs).
%%%%%%%%%%%%%%%%%%%%%%%%% | |||
%%% Snapshot handling %%% | |||
%%%%%%%%%%%%%%%%%%%%%%%%% | |||
%% @doc Take a new snapshot of the current memory allocator statistics. | |||
%% The snapshot is stored in the process dictionary of the calling process, | |||
%% with all the limitations that it implies (i.e. no garbage-collection). | |||
%%% To unset the snapshot, see {@link snapshot_clear/0}. | |||
-spec snapshot() -> snapshot() | undefined.
snapshot() ->
    %% put/2 returns the value previously stored under the key, so the
    %% prior snapshot (or `undefined') is handed back to the caller.
    put(recon_alloc_snapshot, snapshot_int()).
%% @doc clear the current snapshot in the process dictionary, if present,
%% and return the value it had before being unset.
%% @end
%% NOTE(review): erase/1 would remove the key entirely; overwriting with
%% `undefined' is observationally the same for snapshot_get/0.
-spec snapshot_clear() -> snapshot() | undefined.
snapshot_clear() ->
    put(recon_alloc_snapshot, undefined).
%% @doc print a dump of the current snapshot stored by {@link snapshot/0}
%% Prints `undefined' if no snapshot has been taken.
-spec snapshot_print() -> ok.
snapshot_print() ->
    %% Printed as a full term followed by a period, so the output can be
    %% consulted back in.
    Snapshot = snapshot_get(),
    io:format("~p.~n", [Snapshot]).
%% @doc returns the current snapshot stored by {@link snapshot/0}.
%% Returns `undefined' if no snapshot has been taken.
-spec snapshot_get() -> snapshot() | undefined.
snapshot_get() ->
    erlang:get(recon_alloc_snapshot).
%% @doc save the current snapshot taken by {@link snapshot/0} to a file. | |||
%%% If there is no current snapshot, a snapshot of the current allocator | |||
%% statistics will be written to the file. | |||
-spec snapshot_save(Filename) -> ok when
    Filename :: file:name().
snapshot_save(Filename) ->
    %% Fall back to a freshly-taken snapshot when none is stored.
    Snapshot = case snapshot_get() of
        undefined -> snapshot_int();
        Stored -> Stored
    end,
    %% Written as `Term.' so snapshot_load/1 can file:consult/1 it back.
    Formatted = io_lib:format("~p.~n", [Snapshot]),
    case file:write_file(Filename, Formatted) of
        ok ->
            ok;
        {error, Reason} ->
            erlang:error(Reason, [Filename])
    end.
%% @doc load a snapshot from a given file. The format of the data in the | |||
%% file can be either the same as output by {@link snapshot_save()}, | |||
%% or the output obtained by calling | |||
%% `{erlang:memory(),[{A,erlang:system_info({allocator,A})} || A <- erlang:system_info(alloc_util_allocators)++[sys_alloc,mseg_alloc]]}.' | |||
%% and storing it in a file. | |||
%% If the latter option is taken, please remember to add a full stop at the end | |||
%% of the resulting Erlang term, as this function uses `file:consult/1' to load | |||
%% the file. | |||
%% | |||
%% Example usage: | |||
%% | |||
%%```On target machine: | |||
%% 1> recon_alloc:snapshot(). | |||
%% undefined | |||
%% 2> recon_alloc:memory(used). | |||
%% 18411064 | |||
%% 3> recon_alloc:snapshot_save("recon_snapshot.terms"). | |||
%% ok | |||
%% | |||
%% On other machine: | |||
%% 1> recon_alloc:snapshot_load("recon_snapshot.terms"). | |||
%% undefined | |||
%% 2> recon_alloc:memory(used). | |||
%% 18411064''' | |||
%% | |||
-spec snapshot_load(Filename) -> snapshot() | undefined when
    Filename :: file:name().
snapshot_load(Filename) ->
    %% The file must contain a single `Term.' readable by file:consult/1.
    {ok, [Terms]} = file:consult(Filename),
    Snapshot =
        case Terms of
            %% We handle someone using
            %% {erlang:memory(),
            %%  [{A,erlang:system_info({allocator,A})} ||
            %%     A <- erlang:system_info(alloc_util_allocators)++[sys_alloc,mseg_alloc]]}
            %% to dump data.
            {M, [{Alloc, _D} | _] = Allocs} when is_atom(Alloc) ->
                {M, [{{A, N}, lists:sort(proplists:delete(versions, Props))} ||
                        {A, Instances = [_ | _]} <- Allocs,
                        {_, N, Props} <- Instances]};
            %% We assume someone used recon_alloc:snapshot() to store this one
            {M, Allocs} ->
                {M, [{AN, lists:sort(proplists:delete(versions, Props))} ||
                        {AN, Props} <- Allocs]}
        end,
    %% put/2 returns the previously stored snapshot (or `undefined').
    put(recon_alloc_snapshot, Snapshot).
%%%%%%%%%%%%%%%%%%%%%%%%% | |||
%%% Handling of units %%% | |||
%%%%%%%%%%%%%%%%%%%%%%%%% | |||
%%% @doc set the current unit to be used by recon_alloc. This affects all | |||
%% functions that return bytes. | |||
%% | |||
%% Eg. | |||
%% ```1> recon_alloc:memory(used,current). | |||
%% 17548752 | |||
%% 2> recon_alloc:set_unit(kilobyte). | |||
%% undefined | |||
%% 3> recon_alloc:memory(used,current). | |||
%% 17576.90625''' | |||
%% | |||
%% The return value is the previously configured factor (`undefined'
%% when none was set), because put/2 returns the old value -- which the
%% module doc example above also shows. The earlier `-> ok' spec did
%% not match that actual behaviour.
-spec set_unit(byte | kilobyte | megabyte | gigabyte) ->
    undefined | pos_integer().
set_unit(byte) ->
    %% bytes are the default: clear the conversion factor entirely.
    put(recon_alloc_unit, undefined);
set_unit(kilobyte) ->
    put(recon_alloc_unit, 1024);
set_unit(megabyte) ->
    put(recon_alloc_unit, 1024 * 1024);
set_unit(gigabyte) ->
    put(recon_alloc_unit, 1024 * 1024 * 1024).
%% Apply the configured display unit (if any) to a snapshot's memory
%% summary and allocator data.
conv({Mem, Allocs} = Data) ->
    case get(recon_alloc_unit) of
        undefined ->
            %% No unit configured: report raw bytes untouched.
            Data;
        Factor ->
            {conv_mem(Mem, Factor), conv_alloc(Allocs, Factor)}
    end.
%% Scale every top-level erlang:memory/0 counter by the unit factor.
conv_mem(Mem, Factor) ->
    lists:map(fun({Type, Bytes}) -> {Type, Bytes / Factor} end, Mem).
%% Apply the unit factor to the allocator dump. sys_alloc carries no
%% size data to convert; mseg_alloc only holds sizes in its memkind
%% status; util allocators have mbcs/sbcs (and possibly mbcs_pool)
%% proplists whose size entries get scaled.
conv_alloc([{{sys_alloc, _I}, _Props} = Alloc | R], Factor) ->
    [Alloc | conv_alloc(R, Factor)];
conv_alloc([{{mseg_alloc, _I} = AI, Props} | R], Factor) ->
    %% Only the segments_size quadruple inside memkind's status needs
    %% conversion for mseg_alloc.
    MemKind = orddict:fetch(memkind, Props),
    Status = orddict:fetch(status, MemKind),
    {segments_size, Curr, Last, Max} = lists:keyfind(segments_size, 1, Status),
    NewSegSize = {segments_size, Curr / Factor, Last / Factor, Max / Factor},
    NewStatus = lists:keyreplace(segments_size, 1, Status, NewSegSize),
    NewProps = orddict:store(memkind, orddict:store(status, NewStatus, MemKind),
                             Props),
    [{AI, NewProps} | conv_alloc(R, Factor)];
conv_alloc([{AI, Props} | R], Factor) ->
    %% Scales both 2-tuple (mbcs_pool style) and 4-tuple (Curr/Last/Max)
    %% size entries; any other entry passes through unchanged.
    FactorFun = fun({T, Curr}) when
            T =:= blocks_size; T =:= carriers_size ->
            {T, Curr / Factor};
        ({T, Curr, Last, Max}) when
            T =:= blocks_size; T =:= carriers_size;
            T =:= mseg_alloc_carriers_size;
            T =:= sys_alloc_carriers_size ->
            {T, Curr / Factor, Last / Factor, Max / Factor};
        (T) ->
            T
    end,
    NewMbcsProp = [FactorFun(Prop) || Prop <- orddict:fetch(mbcs, Props)],
    NewSbcsProp = [FactorFun(Prop) || Prop <- orddict:fetch(sbcs, Props)],
    NewProps = orddict:store(sbcs, NewSbcsProp,
                             orddict:store(mbcs, NewMbcsProp, Props)),
    %% mbcs_pool only exists on OTP releases with carrier migration.
    case orddict:find(mbcs_pool, Props) of
        error ->
            [{AI, NewProps} | conv_alloc(R, Factor)];
        {ok, MbcsPoolProps} ->
            NewMbcsPoolProp = [FactorFun(Prop) || Prop <- MbcsPoolProps],
            NewPoolProps = orddict:store(mbcs_pool, NewMbcsPoolProp, NewProps),
            [{AI, NewPoolProps} | conv_alloc(R, Factor)]
    end;
conv_alloc([], _Factor) ->
    [].
%%%%%%%%%%%%%%% | |||
%%% Private %%% | |||
%%%%%%%%%%%%%%% | |||
%% Sort on small usage vs large size: the weight combines the sbcs and
%% mbcs figures, and a proplist of the interesting raw values rides
%% along for reporting.
weighed_values({SbcsBlockSize, SbcsCarrierSize},
               {MbcsBlockSize, MbcsCarrierSize}) ->
    SbcsUsage = usage(SbcsBlockSize, SbcsCarrierSize),
    MbcsUsage = usage(MbcsBlockSize, MbcsCarrierSize),
    %% Unused space in a large carrier weighs more than in a small one.
    Weight = (1.00 - SbcsUsage) * SbcsCarrierSize
           + (1.00 - MbcsUsage) * MbcsCarrierSize,
    Details = [{sbcs_usage, SbcsUsage},
               {mbcs_usage, MbcsUsage},
               {sbcs_block_size, SbcsBlockSize},
               {sbcs_carriers_size, SbcsCarrierSize},
               {mbcs_block_size, MbcsBlockSize},
               {mbcs_carriers_size, MbcsCarrierSize}],
    {Weight, Details}.
%% Returns the `BlockSize/CarrierSize' as a 0.0 -> 1.0 percentage,
%% but also takes 0/0 to be 100% to make working with sorting and
%% weights simpler. Note the exact-match clauses: the integer and
%% float zero pairs are handled separately on purpose.
usage(0, 0) -> 1.00;
usage(0.0, 0.0) -> 1.00;
%usage(N,0) -> ???;  % NOTE(review): N/0 with integer 0 raises badarith.
usage(Block, Carrier) -> Block / Carrier.
%% Turn the sorted [{{Key, count}, N}, {{Key, size}, S}] pairs produced
%% by average_block_sizes/1 into per-key average block sizes.
average_calc([]) ->
    [];
average_calc([{{Instance, Type, count}, Count},
              {{Instance, Type, size}, Size} | Rest]) ->
    Avg = case Count of
        %% No blocks and no size: average is a clean 0.
        0 when Size == 0 -> 0;
        _ -> Size / Count
    end,
    [{Instance, Type, Avg} | average_calc(Rest)].
%% Regroup the flat triplets into {Instance, Proplist} pairs, consuming
%% two entries (the sbcs and mbcs averages) per instance.
average_group([]) ->
    [];
average_group([{Instance, TypeA, AvgA}, {Instance, TypeB, AvgB} | Rest]) ->
    [{Instance, [{TypeA, AvgA}, {TypeB, AvgB}]} | average_group(Rest)].
%% Total size of a container metric across both carrier kinds
%% (single-block + multiblock).
container_size(Props, Keyword, Container) ->
    container_value(Props, Keyword, sbcs, Container)
        + container_value(Props, Keyword, mbcs, Container).
%% Look up one container metric (blocks, blocks_size, carriers,
%% carriers_size) for a carrier type, at either the `current' or `max'
%% position of the stats tuple (see ?CURRENT_POS / ?MAX_POS).
container_value(Props, Keyword, Type, Container)
  when is_atom(Keyword) ->
    container_value(Props, key2pos(Keyword), Type, Container);
container_value(Props, Pos, mbcs = Type, Container)
  when Pos == ?CURRENT_POS,
       ((Container =:= blocks) or (Container =:= blocks_size)
        or (Container =:= carriers) or (Container =:= carriers_size)) ->
    %% We include the mbcs_pool into the value for mbcs.
    %% The mbcs_pool contains carriers that have been abandoned
    %% by the specific allocator instance and can therefore be
    %% grabbed by another instance of the same type.
    %% The pool was added in R16B02 and enabled by default in 17.0.
    %% See erts/emulator/internal_docs/CarrierMigration.md in
    %% Erlang/OTP repo for more details.
    Pool = case proplists:get_value(mbcs_pool, Props) of
        PoolProps when PoolProps =/= undefined ->
            element(Pos, lists:keyfind(Container, 1, PoolProps));
        _ -> 0
    end,
    TypeProps = proplists:get_value(Type, Props),
    Pool + element(Pos, lists:keyfind(Container, 1, TypeProps));
container_value(Props, Pos, Type, Container)
  when Type =:= sbcs; Type =:= mbcs ->
    TypeProps = proplists:get_value(Type, Props),
    element(Pos, lists:keyfind(Container, 1, TypeProps)).
%% Create a new snapshot from the live VM: the total memory counters
%% plus the full allocator dump.
snapshot_int() ->
    {erlang:memory(), allocators()}.
%% Use the stored snapshot when one exists, otherwise read live values;
%% either way the configured display unit is applied on the way out.
snapshot_get_int() ->
    Snapshot = case snapshot_get() of
        undefined -> snapshot_int();
        Stored -> Stored
    end,
    conv(Snapshot).
%% Get the allocator portion of a snapshot (live or stored).
alloc() ->
    element(2, snapshot_get_int()).
%% Keep only the allocator instances whose type appears in Types.
alloc(Types) ->
    [Entry || {{T, _Instance}, _Props} = Entry <- alloc(),
              lists:member(T, Types)].
%% Get only alloc_util allocs (the instances whose type is listed in
%% ?UTIL_ALLOCATORS), excluding sys_alloc and mseg_alloc.
util_alloc() ->
    alloc(?UTIL_ALLOCATORS).
%% Map the user-facing keyword to the tuple position where that kind
%% of value (current vs peak) lives inside the stats tuples.
key2pos(current) ->
    ?CURRENT_POS;
key2pos(max) ->
    ?MAX_POS.
@ -1,285 +0,0 @@ | |||
%%% @author Fred Hebert <mononcqc@ferd.ca> | |||
%%% [http://ferd.ca/] | |||
%%% @doc Regroups useful functionality used by recon when dealing with data | |||
%%% from the node. The functions in this module allow quick runtime access | |||
%%% to fancier behaviour than what would be done using recon module itself. | |||
%%% @end | |||
-module(recon_lib). | |||
-export([sliding_window/2, sample/2, count/1, | |||
port_list/1, port_list/2, | |||
proc_attrs/1, proc_attrs/2, | |||
inet_attrs/1, inet_attrs/2, | |||
triple_to_pid/3, term_to_pid/1, | |||
term_to_port/1, | |||
time_map/5, time_fold/6, | |||
scheduler_usage_diff/2, | |||
sublist_top_n_attrs/2]). | |||
%% private exports | |||
-export([binary_memory/1]). | |||
-type diff() :: [recon:proc_attrs() | recon:inet_attrs()]. | |||
%% @doc Compare two samples and return a list based on some key. The type mentioned
%% for the structure is `diff()' (`{Key,Val,Other}'), which is compatible with
%% the {@link recon:proc_attrs()} type.
-spec sliding_window(First :: diff(), Last :: diff()) -> diff().
sliding_window(First, Last) ->
    %% Index both samples by key, then merge. For keys present in both
    %% samples the value becomes the delta (Last - First) and the
    %% metadata of the most recent sample is kept; keys seen in only
    %% one sample pass through untouched.
    ToDict = fun(Sample) ->
                 dict:from_list([{Key, {Val, Other}} || {Key, Val, Other} <- Sample])
             end,
    Merged = dict:merge(fun(_Key, {Old, _}, {New, Other}) -> {New - Old, Other} end,
                        ToDict(First), ToDict(Last)),
    [{Key, Val, Other} || {Key, {Val, Other}} <- dict:to_list(Merged)].
%% @doc Runs a fun once, waits `Ms', runs the fun again,
%% and returns both results.
-spec sample(Ms :: non_neg_integer(), fun(() -> term())) ->
    {First :: term(), Second :: term()}.
sample(Delay, Fun) ->
    %% Take one measurement, pause for the requested delay, measure again.
    Before = Fun(),
    timer:sleep(Delay),
    After = Fun(),
    {Before, After}.
%% @doc Takes a list of terms, and counts how often each of
%% them appears in the list. The list returned is in no
%% particular order.
-spec count([term()]) -> [{term(), Count :: integer()}].
count(Terms) ->
    %% Tally occurrences into a dict, then dump it as a proplist.
    Tally = fun(Term, Counts) -> dict:update_counter(Term, 1, Counts) end,
    dict:to_list(lists:foldl(Tally, dict:new(), Terms)).
%% @doc Returns a list of all the open ports in the VM, coupled with
%% one of the properties desired from `erlang:port_info/1-2'.
-spec port_list(Attr :: atom()) -> [{port(), term()}].
port_list(Attr) ->
    %% port_info/2 returns `undefined' for dead ports; those are dropped.
    lists:filtermap(
        fun(Port) ->
            case erlang:port_info(Port, Attr) of
                {_, Val} -> {true, {Port, Val}};
                undefined -> false
            end
        end,
        erlang:ports()).
%% @doc Returns a list of all the open ports in the VM, but only
%% if the `Attr''s resulting value matches `Val'. `Attr' must be
%% a property accepted by `erlang:port_info/2'.
-spec port_list(Attr :: atom(), term()) -> [port()].
port_list(Attr, Val) ->
    Wanted = {Attr, Val},
    [Port || Port <- erlang:ports(),
             erlang:port_info(Port, Attr) =:= Wanted].
%% @doc Returns the attributes ({@link recon:proc_attrs()}) of
%% all processes of the node, except the caller.
-spec proc_attrs(term()) -> [recon:proc_attrs()].
proc_attrs(AttrName) ->
    Self = self(),
    %% Processes may die between processes/0 and the info call;
    %% proc_attrs/2 then returns {error, undefined} and they are skipped.
    lists:filtermap(
        fun(Pid) when Pid =:= Self -> false;
           (Pid) ->
                case proc_attrs(AttrName, Pid) of
                    {ok, Attrs} -> {true, Attrs};
                    {error, _} -> false
                end
        end,
        processes()).
%% @doc Returns the attributes of a given process. This form of attributes
%% is standard for most comparison functions for processes in recon.
%%
%% A special attribute is `binary_memory', which will reduce the memory used
%% by the process for binary data on the global heap.
-spec proc_attrs(term(), pid()) -> {ok, recon:proc_attrs()} | {error, term()}.
proc_attrs(binary_memory, Pid) ->
    %% Synthetic attribute: sum the sizes of the refc binaries the
    %% process references instead of reading a single stat.
    case process_info(Pid, [binary, registered_name,
                            current_function, initial_call]) of
        [{_, Bins}, {registered_name, Name}, Init, Cur] ->
            %% Name is included only when the process is registered
            %% (unregistered processes report a non-atom placeholder).
            Extra = [Name || is_atom(Name)] ++ [Init, Cur],
            {ok, {Pid, binary_memory(Bins), Extra}};
        undefined ->
            {error, undefined}
    end;
proc_attrs(AttrName, Pid) ->
    case process_info(Pid, [AttrName, registered_name,
                            current_function, initial_call]) of
        [{_, Value}, {registered_name, Name}, Init, Cur] ->
            Extra = [Name || is_atom(Name)] ++ [Init, Cur],
            {ok, {Pid, Value, Extra}};
        undefined ->
            {error, undefined}
    end.
%% @doc Returns the attributes ({@link recon:inet_attrs()}) of
%% all inet ports (UDP, SCTP, TCP) of the node.
-spec inet_attrs(term()) -> [recon:inet_attrs()].
inet_attrs(AttrName) ->
    %% Only ports driven by the inet drivers are considered.
    InetDrivers = ["tcp_inet", "udp_inet", "sctp_inet"],
    InetPorts = [Port || Port <- erlang:ports(),
                         {_, Name} <- [erlang:port_info(Port, name)],
                         lists:member(Name, InetDrivers)],
    lists:filtermap(
        fun(Port) ->
            case inet_attrs(AttrName, Port) of
                {ok, Attrs} -> {true, Attrs};
                {error, _} -> false
            end
        end,
        InetPorts).
%% @doc Returns the attributes required for a given inet port (UDP,
%% SCTP, TCP). This form of attributes is standard for most comparison
%% functions for processes in recon.
-spec inet_attrs(AttributeName, port()) -> {ok, recon:inet_attrs()}
                                         | {error, term()} when
      AttributeName :: 'recv_cnt' | 'recv_oct' | 'send_cnt' | 'send_oct'
                     | 'cnt' | 'oct'.
inet_attrs(Attr, Port) ->
    %% `cnt' and `oct' are aggregates of their recv/send counterparts.
    Keys = case Attr of
               cnt -> [recv_cnt, send_cnt];
               oct -> [recv_oct, send_oct];
               _ -> [Attr]
           end,
    case inet:getstat(Port, Keys) of
        {ok, Props} ->
            Total = lists:sum([Val || {_, Val} <- Props]),
            {ok, {Port, Total, Props}};
        {error, Reason} ->
            {error, Reason}
    end.
%% @doc Equivalent of `pid(X,Y,Z)' in the Erlang shell.
-spec triple_to_pid(N, N, N) -> pid() when
      N :: non_neg_integer().
triple_to_pid(X, Y, Z) ->
    %% list_to_pid/1 wants a flat "<X.Y.Z>" string.
    PidString = lists:flatten(io_lib:format("<~b.~b.~b>", [X, Y, Z])),
    list_to_pid(PidString).
%% @doc Transforms a given term to a pid.
%% Accepts a pid, a registered (local, global, or via-module) name, a
%% "<0.X.Y>" string or binary, or an {X, Y, Z} integer triple.
-spec term_to_pid(recon:pid_term()) -> pid().
term_to_pid(Pid) when is_pid(Pid) -> Pid;
term_to_pid(Name) when is_atom(Name) -> whereis(Name);
term_to_pid("<0." ++ _ = PidString) -> list_to_pid(PidString);
term_to_pid(<<"<0.", _/binary>> = PidBin) -> list_to_pid(binary_to_list(PidBin));
term_to_pid({global, Name}) -> global:whereis_name(Name);
term_to_pid({via, Mod, Name}) -> Mod:whereis_name(Name);
term_to_pid({A, B, C}) when is_integer(A), is_integer(B), is_integer(C) ->
    triple_to_pid(A, B, C).
%% @doc Transforms a given term to a port
%% Accepts a port, a locally registered name, a "#Port<0.N>" string,
%% or the bare integer counter N.
-spec term_to_port(recon:port_term()) -> port().
term_to_port(Port) when is_port(Port) -> Port;
term_to_port(Name) when is_atom(Name) -> whereis(Name);
term_to_port("#Port<0." ++ Id) ->
    N = list_to_integer(lists:sublist(Id, length(Id) - 1)), % drop trailing '>'
    term_to_port(N);
term_to_port(N) when is_integer(N) ->
    %% We rebuild the term from the int received:
    %% http://www.erlang.org/doc/apps/erts/erl_ext_dist.html#id86892
    %% There is no list_to_port/1 counterpart here, so the external
    %% term format for a port on the local node is built by hand and
    %% decoded with binary_to_term/1.
    Name = iolist_to_binary(atom_to_list(node())),
    NameLen = iolist_size(Name),
    %% Steal the node creation byte from an encoded local pid so the
    %% resulting port belongs to the current node incarnation.
    Vsn = binary:last(term_to_binary(self())),
    Bin = <<131, % term encoding value
            102, % port tag
            100, % atom ext tag, used for node name
            NameLen:2/unit:8,
            Name:NameLen/binary,
            N:4/unit:8, % actual counter value
            Vsn:8>>, % version
    binary_to_term(Bin).
%% @doc Calls a given function every `Interval' milliseconds and supports
%% a map-like interface (each result is modified and returned)
-spec time_map(N, Interval, Fun, State, MapFun) -> [term()] when
      N :: non_neg_integer(),
      Interval :: pos_integer(),
      Fun :: fun((State) -> {term(), State}),
      State :: term(),
      MapFun :: fun((_) -> term()).
time_map(0, _, _, _, _) ->
    [];
time_map(Remaining, Interval, Fun, State, MapFun) ->
    %% Sample first, then sleep; MapFun is applied before recursing so
    %% mapped results are produced in sampling order.
    {Sample, NextState} = Fun(State),
    timer:sleep(Interval),
    Mapped = MapFun(Sample),
    Rest = time_map(Remaining - 1, Interval, Fun, NextState, MapFun),
    [Mapped | Rest].
%% @doc Calls a given function every `Interval' milliseconds and supports
%% a fold-like interface (each result is modified and accumulated)
-spec time_fold(N, Interval, Fun, State, FoldFun, Init) -> [term()] when
      N :: non_neg_integer(),
      Interval :: pos_integer(),
      Fun :: fun((State) -> {term(), State}),
      State :: term(),
      FoldFun :: fun((term(), Init) -> Init),
      Init :: term().
time_fold(0, _, _, _, _, Acc) ->
    Acc;
time_fold(Remaining, Interval, Fun, State, FoldFun, Acc0) ->
    %% Note: sleeps BEFORE sampling (unlike time_map/5).
    timer:sleep(Interval),
    {Sample, NextState} = Fun(State),
    Acc1 = FoldFun(Sample, Acc0),
    time_fold(Remaining - 1, Interval, Fun, NextState, FoldFun, Acc1).
%% @doc Diffs two runs of erlang:statistics(scheduler_wall_time) and
%% returns usage metrics in terms of cores and 0..1 percentages.
-spec scheduler_usage_diff(SchedTime, SchedTime) -> undefined | [{SchedulerId, Usage}] when
      SchedTime :: [{SchedulerId, ActiveTime, TotalTime}],
      SchedulerId :: pos_integer(),
      Usage :: number(),
      ActiveTime :: non_neg_integer(),
      TotalTime :: non_neg_integer().
scheduler_usage_diff(First, Last) when First =:= undefined orelse Last =:= undefined ->
    %% statistics(scheduler_wall_time) yields undefined unless the
    %% scheduler_wall_time flag is enabled; propagate that.
    undefined;
scheduler_usage_diff(First, Last) ->
    Diff = fun({{Id, _, Total}, {Id, _, Total}}) ->
                   {Id, 0.0}; % equal totals: avoid dividing by zero
              ({{Id, Active0, Total0}, {Id, Active1, Total1}}) ->
                   {Id, (Active1 - Active0) / (Total1 - Total0)}
           end,
    %% Sorting pairs up the samples per scheduler id before zipping.
    [Diff(Pair) || Pair <- lists:zip(lists:sort(First), lists:sort(Last))].
%% @doc Returns the top n element of a list of process or inet attributes
-spec sublist_top_n_attrs([Attrs], pos_integer()) -> [Attrs]
    when Attrs :: recon:proc_attrs() | recon:inet_attrs().
sublist_top_n_attrs(_, 0) ->
    %% matching lists:sublist/2 behaviour
    [];
sublist_top_n_attrs(List, Len) ->
    %% Single pass with a bounded pairing heap of size Len; entries
    %% with the largest attribute values come out first.
    pheap_fill(List, Len, []).
%% @private crush binaries from process_info into their amount of place
%% taken in memory.
binary_memory(Bins) ->
    %% Each entry is a 3-tuple whose second element is the byte size.
    lists:sum(lists:map(fun({_Id, Size, _RefCount}) -> Size end, Bins)).
%%%%%%%%%%%%%%%
%%% PRIVATE %%%
%%%%%%%%%%%%%%%
%% A bounded pairing heap keeping the N highest-ranked {Key, Val, Other}
%% triples seen so far. Entries are heaped as {{Val, Key}, Entry} so
%% ordering is by value first, then key; the heap root holds the
%% SMALLEST kept entry, i.e. the next candidate for eviction.
%% Fill phase: push the first N elements straight into the heap.
pheap_fill(List, 0, Heap) ->
    pheap_full(List, Heap);
pheap_fill([], _, Heap) ->
    pheap_to_list(Heap, []);
pheap_fill([{Y, X, _} = H | T], N, Heap) ->
    pheap_fill(T, N - 1, insert({{X, Y}, H}, Heap)).
%% Full phase: each remaining element replaces the current minimum
%% (the root) only when it ranks strictly higher.
pheap_full([], Heap) ->
    pheap_to_list(Heap, []);
pheap_full([{Y, X, _} = H | T], [{K, _} | HeapT] = Heap) ->
    case {X, Y} of
        N when N > K ->
            pheap_full(T, insert({N, H}, merge_pairs(HeapT)));
        _ ->
            pheap_full(T, Heap)
    end.
%% Pops the minimum repeatedly, prepending each entry to the
%% accumulator, so the final list is ordered largest-first.
pheap_to_list([], Acc) -> Acc;
pheap_to_list([{_, H} | T], Acc) ->
    pheap_to_list(merge_pairs(T), [H | Acc]).
-compile({inline, [insert/2, merge/2]}).
%% Pairing-heap primitives: a heap is [Root | SubHeaps] with the
%% minimum element at the root.
insert(E, []) -> [E]; %% merge([E], H)
insert(E, [E2 | _] = H) when E =< E2 -> [E, H];
insert(E, [E2 | H]) -> [E2, [E] | H].
merge(H1, []) -> H1;
merge([E1 | H1], [E2 | _] = H2) when E1 =< E2 -> [E1, H2 | H1];
merge(H1, [E2 | H2]) -> [E2, H1 | H2].
merge_pairs([]) -> [];
merge_pairs([H]) -> H;
merge_pairs([A, B | T]) -> merge(merge(A, B), merge_pairs(T)).
@ -1,208 +0,0 @@ | |||
%%%------------------------------------------------------------------- | |||
%%% @author bartlomiej.gorny@erlang-solutions.com | |||
%%% @doc | |||
%%% This module handles formatting maps. | |||
%%% It allows for trimming output to selected fields, or to nothing at all. It also adds a label
%%% to a printout.
%%% To set up a limit for a map, you need to give recon a way to tell the map you want to
%%% trim apart from all the other maps, so you have to provide something like a 'type definition'.
%%% It can be either another map which is compared to the arg, or a fun.
%%% @end | |||
%%%------------------------------------------------------------------- | |||
-module(recon_map). | |||
-author("bartlomiej.gorny@erlang-solutions.com"). | |||
%% API | |||
-export([limit/3, list/0, is_active/0, clear/0, remove/1, rename/2]). | |||
-export([process_map/1]). | |||
-type map_label() :: atom(). | |||
-type pattern() :: map() | function(). | |||
-type limit() :: all | none | atom() | binary() | [any()]. | |||
%% @doc quickly check if we want to do any record formatting
%% Active means the recon_ets_maps keeper process is registered.
-spec is_active() -> boolean().
is_active() ->
    whereis(recon_ets_maps) =/= undefined.
%% @doc remove all imported definitions, destroy the table, clean up
clear() ->
    %% Killing the keeper process drops its named ETS table with it.
    maybe_kill(recon_ets_maps),
    ok.
%% @doc Limit output to selected keys of a map (can be 'none', 'all', a key or a list of keys).
%% Pattern selects maps to process: a "pattern" is just a map, and if all key/value pairs of a pattern
%% are present in a map (in other words, the pattern is a subset), then we say the map matches
%% and we process it accordingly (apply the limit).
%%
%% Patterns are applied in alphabetical order, until a match is found.
%%
%% Instead of a pattern you can also provide a function which will take a map and return a boolean.
%% @end
-spec limit(map_label(), pattern(), limit()) -> ok | {error, any()}.
%% A pattern is either a map or a predicate fun; anything else is
%% rejected with a function_clause error.
limit(Label, Pattern, Limit) when is_atom(Label),
                                  (is_map(Pattern) orelse is_function(Pattern)) ->
    store_pattern(Label, Pattern, Limit).
%% @doc prints out all "known" map definitions and their limit settings.
%% Printout tells a map's name, the matching fields required, and the limit options.
%% @end
list() ->
    ensure_table_exists(),
    io:format("~nmap definitions and limits:~n"),
    %% delegates row-by-row printing to list/1
    list(ets:tab2list(patterns_table_name())).
%% @doc remove a given map entry
%% Returns true whether or not the label was present (ets:delete/2).
-spec remove(map_label()) -> true.
remove(Label) ->
    ensure_table_exists(),
    ets:delete(patterns_table_name(), Label).
%% @doc rename a given map entry, which allows to change priorities for
%% matching (patterns are tried in alphabetical order of their labels).
%% The first argument is the current name, and the second
%% argument is the new name.
-spec rename(map_label(), map_label()) -> renamed | missing.
rename(Name, NewName) ->
    ensure_table_exists(),
    case ets:lookup(patterns_table_name(), Name) of
        [{Name, Pattern, Limit}] ->
            %% Insert under the new label before deleting the old row;
            %% pattern and limit are carried over unchanged.
            ets:insert(patterns_table_name(), {NewName, Pattern, Limit}),
            ets:delete(patterns_table_name(), Name),
            renamed;
        [] ->
            missing
    end.
%% @doc prints out all "known" map filter definitions and their settings.
%% Printout tells the map's label, the matching patterns, and the limit options
%% @end
list(Entries) ->
    lists:foreach(
        fun({Label, Pattern, Limit}) ->
            io:format("~p: ~p -> ~p~n", [Label, Pattern, Limit])
        end,
        Entries),
    %% terminating blank line after the listing
    io:format("~n"),
    ok.
%% @private given a map, scans saved patterns for one that matches; if found, returns a label
%% and a map with limits applied; otherwise returns 'none' and original map.
%% Pattern can be:
%% <ul>
%% <li> a map - then each key in pattern is checked for equality with the map in question</li>
%% <li> a fun(map()) -> boolean()</li>
%% </ul>
-spec process_map(map()) -> map() | {atom(), map()}.
process_map(M) ->
    process_map(M, ets:tab2list(patterns_table_name())).
%% Walks the stored {Label, Pattern, Limit} rows in table order and
%% applies the first matching pattern's limit.
%% NOTE(review): the @doc above says 'none' is returned on no match,
%% but this clause returns the bare map — confirm intended contract.
process_map(M, []) ->
    M;
process_map(M, [{Label, Pattern, Limit} | Rest]) ->
    case map_matches(M, Pattern) of
        true ->
            {Label, apply_map_limits(Limit, M)};
        false ->
            process_map(M, Rest)
    end.
%% Returns true when every {Key, Value} pair of the (pre-flattened)
%% pattern is present verbatim in the map; a fun pattern is simply
%% applied to the map.
map_matches(#{} = M, Pattern) when is_function(Pattern) ->
    Pattern(M);
map_matches(_, []) ->
    true;
map_matches(M, [{K, V} | Rest]) ->
    case maps:find(K, M) of
        {ok, V} -> map_matches(M, Rest); % exact value match required
        {ok, _} -> false;
        error -> false
    end.
%% Applies a stored limit to a matched map: `none' shows everything,
%% `all' hides everything, and a key list keeps only those keys.
apply_map_limits(all, _Map) ->
    #{};
apply_map_limits(none, Map) ->
    Map;
apply_map_limits(Fields, Map) ->
    maps:with(Fields, Map).
%% @private name of the named ETS table storing map patterns.
patterns_table_name() -> recon_map_patterns.
%% Normalises the pattern and limit, then saves them under the label.
store_pattern(Label, Pattern, Limit) ->
    ensure_table_exists(),
    ets:insert(patterns_table_name(), {Label, prepare_pattern(Pattern), prepare_limit(Limit)}),
    ok.
%% Normalises a limit: the `all'/`none' markers pass through, a single
%% key (atom or binary) is wrapped in a list, key lists stay as-is.
prepare_limit(all) -> all;
prepare_limit(none) -> none;
prepare_limit(Key) when is_atom(Key) -> [Key];
prepare_limit(Key) when is_binary(Key) -> [Key];
prepare_limit(Keys) when is_list(Keys) -> Keys.
%% Normalises a pattern: map patterns are flattened to key/value pairs
%% for matching, predicate funs are kept untouched.
prepare_pattern(Pattern) when is_map(Pattern) -> maps:to_list(Pattern);
prepare_pattern(Pattern) when is_function(Pattern) -> Pattern.
%% Makes sure the pattern table exists, spawning a registered keeper
%% process that owns it on first use. `ordered_set' provides the
%% alphabetical pattern ordering documented in limit/3.
%% NOTE(review): when the table already exists, the value returned is
%% ets:info/1's result bound to `Pid' — callers rely only on the side
%% effect, not the return value.
ensure_table_exists() ->
    case ets:info(patterns_table_name()) of
        undefined ->
            case whereis(recon_ets_maps) of
                undefined ->
                    Parent = self(),
                    Ref = make_ref(),
                    %% attach to the currently running session
                    {Pid, MonRef} = spawn_monitor(fun() ->
                        register(recon_ets_maps, self()),
                        ets:new(patterns_table_name(), [ordered_set, public, named_table]),
                        Parent ! Ref,
                        ets_keeper()
                    end),
                    %% Wait for the keeper to confirm the table exists;
                    %% a crash during setup surfaces here via 'DOWN'
                    %% instead of hanging the caller.
                    receive
                        Ref ->
                            erlang:demonitor(MonRef, [flush]),
                            Pid;
                        {'DOWN', MonRef, _, _, Reason} ->
                            error(Reason)
                    end;
                Pid ->
                    Pid
            end;
        Pid ->
            Pid
    end.
%% Keeps the ETS table alive: the table dies with its owner process,
%% so this loop blocks until it is explicitly told to stop.
ets_keeper() ->
    receive
        stop -> ok;
        _ -> ets_keeper()
    end.
%%%%%%%%%%%%%%%
%%% HELPERS %%%
%%%%%%%%%%%%%%%
%% Kills the registered process if it exists, unlinking first so the
%% caller is not taken down with it, then waits until both the pid and
%% its registered name are gone.
maybe_kill(Name) ->
    case whereis(Name) of
        undefined ->
            ok;
        Pid ->
            unlink(Pid),
            exit(Pid, kill),
            wait_for_death(Pid, Name)
    end.
%% Polls every 10 ms until the pid is dead AND its name unregistered,
%% so a subsequent restart cannot race the dying process.
wait_for_death(Pid, Name) ->
    case is_process_alive(Pid) orelse whereis(Name) =:= Pid of
        true ->
            timer:sleep(10),
            wait_for_death(Pid, Name);
        false ->
            ok
    end.
@ -1,279 +0,0 @@ | |||
%%%------------------------------------------------------------------- | |||
%%% @author bartlomiej.gorny@erlang-solutions.com | |||
%%% @doc | |||
%%% This module handles formatting records for known record types. | |||
%%% Record definitions are imported from modules by user. Definitions are | |||
%%% distinguished by record name and its arity, if you have multiple records | |||
%%% of the same name and size, you have to choose one of them and some of your | |||
%%% records may be wrongly labelled. You can manipulate your definition list by | |||
%%% using import/1 and clear/1, and check which definitions are in use by executing | |||
%%% list/0. | |||
%%% @end | |||
%%%------------------------------------------------------------------- | |||
-module(recon_rec). | |||
-author("bartlomiej.gorny@erlang-solutions.com"). | |||
%% API | |||
-export([is_active/0]). | |||
-export([import/1, clear/1, clear/0, list/0, get_list/0, limit/3]). | |||
-export([format_tuple/1]). | |||
-ifdef(TEST). | |||
-export([lookup_record/2]). | |||
-endif. | |||
% basic types | |||
-type field() :: atom(). | |||
-type record_name() :: atom(). | |||
% compound | |||
-type limit() :: all | none | field() | [field()]. | |||
-type listentry() :: {module(), record_name(), [field()], limit()}. | |||
-type import_result() :: {imported, module(), record_name(), arity()} | |||
| {overwritten, module(), record_name(), arity()} | |||
| {ignored, module(), record_name(), arity(), module()}. | |||
%% @doc import record definitions from a module. If a record definition of the same name
%% and arity has already been imported from another module then the new
%% definition is ignored (returned info tells you from which module the existing definition was imported).
%% You have to choose one and possibly remove the old one using
%% clear/1. Supports importing multiple modules at once (by giving a list of atoms as
%% an argument).
%% @end
-spec import(module() | [module()]) -> import_result() | [import_result()].
import(Modules) when is_list(Modules) ->
    %% accumulate one result tuple per record across all modules
    lists:foldl(fun import/2, [], Modules);
import(Module) ->
    import(Module, []).
%% @doc quickly check if we want to do any record formatting
%% Active means the recon_ets keeper process is registered.
-spec is_active() -> boolean().
is_active() ->
    whereis(recon_ets) =/= undefined.
%% @doc remove definitions imported from a module.
%% Returns one result per stored row (the deletion result for rows
%% owned by `Module', `ok' for all others).
clear(Module) ->
    lists:map(fun(R) -> rem_for_module(R, Module) end, ets:tab2list(records_table_name())).
%% @doc remove all imported definitions, destroy the table, clean up
clear() ->
    %% Killing the keeper process drops its named ETS table with it.
    maybe_kill(recon_ets),
    ok.
%% @doc prints out all "known" (imported) record definitions and their limit settings.
%% Printout tells module a record originates from, its name and a list of field names,
%% plus the record's arity (may be handy if handling big records) and a list of field it
%% limits its output to, if set.
%% @end
list() ->
    F = fun({Module, Name, Fields, Limits}) ->
            %% e.g. "mymod: #rec(2){a,b} none"
            Fnames = lists:map(fun atom_to_list/1, Fields),
            Flds = join(",", Fnames),
            io:format("~p: #~p(~p){~s} ~p~n",
                      [Module, Name, length(Fields), Flds, Limits])
        end,
    io:format("Module: #Name(Size){<Fields>} Limits~n==========~n", []),
    lists:foreach(F, get_list()).
%% @doc returns a list of active record definitions
%% Each ETS row is converted to {Module, Name, Fields, Limit} and the
%% result is sorted for stable display.
-spec get_list() -> [listentry()].
get_list() ->
    ensure_table_exists(),
    Lst = lists:map(fun make_list_entry/1, ets:tab2list(records_table_name())),
    lists:sort(Lst).
%% @doc Limit output to selected fields of a record (can be 'none', 'all', a field or a list of fields).
%% Limit set to 'none' means there is no limit, and all fields are displayed; limit 'all' means that
%% all fields are squashed and only record name will be shown.
%% @end
-spec limit(record_name(), arity(), limit()) -> ok | {error, any()}.
limit(Name, Arity, Limit) when is_atom(Name), is_integer(Arity) ->
    case lookup_record(Name, Arity) of
        [] ->
            {error, record_unknown};
        [{Key, Fields, Mod, _}] ->
            %% overwrite the stored row, replacing only the limit part
            ets:insert(records_table_name(), {Key, Fields, Mod, Limit}),
            ok
    end.
%% @private if a tuple is a known record, formats it as "#recname{field=value}", otherwise returns
%% just a printout of a tuple.
format_tuple(Tuple) ->
    ensure_table_exists(),
    %% records are tagged tuples: dispatch on the first element
    First = element(1, Tuple),
    format_tuple(First, Tuple).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% PRIVATE
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Converts an ETS row into the {Module, Name, Fields, Limit} shape
%% returned by get_list/0; an empty limit list is reported as `none'.
make_list_entry({{Name, _Arity}, Fields, Module, Limits}) ->
    Limit = case Limits of
                [] -> none;
                Other -> Other
            end,
    {Module, Name, Fields, Limit}.
%% Imports every record definition found in `Module', prepending one
%% result tuple per record onto `ResultList'.
import(Module, ResultList) ->
    ensure_table_exists(),
    lists:foldl(fun(Rec, Res) -> store_record(Rec, Module, Res) end,
                ResultList,
                get_record_defs(Module)).
%% Stores one record definition, unless a definition of the same
%% name/arity was already imported from a DIFFERENT module, in which
%% case it is ignored and the owning module is reported instead.
store_record(Rec, Module, ResultList) ->
    {Name, Fields} = Rec,
    Arity = length(Fields),
    Result = case lookup_record(Name, Arity) of
                 [] ->
                     ets:insert(records_table_name(), rec_info(Rec, Module)),
                     {imported, Module, Name, Arity};
                 [{_, _, Module, _}] ->
                     %% same module re-imported: refresh the definition
                     ets:insert(records_table_name(), rec_info(Rec, Module)),
                     {overwritten, Module, Name, Arity};
                 [{_, _, Mod, _}] ->
                     {ignored, Module, Name, Arity, Mod}
             end,
    [Result | ResultList].
%% Reads record definitions out of a module's abstract_code chunk.
%% The module must have been compiled with debug_info; otherwise the
%% beam_lib result will not match and this crashes with a badmatch.
get_record_defs(Module) ->
    Path = code:which(Module),
    {ok, {_, [{abstract_code, {_, AC}}]}} = beam_lib:chunks(Path, [abstract_code]),
    lists:foldl(fun get_record/2, [], AC).
%% Collects `-record(...)' attributes from abstract-code forms,
%% ignoring every other form.
get_record({attribute, _Line, record, RecordDef}, Acc) -> [RecordDef | Acc];
get_record(_OtherForm, Acc) -> Acc.
%% @private Fetches the stored definition row keyed by record name and
%% field count; returns [] when no such record was imported.
lookup_record(RecName, FieldCount) ->
    ensure_table_exists(),
    ets:lookup(records_table_name(), {RecName, FieldCount}).
%% @private
%% Makes sure the record-definition table exists, spawning a registered
%% keeper process that owns it on first use.
%% NOTE(review): when the table already exists, the value returned is
%% ets:info/1's result bound to `Pid' — callers rely only on the side
%% effect, not the return value.
ensure_table_exists() ->
    case ets:info(records_table_name()) of
        undefined ->
            case whereis(recon_ets) of
                undefined ->
                    Parent = self(),
                    Ref = make_ref(),
                    %% attach to the currently running session
                    {Pid, MonRef} = spawn_monitor(fun() ->
                        register(recon_ets, self()),
                        ets:new(records_table_name(), [set, public, named_table]),
                        Parent ! Ref,
                        ets_keeper()
                    end),
                    %% Wait for the keeper to confirm the table exists;
                    %% a crash during setup surfaces here via 'DOWN'
                    %% instead of hanging the caller.
                    receive
                        Ref ->
                            erlang:demonitor(MonRef, [flush]),
                            Pid;
                        {'DOWN', MonRef, _, _, Reason} ->
                            error(Reason)
                    end;
                Pid ->
                    Pid
            end;
        Pid ->
            Pid
    end.
%% @private name of the named ETS table holding imported record definitions.
records_table_name() -> recon_record_definitions.
%% Builds the ETS row for a record definition: keyed by {Name, Arity},
%% with field names, the source module, and no display limit initially.
rec_info({Name, Fields}, Module) ->
    {{Name, length(Fields)}, field_names(Fields), Module, none}.
%% Deletes a definition row only when it was imported by `Module';
%% rows owned by other modules are left alone.
rem_for_module({_, _, Module, _} = Rec, Module) ->
    ets:delete_object(records_table_name(), Rec);
rem_for_module(_, _) ->
    ok.
%% Keeps the ETS table alive: the table dies with its owner process,
%% so this loop blocks until it is explicitly told to stop.
ets_keeper() ->
    receive
        stop -> ok;
        _ -> ets_keeper()
    end.
%% Extracts the field-name atoms from a record definition's abstract
%% code, unwrapping typed fields and ignoring default values.
field_names(Fields) ->
    [field_name(Field) || Field <- Fields].
field_name({typed_record_field, Field, _Type}) -> field_name(Field);
field_name({record_field, _, {atom, _, Name}}) -> Name;
field_name({record_field, _, {atom, _, Name}, _Default}) -> Name.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% FORMATTER
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Formats a tuple as a record when its tag and arity match an imported
%% definition, and as a plain "{...}" tuple otherwise. Tuples whose
%% first element is not an atom can never be records, so they go
%% straight to the default printout.
format_tuple(Name, Rec) when is_atom(Name) ->
    %% tuple_size/1 instead of the ambiguous size/1 (which also accepts
    %% binaries); a record tuple has one extra slot for its name tag.
    case lookup_record(Name, tuple_size(Rec) - 1) of
        [RecDef] -> format_record(Rec, RecDef);
        _ ->
            List = tuple_to_list(Rec),
            ["{", join(", ", [recon_trace:format_trace_output(true, El) || El <- List]), "}"]
    end;
format_tuple(_, Tuple) ->
    format_default(Tuple).
%% Fallback pretty-printer for values with no known record definition.
format_default(Val) ->
    io_lib:format("~p", [Val]).
%% Renders a tuple as "#name{field=value, ...}" using an imported
%% definition; falls back to the default format when the tuple size
%% does not match the definition's arity (stale/colliding definition).
format_record(Rec, {{Name, Arity}, Fields, _, Limits}) ->
    ExpectedLength = Arity + 1, % +1 for the record name tag
    case tuple_size(Rec) of
        ExpectedLength ->
            [_ | Values] = tuple_to_list(Rec),
            List = lists:zip(Fields, Values),
            LimitedList = apply_limits(List, Limits),
            ["#", atom_to_list(Name), "{",
             join(", ", [format_kv(Key, Val) || {Key, Val} <- LimitedList]),
             "}"];
        _ ->
            format_default(Rec)
    end.
%% Renders one record field as "Key=Value" via the trace formatter.
format_kv(Key, Val) ->
    %% Some messy mutually recursive calls we can't avoid
    [recon_trace:format_trace_output(true, Key), "=", recon_trace:format_trace_output(true, Val)].
%% Restricts the field/value pairs shown for a record according to its
%% configured limit; a {more, '...'} marker signals elided fields.
apply_limits(List, none) -> List;
apply_limits(_List, all) -> [];
apply_limits(List, Field) when is_atom(Field) ->
    [{Field, proplists:get_value(Field, List)}, {more, '...'}];
apply_limits(List, Fields) ->
    [KV || {Key, _} = KV <- List, lists:member(Key, Fields)] ++ [{more, '...'}].
%%%%%%%%%%%%%%%
%%% HELPERS %%%
%%%%%%%%%%%%%%%
%% Kills the registered process if it exists, unlinking first so the
%% caller is not taken down with it, then waits until both the pid and
%% its registered name are gone.
maybe_kill(Name) ->
    case whereis(Name) of
        undefined ->
            ok;
        Pid ->
            unlink(Pid),
            exit(Pid, kill),
            wait_for_death(Pid, Name)
    end.
%% Polls every 10 ms until the pid is dead AND its name unregistered,
%% so a subsequent restart cannot race the dying process.
wait_for_death(Pid, Name) ->
    case is_process_alive(Pid) orelse whereis(Name) =:= Pid of
        true ->
            timer:sleep(10),
            wait_for_death(Pid, Name);
        false ->
            ok
    end.
%% Joins list elements with a separator. OTP_RELEASE is predefined
%% from OTP 21 onward, where lists:join/2 handles arbitrary
%% separators/elements; older releases fall back to string:join/2
%% (note the swapped argument order).
-ifdef(OTP_RELEASE).
-spec join(term(), [term()]) -> [term()].
join(Sep, List) ->
    lists:join(Sep, List).
-else.
-spec join(string(), [string()]) -> string().
join(Sep, List) ->
    string:join(List, Sep).
-endif.
@ -1,733 +0,0 @@ | |||
%%% @author Fred Hebert <mononcqc@ferd.ca> | |||
%%% [http://ferd.ca/] | |||
%%% @doc | |||
%%% `recon_trace' is a module that handles tracing in a safe manner for single | |||
%%% Erlang nodes, currently for function calls only. Functionality includes: | |||
%%% | |||
%%% <ul> | |||
%%% <li>Nicer to use interface (arguably) than `dbg' or trace BIFs.</li> | |||
%%% <li>Protection against dumb decisions (matching all calls on a node | |||
%%% being traced, for example)</li> | |||
%%% <li>Adding safe guards in terms of absolute trace count or | |||
%%% rate-limiting</li>
%%% <li>Nicer formatting than default traces</li> | |||
%%% </ul> | |||
%%% | |||
%%% == Tracing Erlang Code == | |||
%%% | |||
%%% The Erlang Trace BIFs allow tracing of any Erlang code at all. They work in
%%% two parts: pid specifications, and trace patterns. | |||
%%% | |||
%%% Pid specifications let you decide which processes to target. They can be | |||
%%% specific pids, `all' pids, `existing' pids, or `new' pids (those not | |||
%%% spawned at the time of the function call). | |||
%%% | |||
%%% The trace patterns represent functions. Functions can be specified in two | |||
%%% parts: specifying the modules, functions, and arguments, and then with | |||
%%% Erlang match specifications to add constraints to arguments (see | |||
%%% {@link calls/3} for details). | |||
%%% | |||
%%% What defines whether you get traced or not is the intersection of both: | |||
%%% | |||
%%% ``` | |||
%%% _,--------,_ _,--------,_ | |||
%%% ,-' `-,,-' `-, | |||
%%% ,-' ,-' '-, `-, | |||
%%% | Matching -' '- Matching | | |||
%%% | Pids | Getting | Trace | | |||
%%% | | Traced | Patterns | | |||
%%% | -, ,- | | |||
%%% '-, '-, ,-' ,-' | |||
%%% '-,_ _,-''-,_ _,-' | |||
%%% '--------' '--------' | |||
%%% ''' | |||
%%% | |||
%%% If either the pid specification excludes a process or a trace pattern | |||
%%% excludes a given call, no trace will be received. | |||
%%% | |||
%%% == Example Session == | |||
%%% | |||
%%% First let's trace the `queue:new' functions in any process: | |||
%%% | |||
%%% ``` | |||
%%% 1> recon_trace:calls({queue, new, '_'}, 1). | |||
%%% 1 | |||
%%% 13:14:34.086078 <0.44.0> queue:new() | |||
%%% Recon tracer rate limit tripped. | |||
%%% ''' | |||
%%% | |||
%%% The limit was set to `1' trace message at most, and `recon' let us | |||
%%% know when that limit was reached. | |||
%%% | |||
%%% Let's instead look for all the `queue:in/2' calls, to see what it is | |||
%%% we're inserting in queues: | |||
%%% | |||
%%% ``` | |||
%%% 2> recon_trace:calls({queue, in, 2}, 1). | |||
%%% 1 | |||
%%% 13:14:55.365157 <0.44.0> queue:in(a, {[],[]}) | |||
%%% Recon tracer rate limit tripped. | |||
%%% ''' | |||
%%% | |||
%%% In order to see the content we want, we should change the trace patterns | |||
%%% to use a `fun' that matches on all arguments in a list (`_') and returns | |||
%%% `return_trace()'. This last part will generate a second trace for each | |||
%%% call that includes the return value: | |||
%%% | |||
%%% ``` | |||
%%% 3> recon_trace:calls({queue, in, fun(_) -> return_trace() end}, 3). | |||
%%% 1 | |||
%%% | |||
%%% 13:15:27.655132 <0.44.0> queue:in(a, {[],[]}) | |||
%%% | |||
%%% 13:15:27.655467 <0.44.0> queue:in/2 --> {[a],[]} | |||
%%% | |||
%%% 13:15:27.757921 <0.44.0> queue:in(a, {[],[]}) | |||
%%% Recon tracer rate limit tripped. | |||
%%% ''' | |||
%%% | |||
%%% Matching on argument lists can be done in a more complex manner: | |||
%%% | |||
%%% ``` | |||
%%% 4> recon_trace:calls( | |||
%%% 4> {queue, '_', fun([A,_]) when is_list(A); is_integer(A) andalso A > 1 -> return_trace() end}, | |||
%%% 4> {10,100} | |||
%%% 4> ). | |||
%%% 32 | |||
%%% | |||
%%% 13:24:21.324309 <0.38.0> queue:in(3, {[],[]}) | |||
%%% | |||
%%% 13:24:21.371473 <0.38.0> queue:in/2 --> {[3],[]} | |||
%%% | |||
%%% 13:25:14.694865 <0.53.0> queue:split(4, {[10,9,8,7],[1,2,3,4,5,6]}) | |||
%%% | |||
%%% 13:25:14.695194 <0.53.0> queue:split/2 --> {{[4,3,2],[1]},{[10,9,8,7],[5,6]}} | |||
%%% | |||
%%% 5> recon_trace:clear(). | |||
%%% ok | |||
%%% ''' | |||
%%% | |||
%%% Note that in the pattern above, no specific function (<code>'_'</code>) was | |||
%%% matched against. Instead, the `fun' used restricted functions to those | |||
%%% having two arguments, the first of which is either a list or an integer | |||
%%% greater than `1'. | |||
%%% | |||
%%% The limit was also set using `{10,100}' instead of an integer, making the | |||
%%% rate-limiting at 10 messages per 100 milliseconds, instead of an absolute
%%% value. | |||
%%% | |||
%%% Any tracing can be manually interrupted by calling `recon_trace:clear()', | |||
%%% or killing the shell process. | |||
%%% | |||
%%% Be aware that extremely broad patterns with lax rate-limiting (or very
%%% high absolute limits) may impact your node's stability in ways | |||
%%% `recon_trace' cannot easily help you with. | |||
%%% | |||
%%% In doubt, start with the most restrictive tracing possible, with low | |||
%%% limits, and progressively increase your scope. | |||
%%% | |||
%%% See {@link calls/3} for more details and tracing possibilities. | |||
%%% | |||
%%% == Structure == | |||
%%% | |||
%%% This library is production-safe due to taking the following structure for | |||
%%% tracing: | |||
%%% | |||
%%% ``` | |||
%%% [IO/Group leader] <---------------------, | |||
%%% | | | |||
%%% [shell] ---> [tracer process] ----> [formatter] | |||
%%% ''' | |||
%%% | |||
%%% The tracer process receives trace messages from the node, and enforces | |||
%%% limits in absolute terms or trace rates, before forwarding the messages | |||
%%% to the formatter. This is done so the tracer can do as little work as | |||
%%% possible and never block while building up a large mailbox. | |||
%%% | |||
%%% The tracer process is linked to the shell, and the formatter to the | |||
%%% tracer process. The formatter also traps exits to be able to handle | |||
%%% all received trace messages until the tracer termination, but will then | |||
%%% shut down as soon as possible. | |||
%%% | |||
%%% In case the operator is tracing from a remote shell which gets | |||
%%% disconnected, the links between the shell and the tracer should make it | |||
%%% so tracing is automatically turned off once you disconnect. | |||
%%% | |||
%%% If sending output to the Group Leader is not desired, you may specify | |||
%%% a different pid() via the option `io_server' in the {@link calls/3} function. | |||
%%% For instance to write the traces to a file you can do something like | |||
%%% | |||
%%% ``` | |||
%%% 1> {ok, Dev} = file:open("/tmp/trace",[write]). | |||
%%% 2> recon_trace:calls({queue, in, fun(_) -> return_trace() end}, 3, [{io_server, Dev}]). | |||
%%% 1 | |||
%%% 3> | |||
%%% Recon tracer rate limit tripped. | |||
%%% 4> file:close(Dev). | |||
%%% ''' | |||
%%% | |||
%%% The only output still sent to the Group Leader is the rate limit being | |||
%%% tripped, and any errors. The rest will be sent to the other IO | |||
%%% server (see [http://erlang.org/doc/apps/stdlib/io_protocol.html]). | |||
%%% | |||
%%% == Record Printing == | |||
%%% | |||
%%% Thanks to code contributed by Bartek Górny, record printing can be added | |||
%%% to traces by first importing records in an active session with | |||
%%% `recon_rec:import([Module, ...])', after which the records declared in | |||
%%% the module list will be supported. | |||
%%% @end | |||
-module(recon_trace). | |||
%% API | |||
-export([clear/0, calls/2, calls/3]). | |||
-export([format/1]). | |||
%% Internal exports | |||
-export([count_tracer/1, rate_tracer/2, formatter/5, format_trace_output/1, format_trace_output/2]). | |||
-type matchspec() :: [{[term()] | '_', [term()], [term()]}]. | |||
-type shellfun() :: fun((_) -> term()). | |||
-type formatterfun() :: fun((_) -> iodata()). | |||
-type millisecs() :: non_neg_integer(). | |||
-type pidspec() :: all | existing | new | recon:pid_term(). | |||
-type max_traces() :: non_neg_integer(). | |||
-type max_rate() :: {max_traces(), millisecs()}. | |||
%% trace options | |||
-type options() :: [{pid, pidspec() | [pidspec(), ...]} % default: all | |||
| {timestamp, formatter | trace} % default: formatter | |||
| {args, args | arity} % default: args | |||
| {io_server, pid()} % default: group_leader() | |||
| {formatter, formatterfun()} % default: internal formatter | |||
| return_to | {return_to, boolean()} % default: false | |||
%% match pattern options | |||
| {scope, global | local} % default: global | |||
]. | |||
-type mod() :: '_' | module(). | |||
-type fn() :: '_' | atom(). | |||
-type args() :: '_' | 0..255 | return_trace | matchspec() | shellfun(). | |||
-type tspec() :: {mod(), fn(), args()}. | |||
-type max() :: max_traces() | max_rate(). | |||
-type num_matches() :: non_neg_integer(). | |||
-export_type([mod/0, fn/0, args/0, tspec/0, num_matches/0, options/0, | |||
max_traces/0, max_rate/0]). | |||
%%%%%%%%%%%%%% | |||
%%% PUBLIC %%% | |||
%%%%%%%%%%%%%% | |||
%% @doc Stops all tracing at once.
-spec clear() -> ok.
clear() ->
    %% Turn every trace flag off for every process.
    erlang:trace(all, false, [all]),
    %% Remove both local and global trace patterns for all MFAs.
    erlang:trace_pattern({'_', '_', '_'}, false, [local, meta, call_count, call_time]),
    erlang:trace_pattern({'_', '_', '_'}, false, []), % unsets global
    %% Tear down the helper processes so a later setup can re-register them.
    maybe_kill(recon_trace_tracer),
    maybe_kill(recon_trace_formatter),
    ok.
%% @equiv calls({Mod, Fun, Args}, Max, [])
-spec calls(tspec() | [tspec(), ...], max()) -> num_matches().
calls({_, _, _} = TSpec, Max) ->
    calls([TSpec], Max, []);
calls([_ | _] = TSpecs, Max) ->
    calls(TSpecs, Max, []).
%% @doc Allows to set trace patterns and pid specifications to trace | |||
%% function calls. | |||
%% | |||
%% The basic calls take the trace patterns as tuples of the form | |||
%% `{Module, Function, Args}' where: | |||
%% | |||
%% <ul> | |||
%% <li>`Module' is any atom representing a module</li> | |||
%% <li>`Function' is any atom representing a function, or the wildcard | |||
%% <code>'_'</code></li> | |||
%% <li>`Args' is either the arity of a function (`0..255'), a wildcard | |||
%% pattern (<code>'_'</code>), a | |||
%% <a href="http://learnyousomeerlang.com/ets#you-have-been-selected">match specification</a>, | |||
%% or a function from a shell session that can be transformed into | |||
%% a match specification</li> | |||
%% </ul> | |||
%% | |||
%% There is also an argument specifying either a maximal count (a number) | |||
%% of trace messages to be received, or a maximal frequency (`{Num, Millisecs}'). | |||
%% | |||
%% Here are examples of things to trace: | |||
%% | |||
%% <ul> | |||
%% <li>All calls from the `queue' module, with 10 calls printed at most: | |||
%% ``recon_trace:calls({queue, '_', '_'}, 10)''</li> | |||
%% <li>All calls to `lists:seq(A,B)', with 100 calls printed at most: | |||
%% `recon_trace:calls({lists, seq, 2}, 100)'</li> | |||
%% <li>All calls to `lists:seq(A,B)', with 100 calls per second at most: | |||
%% `recon_trace:calls({lists, seq, 2}, {100, 1000})'</li> | |||
%% <li>All calls to `lists:seq(A,B,2)' (all sequences increasing by two) | |||
%% with 100 calls at most: | |||
%% `recon_trace:calls({lists, seq, fun([_,_,2]) -> ok end}, 100)'</li> | |||
%% <li>All calls to `iolist_to_binary/1' made with a binary as an argument | |||
%% already (kind of useless conversion!): | |||
%% `recon_trace:calls({erlang, iolist_to_binary, fun([X]) when is_binary(X) -> ok end}, 10)'</li> | |||
%% <li>Calls to the queue module only in a given process `Pid', at a rate | |||
%% of 50 per second at most: | |||
%% ``recon_trace:calls({queue, '_', '_'}, {50,1000}, [{pid, Pid}])''</li> | |||
%% <li>Print the traces with the function arity instead of literal arguments: | |||
%% `recon_trace:calls(TSpec, Max, [{args, arity}])'</li> | |||
%% <li>Matching the `filter/2' functions of both `dict' and `lists' modules, | |||
%% across new processes only: | |||
%% `recon_trace:calls([{dict,filter,2},{lists,filter,2}], 10, [{pid, new}])'</li> | |||
%% <li>Tracing the `handle_call/3' functions of a given module for all new processes, | |||
%% and those of an existing one registered with `gproc': | |||
%% `recon_trace:calls({Mod,handle_call,3}, {10,100}, [{pid, [{via, gproc, Name}, new]}'</li> | |||
%% <li>Show the result of a given function call: | |||
%% `recon_trace:calls({Mod,Fun,fun(_) -> return_trace() end}, Max, Opts)' | |||
%% or | |||
%% ``recon_trace:calls({Mod,Fun,[{'_', [], [{return_trace}]}]}, Max, Opts)'', | |||
%% the important bit being the `return_trace()' call or the | |||
%% `{return_trace}' match spec value. | |||
%% A short-hand version for this pattern of 'match anything, trace everything' | |||
%% for a function is `recon_trace:calls({Mod, Fun, return_trace})'. </li> | |||
%% </ul> | |||
%% | |||
%% There are a few more combinations possible, with multiple trace patterns per call, and more
%% options: | |||
%% | |||
%% <ul> | |||
%% <li>`{pid, PidSpec}': which processes to trace. Valid options is any of | |||
%% `all', `new', `existing', or a process descriptor (`{A,B,C}', | |||
%% `"<A.B.C>"', an atom representing a name, `{global, Name}', | |||
%% `{via, Registrar, Name}', or a pid). It's also possible to specify | |||
%% more than one by putting them in a list.</li> | |||
%% <li>`{timestamp, formatter | trace}': by default, the formatter process | |||
%% adds timestamps to messages received. If accurate timestamps are | |||
%% required, it's possible to force the usage of timestamps within | |||
%% trace messages by adding the option `{timestamp, trace}'.</li> | |||
%% <li>`{args, arity | args}': whether to print arity in function calls | |||
%% or their (by default) literal representation.</li> | |||
%% <li>`{scope, global | local}': by default, only 'global' (fully qualified | |||
%% function calls) are traced, not calls made internally. To force tracing | |||
%% of local calls, pass in `{scope, local}'. This is useful whenever | |||
%% you want to track the changes of code in a process that isn't called | |||
%% with `Module:Fun(Args)', but just `Fun(Args)'.</li> | |||
%% <li>`{formatter, fun(Term) -> io_data() end}': override the default | |||
%% formatting functionality provided by recon.</li> | |||
%% <li>`{io_server, pid() | atom()}': by default, recon logs to the current | |||
%% group leader, usually the shell. This option allows redirecting
%% trace output to a different IO server (such as a file handle).</li> | |||
%% <li>`return_to': If this option is set (in conjunction with the match | |||
%% option `{scope, local}'), the function to which the value is returned | |||
%% is output in a trace. Note that this is distinct from giving the | |||
%% *caller* since exception handling or calls in tail position may | |||
%% hide the original caller.</li> | |||
%% </ul> | |||
%% | |||
%% Also note that putting extremely large `Max' values (i.e. `99999999' or | |||
%% `{10000,1}') will probably negate most of the safe-guarding this library | |||
%% does and be dangerous to your node. Similarly, tracing extremely large | |||
%% amounts of function calls (all of them, or all of `io' for example) | |||
%% can be risky if more trace messages are generated than any process on | |||
%% the node could ever handle, despite the precautions taken by this library. | |||
%% @end | |||
-spec calls(tspec() | [tspec(), ...], max(), options()) -> num_matches().
%% A single trace spec is normalized to a one-element list.
calls({Mod, Fun, Args}, Max, Opts) ->
    calls([{Mod, Fun, Args}], Max, Opts);
%% `{Max, Time}' selects the rate-limited tracer: at most `Max' trace
%% messages per `Time' milliseconds.
calls([_ | _] = TSpecs, {Max, Time}, Opts) ->
    Formatter = validate_formatter(Opts),
    IOServer = validate_io_server(Opts),
    Tracer = setup(rate_tracer, [Max, Time], Formatter, IOServer),
    trace_calls(TSpecs, Tracer, Opts);
%% An integer `Max' selects the absolute-count tracer.
calls([_ | _] = TSpecs, Max, Opts) ->
    Formatter = validate_formatter(Opts),
    IOServer = validate_io_server(Opts),
    Tracer = setup(count_tracer, [Max], Formatter, IOServer),
    trace_calls(TSpecs, Tracer, Opts).
%%%%%%%%%%%%%%%%%%%%%%% | |||
%%% PRIVATE EXPORTS %%% | |||
%%%%%%%%%%%%%%%%%%%%%%% | |||
%% @private Forwards trace messages to the formatter until `N' of them
%% have been relayed, then shuts down normally.
count_tracer(0) ->
    exit(normal);
count_tracer(Remaining) ->
    receive
        TraceMsg ->
            recon_trace_formatter ! TraceMsg,
            count_tracer(Remaining - 1)
    end.
%% @private Relays trace messages to the formatter while enforcing a
%% rate limit: it stops whenever more than `Max' messages arrive within
%% a `Time'-millisecond window. Note that if the rate proposed is higher
%% than what the IO system of the formatter can handle, this can still
%% put a node at risk.
%%
%% It is recommended to try stricter rates to begin with.
rate_tracer(Max, Time) ->
    rate_tracer(Max, Time, 0, os:timestamp()).

rate_tracer(Max, Time, Count, WindowStart) ->
    receive
        TraceMsg ->
            recon_trace_formatter ! TraceMsg,
            Now = os:timestamp(),
            ElapsedMs = timer:now_diff(Now, WindowStart) div 1000,
            if
                %% Window expired: open a fresh one starting now.
                ElapsedMs > Time -> rate_tracer(Max, Time, 0, Now);
                %% Still under the cap inside the current window.
                Count < Max -> rate_tracer(Max, Time, Count + 1, WindowStart);
                %% Cap reached within the window: stop tracing.
                Count =:= Max -> exit(normal)
            end
    end.
%% @private Formats traces to be output.
%%
%% Sits between the tracer and the IO server. It traps exits so it can
%% keep formatting messages until the tracer terminates, then shuts down
%% itself. `Parent'/`Ref' implement a startup handshake so setup/4 knows
%% the formatter is linked to the tracer before tracing begins.
formatter(Tracer, Parent, Ref, FormatterFun, IOServer) ->
    process_flag(trap_exit, true),
    link(Tracer),
    Parent ! {Ref, linked},
    formatter(Tracer, IOServer, FormatterFun).

formatter(Tracer, IOServer, FormatterFun) ->
    receive
        {'EXIT', Tracer, normal} ->
            io:format("Recon tracer rate limit tripped.~n"),
            exit(normal);
        {'EXIT', Tracer, Reason} ->
            exit(Reason);
        TraceMsg ->
            %% Print the formatted trace as data (~ts) rather than using it
            %% directly as a format string: traced terms can contain literal
            %% `~' characters, which would otherwise be interpreted as
            %% control sequences and crash io:format/3.
            io:format(IOServer, "~ts", [FormatterFun(TraceMsg)]),
            formatter(Tracer, IOServer, FormatterFun)
    end.
%%%%%%%%%%%%%%%%%%%%%%% | |||
%%% SETUP FUNCTIONS %%% | |||
%%%%%%%%%%%%%%%%%%%%%%% | |||
%% starts the tracer and formatter processes, and
%% cleans them up before each call.
setup(TracerFun, TracerArgs, FormatterFun, IOServer) ->
    %% Tear down any previous session first so the registered names
    %% below are guaranteed to be free.
    clear(),
    Ref = make_ref(),
    Tracer = spawn_link(?MODULE, TracerFun, TracerArgs),
    register(recon_trace_tracer, Tracer),
    Format = spawn(?MODULE, formatter, [Tracer, self(), Ref, FormatterFun, IOServer]),
    register(recon_trace_formatter, Format),
    %% Wait for the formatter to confirm it is linked to the tracer
    %% before any trace messages can flow.
    receive
        {Ref, linked} -> Tracer
    after 5000 ->
        error(setup_failed)
    end.
%% Sets the traces in action: installs one trace pattern per spec, then
%% enables call tracing for every requested pid spec, and returns the
%% total number of functions matched.
trace_calls(TSpecs, Tracer, Opts) ->
    {PidSpecs, TraceOpts, MatchOpts} = validate_opts(Opts),
    SetPattern =
        fun({Mod, Fun, Args}) ->
            {Arity, Spec} = validate_tspec(Mod, Fun, Args),
            erlang:trace_pattern({Mod, Fun, Arity}, Spec, MatchOpts)
        end,
    Matches = lists:map(SetPattern, TSpecs),
    _ = [erlang:trace(PidSpec, true, [call, {tracer, Tracer} | TraceOpts])
         || PidSpec <- PidSpecs],
    lists:sum(Matches).
%%%%%%%%%%%%%%%%%% | |||
%%% VALIDATION %%% | |||
%%%%%%%%%%%%%%%%%% | |||
%% Translates the user-facing option list into the three pieces the
%% trace BIFs need: pid specs, erlang:trace/3 options, and
%% erlang:trace_pattern/3 options (the match scope).
validate_opts(Opts) ->
    PidSpecs = validate_pid_specs(proplists:get_value(pid, Opts, all)),
    Scope = proplists:get_value(scope, Opts, global),
    %% By default the formatter stamps messages on arrival; `trace'
    %% requests VM-side timestamps instead.
    TimestampOpts = case proplists:get_value(timestamp, Opts, formatter) of
                        formatter -> [];
                        trace -> [timestamp]
                    end,
    %% `arity' replaces literal argument lists with just the arity.
    ArityOpts = case proplists:get_value(args, Opts, args) of
                    args -> [];
                    arity -> [arity]
                end,
    ReturnToOpts = case proplists:get_value(return_to, Opts, undefined) of
                       true when Scope =:= local ->
                           [return_to];
                       true when Scope =:= global ->
                           io:format("Option return_to only works with option {scope, local}~n"),
                           %% Set it anyway
                           [return_to];
                       _ ->
                           []
                   end,
    {PidSpecs, TimestampOpts ++ ArityOpts ++ ReturnToOpts, [Scope]}.
%% Support the regular specs, but also allow `recon:pid_term()' and lists
%% of further pid specs.
-spec validate_pid_specs(pidspec() | [pidspec(), ...]) ->
    [all | new | existing | pid(), ...].
validate_pid_specs(all) -> [all];
validate_pid_specs(existing) -> [existing];
validate_pid_specs(new) -> [new];
%% Single-element list: unwrap and validate the element itself.
validate_pid_specs([Spec]) -> validate_pid_specs(Spec);
validate_pid_specs(PidTerm = [Spec | Rest]) ->
    %% can be "<a.b.c>" or [pidspec()]
    %% Try the whole term as one pid term first; recon_lib raises
    %% function_clause when it is not one, in which case the term is
    %% treated as a list of specs and each element is validated.
    try
        [recon_lib:term_to_pid(PidTerm)]
    catch
        error:function_clause ->
            validate_pid_specs(Spec) ++ validate_pid_specs(Rest)
    end;
validate_pid_specs(PidTerm) ->
    %% has to be `recon:pid_term()'.
    [recon_lib:term_to_pid(PidTerm)].
%% Normalizes the `Args' part of a trace spec into `{Arity, MatchSpec}'
%% as expected by erlang:trace_pattern/3, refusing combinations that
%% would trace far too much to be safe.
validate_tspec(Mod, Fun, Fn) when is_function(Fn) ->
    %% Shell funs are converted to match specs first.
    validate_tspec(Mod, Fun, fun_to_ms(Fn));
%% helper to save typing for common actions
validate_tspec(Mod, Fun, return_trace) ->
    validate_tspec(Mod, Fun, [{'_', [], [{return_trace}]}]);
validate_tspec(Mod, Fun, Args) ->
    %% The banned mod check can be bypassed by using
    %% match specs if you really feel like being dumb.
    Banned = lists:member(Mod, ['_', ?MODULE, io, lists]),
    case {Banned, Args} of
        {true, '_'} -> error({dangerous_combo, {Mod, Fun, Args}});
        {true, []} -> error({dangerous_combo, {Mod, Fun, Args}});
        _ -> ok
    end,
    case Args of
        '_' -> {'_', true};
        _ when is_list(Args) -> {'_', Args};
        _ when Args >= 0, Args =< 255 -> {Args, true}
    end.
%% Returns the user-supplied formatter fun (must be arity 1) when one is
%% present in the options; otherwise the module's default format/1.
validate_formatter(Opts) ->
    case proplists:get_value(formatter, Opts) of
        F when is_function(F, 1) -> F;
        _ -> fun format/1
    end.
%% IO device that traces are printed to; defaults to the caller's group
%% leader (usually the shell).
validate_io_server(Opts) ->
    proplists:get_value(io_server, Opts, group_leader()).
%%%%%%%%%%%%%%%%%%%%%%%% | |||
%%% TRACE FORMATTING %%% | |||
%%%%%%%%%%%%%%%%%%%%%%%% | |||
%% Thanks Geoff Cant for the foundations for this. | |||
%% @doc Default trace formatter: converts one raw trace message into a
%% printable line of the form "\nH:M:Seconds Pid <event>". One case
%% branch per trace message type; the comment above each branch shows
%% the raw trace tuple it corresponds to. Unknown types fall through to
%% a generic representation.
format(TraceMsg) ->
    {Type, Pid, {Hour, Min, Sec}, TraceInfo} = extract_info(TraceMsg),
    {FormatStr, FormatArgs} = case {Type, TraceInfo} of
        %% {trace, Pid, 'receive', Msg}
        {'receive', [Msg]} ->
            {"< ~p", [Msg]};
        %% {trace, Pid, send, Msg, To}
        {send, [Msg, To]} ->
            {" > ~p: ~p", [To, Msg]};
        %% {trace, Pid, send_to_non_existing_process, Msg, To}
        {send_to_non_existing_process, [Msg, To]} ->
            {" > (non_existent) ~p: ~p", [To, Msg]};
        %% {trace, Pid, call, {M, F, Args}}
        {call, [{M, F, Args}]} ->
            {"~p:~p~s", [M, F, format_args(Args)]};
        %% {trace, Pid, call, {M, F, Args}, Msg}
        {call, [{M, F, Args}, Msg]} ->
            {"~p:~p~s ~s", [M, F, format_args(Args), format_trace_output(Msg)]};
        %% {trace, Pid, return_to, {M, F, Arity}}
        {return_to, [{M, F, Arity}]} ->
            {" '--> ~p:~p/~p", [M, F, Arity]};
        %% {trace, Pid, return_from, {M, F, Arity}, ReturnValue}
        {return_from, [{M, F, Arity}, Return]} ->
            {"~p:~p/~p --> ~s", [M, F, Arity, format_trace_output(Return)]};
        %% {trace, Pid, exception_from, {M, F, Arity}, {Class, Value}}
        {exception_from, [{M, F, Arity}, {Class, Val}]} ->
            {"~p:~p/~p ~p ~p", [M, F, Arity, Class, Val]};
        %% {trace, Pid, spawn, Spawned, {M, F, Args}}
        {spawn, [Spawned, {M, F, Args}]} ->
            {"spawned ~p as ~p:~p~s", [Spawned, M, F, format_args(Args)]};
        %% {trace, Pid, exit, Reason}
        {exit, [Reason]} ->
            {"EXIT ~p", [Reason]};
        %% {trace, Pid, link, Pid2}
        {link, [Linked]} ->
            {"link(~p)", [Linked]};
        %% {trace, Pid, unlink, Pid2}
        {unlink, [Linked]} ->
            {"unlink(~p)", [Linked]};
        %% {trace, Pid, getting_linked, Pid2}
        {getting_linked, [Linker]} ->
            {"getting linked by ~p", [Linker]};
        %% {trace, Pid, getting_unlinked, Pid2}
        {getting_unlinked, [Unlinker]} ->
            {"getting unlinked by ~p", [Unlinker]};
        %% {trace, Pid, register, RegName}
        {register, [Name]} ->
            {"registered as ~p", [Name]};
        %% {trace, Pid, unregister, RegName}
        {unregister, [Name]} ->
            {"no longer registered as ~p", [Name]};
        %% {trace, Pid, in, {M, F, Arity} | 0}
        {in, [{M, F, Arity}]} ->
            {"scheduled in for ~p:~p/~p", [M, F, Arity]};
        {in, [0]} ->
            {"scheduled in", []};
        %% {trace, Pid, out, {M, F, Arity} | 0}
        {out, [{M, F, Arity}]} ->
            {"scheduled out from ~p:~p/~p", [M, F, Arity]};
        {out, [0]} ->
            {"scheduled out", []};
        %% {trace, Pid, gc_start, Info}
        {gc_start, [Info]} ->
            HeapSize = proplists:get_value(heap_size, Info),
            OldHeapSize = proplists:get_value(old_heap_size, Info),
            MbufSize = proplists:get_value(mbuf_size, Info),
            {"gc beginning -- heap ~p bytes",
             [HeapSize + OldHeapSize + MbufSize]};
        %% {trace, Pid, gc_end, Info}
        {gc_end, [Info]} ->
            HeapSize = proplists:get_value(heap_size, Info),
            OldHeapSize = proplists:get_value(old_heap_size, Info),
            MbufSize = proplists:get_value(mbuf_size, Info),
            {"gc finished -- heap ~p bytes",
             [HeapSize + OldHeapSize + MbufSize]};
        _ ->
            {"unknown trace type ~p -- ~p", [Type, TraceInfo]}
    end,
    io_lib:format("~n~p:~p:~9.6.0f ~p " ++ FormatStr ++ "~n",
                  [Hour, Min, Sec, Pid] ++ FormatArgs).
%% Splits a raw trace tuple into {Type, Pid, {H, M, Secs}, EventInfo}.
%% `trace_ts' messages carry their own timestamp as the last element;
%% plain `trace' messages are stamped with the current time on arrival.
extract_info(TraceMsg) ->
    case tuple_to_list(TraceMsg) of
        [trace_ts, Pid, Type | Rest] ->
            {EventInfo, [Timestamp]} = lists:split(length(Rest) - 1, Rest),
            {Type, Pid, to_hms(Timestamp), EventInfo};
        [trace, Pid, Type | EventInfo] ->
            {Type, Pid, to_hms(os:timestamp()), EventInfo}
    end.

%% Converts an os:timestamp()-style stamp to local {Hour, Minute,
%% FloatSeconds}; anything else maps to {0, 0, 0}.
to_hms({_, _, Micro} = Stamp) ->
    {_Date, {H, M, Secs}} = calendar:now_to_local_time(Stamp),
    {H, M, Secs rem 60 + Micro / 1000000};
to_hms(_) ->
    {0, 0, 0}.
%% Renders the argument part of a call: either "/Arity" or a
%% parenthesized, comma-separated list of formatted arguments.
format_args(Arity) when is_integer(Arity) ->
    [$/ | integer_to_list(Arity)];
format_args(Args) when is_list(Args) ->
    Rendered = [format_trace_output(Arg) || Arg <- Args],
    [$(, join(", ", Rendered), $)].
%% @doc formats call arguments and return values - most types are just printed out, except for
%% tuples recognised as records, which mimic the source code syntax
%% @end
format_trace_output(Args) ->
    %% Record/map awareness is looked up from recon_rec/recon_map state
    %% once per top-level term.
    format_trace_output(recon_rec:is_active(), recon_map:is_active(), Args).

%% Same, but with record support decided by the caller.
format_trace_output(Recs, Args) ->
    format_trace_output(Recs, recon_map:is_active(), Args).
%% Dispatches on (RecordsActive, MapsActive, Term). Record support takes
%% precedence for tuples; map support changes how maps are rendered.
%% Terms that need no special handling fall back to plain ~p printing.
format_trace_output(true, _, Args) when is_tuple(Args) ->
    %% Records enabled: recon_rec decides whether the tuple is a known record.
    recon_rec:format_tuple(Args);
format_trace_output(false, true, Args) when is_tuple(Args) ->
    format_tuple(false, true, Args);
format_trace_output(Recs, Maps, Args) when is_list(Args), Recs orelse Maps ->
    case io_lib:printable_list(Args) of
        true ->
            %% Printable strings are shown whole, not element by element.
            io_lib:format("~p", [Args]);
        false ->
            format_maybe_improper_list(Recs, Maps, Args)
    end;
format_trace_output(Recs, true, Args) when is_map(Args) ->
    %% recon_map:process_map/1 may attach a label to known map shapes.
    {Label, Map} = case recon_map:process_map(Args) of
        {L, M} -> {atom_to_list(L), M};
        M -> {"", M}
    end,
    ItemList = maps:to_list(Map),
    [Label,
     "#{",
     join(", ", [format_kv(Recs, true, Key, Val) || {Key, Val} <- ItemList]),
     "}"];
format_trace_output(Recs, false, Args) when is_map(Args) ->
    ItemList = maps:to_list(Args),
    ["#{",
     join(", ", [format_kv(Recs, false, Key, Val) || {Key, Val} <- ItemList]),
     "}"];
format_trace_output(_, _, Args) ->
    io_lib:format("~p", [Args]).
%% Renders one map association as `Key=>Value', recursing through the
%% main formatter so nested records/maps are recognized on both sides.
format_kv(Recs, Maps, Key, Val) ->
    [format_trace_output(Recs, Maps, Key), "=>", format_trace_output(Recs, Maps, Val)].
%% Formats a tuple element by element so nested records/maps are still
%% recognized. The last element is special-cased (mirroring
%% format_maybe_improper_list_/3 below) so the output does not carry a
%% trailing comma, i.e. {a,b} renders as "{a,b}" rather than "{a,b,}".
format_tuple(Recs, Maps, Tup) ->
    [${ | format_tuple_(Recs, Maps, tuple_to_list(Tup))].

format_tuple_(_Recs, _Maps, []) ->
    "}";
format_tuple_(Recs, Maps, [H]) ->
    %% Final element: close the brace without a separator.
    [format_trace_output(Recs, Maps, H), $}];
format_tuple_(Recs, Maps, [H | T]) ->
    [format_trace_output(Recs, Maps, H), $,,
     format_tuple_(Recs, Maps, T)].
%% Formats a (possibly improper) list element by element so nested
%% records/maps are still recognized; an improper tail is rendered in
%% source syntax as `Head|Tail'.
format_maybe_improper_list(Recs, Maps, List) ->
    [$[ | format_maybe_improper_list_(Recs, Maps, List)].

format_maybe_improper_list_(_, _, []) ->
    "]";
format_maybe_improper_list_(Recs, Maps, [H | []]) ->
    %% Last element: close the bracket without a trailing separator.
    [format_trace_output(Recs, Maps, H), $]];
format_maybe_improper_list_(Recs, Maps, [H | T]) when is_list(T) ->
    [format_trace_output(Recs, Maps, H), $,,
     format_maybe_improper_list_(Recs, Maps, T)];
format_maybe_improper_list_(Recs, Maps, [H | T]) when not is_list(T) ->
    %% Handling improper lists
    [format_trace_output(Recs, Maps, H), $|,
     format_trace_output(Recs, Maps, T), $]].
%%%%%%%%%%%%%%% | |||
%%% HELPERS %%% | |||
%%%%%%%%%%%%%%% | |||
%% Kills the process registered under `Name' (if any) and blocks until
%% both the process and its registration are gone, so the name can be
%% re-registered immediately afterwards.
maybe_kill(Name) ->
    case whereis(Name) of
        undefined ->
            ok;
        Proc ->
            %% Unlink first so we do not receive the exit signal ourselves.
            unlink(Proc),
            exit(Proc, kill),
            wait_for_death(Proc, Name)
    end.
%% Polls every 10 ms until `Pid' is no longer alive and `Name' no longer
%% resolves to it.
wait_for_death(Pid, Name) ->
    Gone = not is_process_alive(Pid) andalso whereis(Name) =/= Pid,
    case Gone of
        true ->
            ok;
        false ->
            timer:sleep(10),
            wait_for_death(Pid, Name)
    end.
%% Borrowed from dbg: converts a shell-defined fun into a match spec via
%% ms_transform. Only funs created in the shell carry the abstract code
%% (erl_eval fun_data) this needs; compiled funs make the process exit
%% with `shell_funs_only'.
fun_to_ms(ShellFun) when is_function(ShellFun) ->
    case erl_eval:fun_data(ShellFun) of
        false ->
            exit(shell_funs_only);
        {fun_data, ImportList, Clauses} ->
            case ms_transform:transform_from_shell(dbg, Clauses, ImportList) of
                {error, [{_, [{_, _, Code} | _]} | _], _} ->
                    io:format("Error: ~s~n",
                              [ms_transform:format_error(Code)]),
                    {error, transform_error};
                MatchSpec ->
                    MatchSpec
            end
    end.
%% OTP_RELEASE is only defined on OTP 21+: there, use lists:join/2 (works
%% on arbitrary terms and returns a deep list); on older releases fall
%% back to string:join/2, which only handles flat strings. Note the
%% argument order differs between the two stdlib calls.
-ifdef(OTP_RELEASE).
-spec join(term(), [term()]) -> [term()].
join(Sep, List) ->
    lists:join(Sep, List).
-else.
-spec join(string(), [string()]) -> string().
join(Sep, List) ->
    string:join(List, Sep).
-endif.
%% ==================== utTc.erl ====================
-module(utTc). | |||
-compile(inline). | |||
-compile({inline_size, 128}). | |||
-export([ | |||
tc/1 | |||
, tc/2 | |||
, tc/3 | |||
, ts/4 | |||
, tm/5 | |||
, test/1 | |||
]). | |||
%% Measure the execution time (in nanoseconds) for Fun().
%% Uses the monotonic clock so the measurement is immune to wall-clock
%% adjustments; returns both the elapsed time and the fun's result.
-spec tc(Fun :: function()) -> {Time :: integer(), Value :: term()}.
tc(F) ->
    Start = erlang:monotonic_time(),
    Result = F(),
    Stop = erlang:monotonic_time(),
    Elapsed = erlang:convert_time_unit(Stop - Start, native, nanosecond),
    {Elapsed, Result}.
%% Measure the execution time (in nanoseconds) for Fun(Args).
%% Same contract as tc/1 but applies F to the argument list A.
-spec tc(Fun :: function(), Arguments :: [term()]) -> {Time :: integer(), Value :: term()}.
tc(F, A) ->
    Start = erlang:monotonic_time(),
    Result = apply(F, A),
    Stop = erlang:monotonic_time(),
    Elapsed = erlang:convert_time_unit(Stop - Start, native, nanosecond),
    {Elapsed, Result}.
%% Measure the execution time (in nanoseconds) for an MFA.
%% Same contract as tc/1 but calls M:F(A).
-spec tc(Module :: module(), Function :: atom(), Arguments :: [term()]) -> {Time :: integer(), Value :: term()}.
tc(M, F, A) ->
    Start = erlang:monotonic_time(),
    Result = apply(M, F, A),
    Stop = erlang:monotonic_time(),
    Elapsed = erlang:convert_time_unit(Stop - Start, native, nanosecond),
    {Elapsed, Result}.
%% Single-process loop test: LoopTimes is the number of iterations.
%% utTc:ts(LoopTimes, Module, Function, ArgsList).
%% Multi-process concurrency test: SpawnProcessesCount is the number of
%% concurrent processes and LoopTimes the per-process iteration count.
%% utTc:tm(ProcessesCount, LoopTimes, Module, Function, ArgsList).
%% Run a single M:F(A) call and return its duration in nanoseconds,
%% measured with the monotonic clock. The call's result is discarded.
doTc(M, F, A) ->
    Before = erlang:monotonic_time(),
    _ = apply(M, F, A),
    After = erlang:monotonic_time(),
    erlang:convert_time_unit(After - Before, native, nanosecond).
%% Count how many samples fall strictly above the average and how many
%% fall at-or-below it. Returns {Greater, Less}.
distribution(Samples, Aver) ->
    Tally =
        fun(Sample, {Greater, Less}) ->
            case Sample > Aver of
                true -> {Greater + 1, Less};
                false -> {Greater, Less + 1}
            end
        end,
    lists:foldl(Tally, {0, 0}, Samples).
%% ===================================================================
%% test: one process test N times
%% ===================================================================
%% Run M:F(A) LoopTime times in the calling process and print a timing
%% summary: max/min/total/average run time in nanoseconds, plus the
%% counts of runs above and at-or-below the average.
ts(LoopTime, M, F, A) ->
    {Max, Min, Sum, Aver, Greater, Less} = loopTs(LoopTime, M, F, A, LoopTime, 0, 0, 0, []),
    io:format("=====================~n"),
    %% Render the argument list as "A1, A2, ..." (each argument capped
    %% at ~80 chars). The previous version badmatch'ed when A =:= []
    %% because stripping the leading ", " (16 bits) from <<>> cannot
    %% match; an empty argument list now renders as an empty string.
    ArgsStr =
        case A of
            [] ->
                <<>>;
            _ ->
                <<_:16, Rest/binary>> = << <<", ", (iolist_to_binary(io_lib:format("~p", [OArg], [{chars_limit, 80}])))/binary>> || OArg <- A>>,
                Rest
        end,
    io:format("execute ~p:~p(~s).~n", [M, F, ArgsStr]),
    io:format("execute LoopTime:~p~n", [LoopTime]),
    io:format("MaxTime: ~10s(ns) ~10s(s)~n", [integer_to_binary(Max), float_to_binary(Max / 1000000000, [{decimals, 6}, compact])]),
    io:format("MinTime: ~10s(ns) ~10s(s)~n", [integer_to_binary(Min), float_to_binary(Min / 1000000000, [{decimals, 6}, compact])]),
    io:format("SumTime: ~10s(ns) ~10s(s)~n", [integer_to_binary(Sum), float_to_binary(Sum / 1000000000, [{decimals, 6}, compact])]),
    io:format("AvgTime: ~10s(ns) ~10s(s)~n", [float_to_binary(Aver, [{decimals, 6}, compact]), float_to_binary(Aver / 1000000000, [{decimals, 6}, compact])]),
    %% NOTE(review): Greater/LoopTime is a 0..1 fraction yet is printed
    %% with a "%" suffix — confirm whether multiplying by 100 was meant.
    io:format("Grar : ~10s(cn) ~10s(~s)~n", [integer_to_binary(Greater), float_to_binary(Greater / LoopTime, [{decimals, 2}]), <<"%">>]),
    io:format("Less : ~10s(cn) ~10s(~s)~n", [integer_to_binary(Less), float_to_binary(Less / LoopTime, [{decimals, 2}]), <<"%">>]),
    io:format("=====================~n").
%% Timing loop for ts/4: run M:F(A) Index more times, accumulating the
%% running max/min/sum and the raw sample list. Max =:= 0 means "no
%% sample recorded yet", so the first sample seeds both extremes.
loopTs(0, _M, _F, _A, LoopTime, Max, Min, Sum, List) ->
    Aver = Sum / LoopTime,
    {Greater, Less} = distribution(List, Aver),
    {Max, Min, Sum, Aver, Greater, Less};
loopTs(Index, M, F, A, LoopTime, Max, Min, Sum, List) ->
    Nanosecond = doTc(M, F, A),
    {NewMax, NewMin} =
        case Max of
            0 -> {Nanosecond, Nanosecond};
            _ -> {max(Max, Nanosecond), min(Min, Nanosecond)}
        end,
    loopTs(Index - 1, M, F, A, LoopTime, NewMax, NewMin, Sum + Nanosecond, [Nanosecond | List]).
%% ===================================================================
%% Concurrency test: N processes each test one time
%% ===================================================================
%% Spawn ProcCnt linked worker processes, each running M:F(A) LoopTime
%% times, then collect per-process totals and print a summary
%% (per-process max/min/sum/average plus per-call average, FAvgTime).
tm(ProcCnt, LoopTime, M, F, A) ->
    loopSpawn(ProcCnt, M, F, A, self(), LoopTime),
    {Max, Min, Sum, Aver, Greater, Less} = collector(ProcCnt, 0, 0, 0, ProcCnt, []),
    io:format("=====================~n"),
    %% Render the argument list as "A1, A2, ..." (each argument capped
    %% at ~80 chars). The previous version badmatch'ed when A =:= []
    %% because stripping the leading ", " (16 bits) from <<>> cannot
    %% match; an empty argument list now renders as an empty string.
    ArgsStr =
        case A of
            [] ->
                <<>>;
            _ ->
                <<_:16, Rest/binary>> = << <<", ", (iolist_to_binary(io_lib:format("~p", [OArg], [{chars_limit, 80}])))/binary>> || OArg <- A>>,
                Rest
        end,
    io:format("execute ~p:~p(~s).~n", [M, F, ArgsStr]),
    io:format("execute LoopTime:~p~n", [LoopTime]),
    io:format("execute ProcCnts:~p~n", [ProcCnt]),
    io:format("PMaxTime: ~10s(ns) ~10s(s)~n", [integer_to_binary(Max), float_to_binary(Max / 1000000000, [{decimals, 6}, compact])]),
    io:format("PMinTime: ~10s(ns) ~10s(s)~n", [integer_to_binary(Min), float_to_binary(Min / 1000000000, [{decimals, 6}, compact])]),
    io:format("PSumTime: ~10s(ns) ~10s(s)~n", [integer_to_binary(Sum), float_to_binary(Sum / 1000000000, [{decimals, 6}, compact])]),
    io:format("PAvgTime: ~10s(ns) ~10s(s)~n", [float_to_binary(Aver, [{decimals, 6}, compact]), float_to_binary(Aver / 1000000000, [{decimals, 6}, compact])]),
    io:format("FAvgTime: ~10s(ns) ~10s(s)~n", [float_to_binary(Aver / LoopTime, [{decimals, 6}, compact]), float_to_binary(Aver / LoopTime / 1000000000, [{decimals, 6}, compact])]),
    %% NOTE(review): Greater/ProcCnt is a 0..1 fraction yet is printed
    %% with a "%" suffix — confirm whether multiplying by 100 was meant.
    io:format("PGrar : ~10s(cn) ~10s(~s)~n", [integer_to_binary(Greater), float_to_binary(Greater / ProcCnt, [{decimals, 2}]), <<"%">>]),
    io:format("PLess : ~10s(cn) ~10s(~s)~n", [integer_to_binary(Less), float_to_binary(Less / ProcCnt, [{decimals, 2}]), <<"%">>]),
    io:format("=====================~n").
%% Spawn ProcCnt linked workers; each runs M:F(A) LoopTime times and
%% reports its total run time back to CollectorPid.
loopSpawn(0, _M, _F, _A, _CollectorPid, _LoopTime) ->
    ok;
loopSpawn(Remaining, M, F, A, CollectorPid, LoopTime) ->
    _ = spawn_link(fun() -> worker(LoopTime, M, F, A, CollectorPid) end),
    loopSpawn(Remaining - 1, M, F, A, CollectorPid, LoopTime).
%% Gather {result, Nanosecond} totals from the workers spawned by tm/5.
%% Max =:= 0 means "no result received yet", so the first result seeds
%% both extremes. Gives up after 30 minutes (the caller's subsequent
%% tuple match then fails, surfacing the timeout as a badmatch).
collector(0, Max, Min, Sum, ProcCnt, List) ->
    Aver = Sum / ProcCnt,
    {Greater, Less} = distribution(List, Aver),
    {Max, Min, Sum, Aver, Greater, Less};
collector(Remaining, Max, Min, Sum, ProcCnt, List) ->
    receive
        {result, Nanosecond} ->
            {NewMax, NewMin} =
                case Max of
                    0 -> {Nanosecond, Nanosecond};
                    _ -> {max(Max, Nanosecond), min(Min, Nanosecond)}
                end,
            collector(Remaining - 1, NewMax, NewMin, Sum + Nanosecond, ProcCnt, [Nanosecond | List])
    after 1800000 ->
        io:format("execute time out~n"),
        ok
    end.
%% Worker body: run the loop and send the total elapsed nanoseconds
%% back to the collector as a tagged message.
worker(LoopTime, M, F, A, CollectorPid) ->
    CollectorPid ! {result, loopTm(LoopTime, M, F, A, 0)}.
%% Accumulate the total run time (nanoseconds, despite the old local
%% name "Microsecond") of LoopTime calls to M:F(A).
loopTm(0, _M, _F, _A, Acc) ->
    Acc;
loopTm(Remaining, M, F, A, Acc) ->
    Elapsed = doTc(M, F, A),
    loopTm(Remaining - 1, M, F, A, Acc + Elapsed).
%% Sanity check for the clocks: sleep N ms and print the elapsed time as
%% seen by the monotonic clock, then again by the system (wall) clock.
test(N) ->
    MonoStart = erlang:monotonic_time(),
    timer:sleep(N),
    MonoStop = erlang:monotonic_time(),
    Elapsed = erlang:convert_time_unit(MonoStop - MonoStart, native, nanosecond),
    io:format("IMY******************111 ~p~n", [Elapsed]),
    SysStart = erlang:system_time(nanosecond),
    timer:sleep(N),
    SysStop = erlang:system_time(nanosecond),
    io:format("IMY******************222 ~p~n", [SysStop - SysStart]).