NIF_ATOMS(NIF_ATOM_DECL)
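
// NIF_ATOMS is assumed to be an X-macro list (defined earlier in this file,
// above this excerpt) of the atoms used below: NIF_ATOM_DECL declares one
// static ERL_NIF_TERM per atom, and NIF_ATOM_INIT (used in load() below)
// creates each atom with enif_make_atom once the library is loaded.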

static int load(ErlNifEnv *env, void **priv_data, ERL_NIF_TERM load_info) {
    NIF_ATOMS(NIF_ATOM_INIT)

    *priv_data = NULL;

    return 0;
}

static int upgrade(ErlNifEnv *env, void **priv_data, void **old_priv_data, ERL_NIF_TERM load_info) {
    *priv_data = *old_priv_data;

    return 0;
}

static void unload(ErlNifEnv *env, void *priv_data) {
}
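
// These NIFs implement an erl_tracer-style tracer: for every trace event the
// runtime first calls the matching enabled*/3 callback, which returns one of
// the atoms 'trace' (deliver the event), 'discard' (drop this event) or
// 'remove' (detach the tracer; checked through the trace_status tag), and only
// when tracing is enabled does it invoke trace/5 with the full event details.
// enabled_call, enabled_procs, enabled_send, etc. are the per-tag variants of
// the generic enabled/3 callback.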

// enabled(TraceTag, TracerState, Tracee)
static ERL_NIF_TERM enabled(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
    int arity;
    const ERL_NIF_TERM *tuple;
    ErlNifPid tracer_pid;

    // This function will only be called for trace_status.
    // We can take a few shortcuts knowing this.

    // Disable the trace when the tracer state is not the expected tuple
    // (its first element is the tracer pid).
    if (!enif_get_tuple(env, argv[1], &arity, &tuple)) {
        return atom_remove;
    }

    // Because the tracers supervisor is a one_for_all, we only need
    // to check one of the tracer processes to confirm all are alive.

    // Disable the trace when one of the tracers is not a local process.
    if (!enif_get_local_pid(env, tuple[0], &tracer_pid))
        return atom_remove;

    // Disable the trace when one of the tracers is not alive.
    if (!enif_is_process_alive(env, &tracer_pid))
        return atom_remove;

    return atom_discard;
}
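
// Note: for the trace_status check handled above, only a 'remove' return
// detaches the tracer; returning 'discard' keeps it attached while emitting
// no event for the status check itself.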

static ERL_NIF_TERM enabled_call(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
    // We always want both call and return_to.
    return atom_trace;
}

static ERL_NIF_TERM enabled_procs(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
    int arity;
    const ERL_NIF_TERM *tuple;

    // Disable the trace when the tracer state is not the expected tuple
    // (its second element is the tracing mode).
    if (!enif_get_tuple(env, argv[1], &arity, &tuple)) {
        return atom_remove;
    }

    // We only want the spawn and exit events when 'profile' mode
    // is enabled. Technically we only care about exits for callgrind,
    // but spawn is cheap to keep and useful for message profilers.
    if (enif_is_identical(atom_profile, tuple[1])
        && !(enif_is_identical(atom_spawn, argv[0])
            || enif_is_identical(atom_exit, argv[0]))) {
        return atom_discard;
    }

    return atom_trace;
}

static ERL_NIF_TERM enabled_send(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
    // We always want both send and send_to_non_existing_process.
    return atom_trace;
}

static ERL_NIF_TERM enabled_receive(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
    // We always want receive.
    return atom_trace;
}

static ERL_NIF_TERM enabled_running_procs(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
    // We always want both in and out.
    return atom_trace;
}

static ERL_NIF_TERM enabled_garbage_collection(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
    // We always want gc_minor_start, gc_max_heap_size and gc_minor_end.
    return atom_trace;
}

// trace(TraceTag, TracerState, Tracee, TraceTerm, Opts)
static ERL_NIF_TERM trace(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
    // Debug output: %T makes enif_fprintf print an Erlang term.
    // Argument 1 (TracerState) is skipped.
    enif_fprintf(stdout, "IMY************trace:");
    for (int i = 0; i < argc; i++) {
        if (i != 1) {
            enif_fprintf(stdout, " %d %T", i, argv[i]);
        }
    }

    // ... remainder of trace/5 omitted from this excerpt ...
}
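
// The entries below must match the exported Erlang stubs of the module named
// in ERL_NIF_INIT (presumably tpTracerNif.erl) by function name and arity;
// loading the NIF library replaces those stubs with these C implementations.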

static ErlNifFunc nifFuns[] = {
    {"enabled", 3, enabled},
    {"enabled_call", 3, enabled_call},
    {"enabled_procs", 3, enabled_procs},
    {"enabled_send", 3, enabled_send},
    {"enabled_receive", 3, enabled_receive},
    {"enabled_running_procs", 3, enabled_running_procs},
    {"enabled_garbage_collection", 3, enabled_garbage_collection},
    {"trace", 5, trace},
};

ERL_NIF_INIT(tpTracerNif, nifFuns, load, NULL, upgrade, unload)
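
// ERL_NIF_INIT binds this library to the tpTracerNif module and registers the
// lifecycle hooks: load, reload (NULL here; the old reload mechanism is no
// longer supported), upgrade and unload.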