Browse source

Merge pull request #270 from basho/intg

Integrate patches into lager 3.0 release
pull/281/head 3.0.0
Mark Allen 9 years ago
parent
commit
26540665e6
21 changed files with 1445 additions and 521 deletions
  1. +133  -3    README.md
  2. +19   -1    include/lager.hrl
  3. +46   -4    rebar.config
  4. +52   -67   src/error_logger_lager_h.erl
  5. +10   -6    src/lager.app.src
  6. +250  -101  src/lager.erl
  7. +195  -69   src/lager_app.erl
  8. +32   -5    src/lager_backend_throttle.erl
  9. +39   -16   src/lager_config.erl
  10. +9    -8    src/lager_console_backend.erl
  11. +4    -1    src/lager_default_formatter.erl
  12. +73   -14   src/lager_file_backend.erl
  13. +29   -19   src/lager_handler_watcher.erl
  14. +6    -1    src/lager_sup.erl
  15. +153  -126  src/lager_transform.erl
  16. +19   -19   src/lager_trunc_io.erl
  17. +82   -20   src/lager_util.erl
  18. +16   -0    test/compress_pr_record_test.erl
  19. +2    -2    test/crash.erl
  20. +274  -35   test/lager_test_backend.erl
  21. +2    -4    test/pr_nested_record_test.erl

+133 -3  README.md

@ -16,9 +16,11 @@ Features
* When no handler is consuming a log level (eg. debug) no event is sent
to the log handler
* Supports multiple backends, including console and file.
* Supports multiple sinks
* Rewrites common OTP error messages into more readable messages
* Support for pretty printing records encountered at compile time
* Tolerant in the face of large or many log messages, won't out of memory the node
* Optional feature to bypass log size truncation ("unsafe")
* Supports internal time and date based rotation, as well as external rotation tools
* Syslog style log level comparison flags
* Colored terminal output (requires R16+)
@ -92,6 +94,84 @@ your app.config):
The available configuration options for each backend are listed in their
module's documentation.
Sinks
-----
Lager has traditionally supported a single sink (implemented as a
`gen_event` manager) named `lager_event` to which all backends were
connected.
Lager now supports extra sinks; each sink can have different
sync/async message thresholds and different backends.
### Sink configuration
To use multiple sinks (beyond the built-in sink of lager and lager_event), you
need to:
1. Setup rebar.config
2. Configure the backends in app.config
#### Names
Each sink has two names: one atom to be used like a module name for
sending messages, and that atom with `_lager_event` appended for backend
configuration.
This reflects the legacy behavior: `lager:info` (or `critical`, or
`debug`, etc) is a way of sending a message to a sink named
`lager_event`. Now developers can invoke `audit:info` or
`myCompanyName:debug` so long as the corresponding `audit_lager_event` or
`myCompanyName_lager_event` sinks are configured.
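
For example, with the parse transform enabled and an `audit` sink declared and
configured (see the rebar.config and app.config steps below), sending to that
sink is an ordinary module-style call. A minimal sketch, assuming that sink:

```erlang
%% Requires the parse transform and the `audit_lager_event' sink
%% configured as shown in the sections below.
audit:info("user ~p logged in", [alice]),
audit:critical("disk ~s is full", ["/dev/sda1"]).
```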
#### rebar.config
In `rebar.config` for the project that requires lager, include a list
of sink names (without the `_lager_event` suffix) in `erl_opts`:
`{lager_extra_sinks, [audit]}`
#### Runtime requirements
To be useful, sinks must be configured at runtime with backends.
In `app.config` for the project that requires lager, for example,
extend the lager configuration to include an `extra_sinks` tuple with
backends (aka "handlers") and optionally `async_threshold` and
`async_threshold_window` values (see **Overload Protection**
below). If async values are not configured, no overload protection
will be applied on that sink.
```erlang
[{lager, [
{log_root, "/tmp"},
%% Default handlers for lager/lager_event
{handlers, [
{lager_console_backend, info},
{lager_file_backend, [{file, "error.log"}, {level, error}]},
{lager_file_backend, [{file, "console.log"}, {level, info}]}
]},
%% Any other sinks
{extra_sinks,
[
{audit_lager_event,
[{handlers,
[{lager_file_backend,
[{file, "sink1.log"},
{level, info}
]
}]
},
{async_threshold, 500},
{async_threshold_window, 50}]
}]
}
]
}
].
```
Custom Formatting
-----------------
All loggers have a default formatting that can be overridden. A formatter is any module that
@ -113,7 +193,9 @@ Included is `lager_default_formatter`. This provides a generic, default formatt
* Any traditional iolist elements in the configuration are printed verbatim.
* Atoms in the configuration are treated as placeholders for lager metadata and extracted from the log message.
* The placeholders `date`, `time`, `message`, and `severity` will always exist.
* The placeholders `date`, `time`, `message`, `sev` and `severity` will always exist.
* `sev` is an abbreviated severity which is interpreted as a capitalized single letter encoding of the severity level
(e.g. `'debug'` -> `$D`)
* The placeholders `pid`, `file`, `line`, `module`, `function`, and `node` will always exist if the parse transform is used.
* Applications can define their own metadata placeholder.
* A tuple of `{atom(), semi-iolist()}` allows for a fallback for
@ -182,6 +264,30 @@ related processes crash, you can set a limit:
It is probably best to keep this number small.
"Unsafe"
--------
The unsafe code pathway bypasses the normal lager formatting code and uses the
same code as error_logger in OTP. This provides a marginal speedup to your logging
code (we measured between 0.5-1.3% improvement during our benchmarking; others have
reported better improvements.)
This is a **dangerous** feature. It *will not* protect you against
large log messages - large messages can kill your application and even your
Erlang VM dead due to memory exhaustion as large terms are copied over and
over in a failure cascade. We strongly recommend that this code pathway
only be used by log messages with a well bounded upper size of around 500 bytes.
If there's any possibility the log messages could exceed that limit, you should
use the normal lager message formatting code which will provide the appropriate
size limitations and protection against memory exhaustion.
If you want to format an unsafe log message, you may use the severity level (as
usual) followed by `_unsafe`. Here's an example:
```erlang
lager:info_unsafe("The quick brown ~s jumped over the lazy ~s", ["fox", "dog"]).
```
Runtime loglevel changes
------------------------
You can change the log level of any lager backend at runtime by doing the
@ -365,8 +471,8 @@ You can also specify multiple expressions in a filter, or use the `*` atom as
a wildcard to match any message that has that attribute, regardless of its
value.
Tracing to an existing logfile is also supported, if you wanted to log
warnings from a particular function in a particular module to the default `error.log`:
Tracing to an existing logfile is also supported (but see **Multiple
sink support** below):
```erlang
lager:trace_file("log/error.log", [{module, mymodule}, {function, myfunction}], warning)
@ -405,6 +511,30 @@ lager:trace_console([{request, '>', 117}, {request, '<', 120}])
Using `=` is equivalent to the 2-tuple form.
### Multiple sink support
If using multiple sinks, there are limitations on tracing that you
should be aware of.
Traces are specific to a sink, which can be specified via trace
filters:
```erlang
lager:trace_file("log/security.log", [{sink, audit}, {function, myfunction}], warning)
```
If no sink is thus specified, the default lager sink will be used.
This has two ramifications:
* Traces cannot intercept messages sent to a different sink.
* Tracing to a file already opened via `lager:trace_file` will only be
successful if the same sink is specified.
The former can be ameliorated by opening multiple traces; the latter
can be fixed by rearchitecting lager's file backend, but this has not
been tackled.
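
An illustrative sketch of the first workaround: open one trace per sink so that
matching messages reach a file regardless of which sink they were sent to (file
names here are made up, and distinct files avoid the file-in-use case above):

```erlang
%% One trace on the default sink, one on the `audit' sink.
lager:trace_file("log/mymodule.log", [{module, mymodule}, {function, myfunction}], warning),
lager:trace_file("log/security.log", [{sink, audit}, {module, mymodule}, {function, myfunction}], warning).
```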
Setting the truncation limit at compile-time
--------------------------------------------
Lager defaults to truncating messages at 4096 bytes, you can alter this by

+19 -1  include/lager.hrl

@ -17,10 +17,16 @@
-define(DEFAULT_TRUNCATION, 4096).
-define(DEFAULT_TRACER, lager_default_tracer).
-define(DEFAULT_SINK, lager_event).
-define(LEVELS,
[debug, info, notice, warning, error, critical, alert, emergency, none]).
%% Use of these "functions" means that the argument list will not be
%% truncated for safety
-define(LEVELS_UNSAFE,
[{debug_unsafe, debug}, {info_unsafe, info}, {notice_unsafe, notice}, {warning_unsafe, warning}, {error_unsafe, error}, {critical_unsafe, critical}, {alert_unsafe, alert}, {emergency_unsafe, emergency}]).
-define(DEBUG, 128).
-define(INFO, 64).
-define(NOTICE, 32).
@ -63,7 +69,7 @@
Level,
[{pid,Pid},{line,?LINE},{file,?FILE},{module,?MODULE}],
[])}
)).
)).
%% FOR INTERNAL USE ONLY
%% internal non-blocking logging call
@ -100,3 +106,15 @@
end)).
-endif.
-record(lager_shaper, {
%% how many messages per second we try to deliver
hwm = undefined :: 'undefined' | pos_integer(),
%% how many messages we've received this second
mps = 0 :: non_neg_integer(),
%% the current second
lasttime = os:timestamp() :: erlang:timestamp(),
%% count of dropped messages this second
dropped = 0 :: non_neg_integer()
}).
-type lager_shaper() :: #lager_shaper{}.
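
The shared `#lager_shaper{}` record replaces the per-handler rate-limiting
state that `error_logger_lager_h` used to carry (see that file below). A rough
sketch of the call pattern a backend follows, based on the `lager_util:check_hwm/1`
call sites in this diff; helper names are assumed, not part of the API:

```erlang
%% Sketch only: a backend consults the shaper before writing and keeps
%% the updated shaper in its state.
handle_log(Msg, Shaper) ->
    case lager_util:check_hwm(Shaper) of
        {true, Drop, NewShaper} when Drop > 0 ->
            %% allowed through; Drop messages were discarded during the
            %% previous second, so backends also emit a warning here
            {log_and_warn, Msg, Drop, NewShaper};
        {true, _, NewShaper} ->
            %% under the high water mark: write the message
            {log, Msg, NewShaper};
        {false, _, NewShaper} ->
            %% over the high water mark this second: drop the message
            {drop, NewShaper}
    end.
```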

+46 -4  rebar.config

@ -1,9 +1,51 @@
{erl_opts, [debug_info, warn_untyped_record]}.
%% -*- erlang -*-
%% -------------------------------------------------------------------
%%
%% Copyright (c) 2011-2015 Basho Technologies, Inc.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
{erl_opts, [
{lager_extra_sinks, ['__lager_test_sink']},
debug_info,
report,
verbose,
warn_deprecated_function,
warn_deprecated_type,
warn_export_all,
warn_export_vars,
warn_obsolete_guard,
warn_untyped_record,
warn_unused_import
% do NOT include warnings_as_errors, as rebar includes these options
% when compiling for eunit, and at least one test module has code that
% is deliberately broken and will generate an un-maskable warning
]}.
{erl_first_files, ["src/lager_util.erl"]}.
{eunit_opts, [verbose]}.
{eunit_compile_opts, [
nowarn_untyped_record,
nowarn_export_all
]}.
{deps, [
{goldrush, "0\.1\.6",
{git, "git://github.com/DeadZen/goldrush.git", {tag, "0.1.6"}}}
]}.
{goldrush, ".*", {git, "git://github.com/DeadZen/goldrush.git", {tag, "0.1.7"}}}
]}.
{xref_checks, []}.
{xref_queries, [{"(XC - UC) || (XU - X - B - lager_default_tracer : Mod - erlang:\"(is_map|map_size)\"/1 - maps:to_list/1)", []}]}.

+52 -67  src/error_logger_lager_h.erl

@ -1,4 +1,4 @@
%% Copyright (c) 2011-2012 Basho Technologies, Inc. All Rights Reserved.
%% Copyright (c) 2011-2015 Basho Technologies, Inc. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
@ -33,15 +33,10 @@
-export([format_reason/1]).
-record(state, {
%% how many messages per second we try to deliver
hwm = undefined :: 'undefined' | pos_integer(),
%% how many messages we've received this second
mps = 0 :: non_neg_integer(),
%% the current second
lasttime = os:timestamp() :: erlang:timestamp(),
%% count of dropped messages this second
dropped = 0 :: non_neg_integer()
-record(state, {
shaper :: lager_shaper(),
%% group leader strategy
groupleader_strategy :: handle | ignore | mirror
}).
-define(LOGMSG(Level, Pid, Msg),
@ -74,20 +69,27 @@ set_high_water(N) ->
gen_event:call(error_logger, ?MODULE, {set_high_water, N}, infinity).
-spec init(any()) -> {ok, #state{}}.
init([HighWaterMark]) ->
{ok, #state{hwm=HighWaterMark}}.
init([HighWaterMark, GlStrategy]) ->
Shaper = #lager_shaper{hwm=HighWaterMark},
{ok, #state{shaper=Shaper, groupleader_strategy=GlStrategy}}.
handle_call({set_high_water, N}, State) ->
{ok, ok, State#state{hwm = N}};
handle_call({set_high_water, N}, #state{shaper=Shaper} = State) ->
NewShaper = Shaper#lager_shaper{hwm=N},
{ok, ok, State#state{shaper = NewShaper}};
handle_call(_Request, State) ->
{ok, unknown_call, State}.
handle_event(Event, State) ->
case check_hwm(State) of
{true, NewState} ->
log_event(Event, NewState);
{false, NewState} ->
{ok, NewState}
handle_event(Event, #state{shaper=Shaper} = State) ->
case lager_util:check_hwm(Shaper) of
{true, 0, NewShaper} ->
eval_gl(Event, State#state{shaper=NewShaper});
{true, Drop, #lager_shaper{hwm=Hwm} = NewShaper} when Drop > 0 ->
?LOGFMT(warning, self(),
"lager_error_logger_h dropped ~p messages in the last second that exceeded the limit of ~p messages/sec",
[Drop, Hwm]),
eval_gl(Event, State#state{shaper=NewShaper});
{false, _, NewShaper} ->
{ok, State#state{shaper=NewShaper}}
end.
handle_info(_Info, State) ->
@ -101,49 +103,19 @@ code_change(_OldVsn, State, _Extra) ->
%% internal functions
check_hwm(State = #state{hwm = undefined}) ->
{true, State};
check_hwm(State = #state{mps = Mps, hwm = Hwm}) when Mps < Hwm ->
%% haven't hit high water mark yet, just log it
{true, State#state{mps=Mps+1}};
check_hwm(State = #state{hwm = Hwm, lasttime = Last, dropped = Drop}) ->
%% are we still in the same second?
{M, S, _} = Now = os:timestamp(),
case Last of
{M, S, _} ->
%% still in same second, but have exceeded the high water mark
NewDrops = discard_messages(Now, 0),
{false, State#state{dropped=Drop+NewDrops}};
_ ->
%% different second, reset all counters and allow it
case Drop > 0 of
true ->
?LOGFMT(warning, self(), "lager_error_logger_h dropped ~p messages in the last second that exceeded the limit of ~p messages/sec",
[Drop, Hwm]);
false ->
ok
end,
{true, State#state{dropped = 0, mps=1, lasttime = Now}}
end.
discard_messages(Second, Count) ->
{M, S, _} = os:timestamp(),
case Second of
{M, S, _} ->
receive
%% we only discard gen_event notifications, because
%% otherwise we might discard gen_event internal
%% messages, such as trapped EXITs
{notify, _Event} ->
discard_messages(Second, Count+1);
{_From, _Tag, {sync_notify, _Event}} ->
discard_messages(Second, Count+1)
after 0 ->
Count
end;
_ ->
Count
end.
eval_gl(Event, #state{groupleader_strategy=GlStrategy0}=State) when is_pid(element(2, Event)) ->
case element(2, Event) of
GL when node(GL) =/= node(), GlStrategy0 =:= ignore ->
gen_event:notify({error_logger, node(GL)}, Event),
{ok, State};
GL when node(GL) =/= node(), GlStrategy0 =:= mirror ->
gen_event:notify({error_logger, node(GL)}, Event),
log_event(Event, State);
_ ->
log_event(Event, State)
end;
eval_gl(Event, State) ->
log_event(Event, State).
log_event(Event, State) ->
case Event of
@ -183,6 +155,19 @@ log_event(Event, State) ->
"Cowboy handler ~p terminated in ~p:~p/~p with reason: ~s",
[Module, Module, Function, Arity, format_reason({Reason, StackTrace})])
end;
"Ranch listener "++_ ->
%% Ranch errors
?CRASH_LOG(Event),
case Args of
[Ref, _Protocol, Worker, {[{reason, Reason}, {mfa, {Module, Function, Arity}}, {stacktrace, StackTrace} | _], _}] ->
?LOGFMT(error, Worker,
"Ranch listener ~p terminated in ~p:~p/~p with reason: ~s",
[Ref, Module, Function, Arity, format_reason({Reason, StackTrace})]);
[Ref, _Protocol, Worker, Reason] ->
?LOGFMT(error, Worker,
"Ranch listener ~p terminated with reason: ~s",
[Ref, format_reason(Reason)])
end;
"webmachine error"++_ ->
%% Webmachine HTTP server error
?CRASH_LOG(Event),
@ -197,7 +182,7 @@ log_event(Event, State) ->
?LOGFMT(error, Pid, "Webmachine error at path ~p : ~s", [Path, format_reason(StackTrace)]);
_ ->
?CRASH_LOG(Event),
?LOGMSG(error, Pid, lager:safe_format(Fmt, Args, ?DEFAULT_TRUNCATION))
?LOGFMT(error, Pid, Fmt, Args)
end;
{error_report, _GL, {Pid, std_error, D}} ->
?CRASH_LOG(Event),
@ -217,11 +202,11 @@ log_event(Event, State) ->
?CRASH_LOG(Event),
?LOGMSG(error, Pid, "CRASH REPORT " ++ format_crash_report(Self, Neighbours));
{warning_msg, _GL, {Pid, Fmt, Args}} ->
?LOGMSG(warning, Pid, lager:safe_format(Fmt, Args, ?DEFAULT_TRUNCATION));
?LOGFMT(warning, Pid, Fmt, Args);
{warning_report, _GL, {Pid, std_warning, Report}} ->
?LOGMSG(warning, Pid, print_silly_list(Report));
{info_msg, _GL, {Pid, Fmt, Args}} ->
?LOGMSG(info, Pid, lager:safe_format(Fmt, Args, ?DEFAULT_TRUNCATION));
?LOGFMT(info, Pid, Fmt, Args);
{info_report, _GL, {Pid, std_info, D}} when is_list(D) ->
Details = lists:sort(D),
case Details of
@ -318,7 +303,7 @@ format_reason({function_clause, [MFA|_]}) ->
format_reason({if_clause, [MFA|_]}) ->
["no true branch found while evaluating if expression in ", format_mfa(MFA)];
format_reason({{try_clause, Val}, [MFA|_]}) ->
["no try clause matching ", print_val(Val), " in ", format_mfa(MFA)];
["no try clause matching ", print_val(Val), " in ", format_mfa(MFA)];
format_reason({badarith, [MFA|_]}) ->
["bad arithmetic expression in ", format_mfa(MFA)];
format_reason({{badmatch, Val}, [MFA|_]}) ->

+10 -6  src/lager.app.src

@ -13,9 +13,9 @@
{registered, [lager_sup, lager_event, lager_crash_log, lager_handler_watcher_sup]},
{mod, {lager_app, []}},
{env, [
%% Note: application:start(lager) overwrites previously defined environment variables
%% Note: application:start(lager) overwrites previously defined environment variables
%% thus declaration of default handlers is done at lager_app.erl
%% What colors to use with what log levels
{colored, false},
{colors, [
@ -43,14 +43,18 @@
%% Number of rotated crash logs to keep, 0 means keep only the
%% current one - default is 0
{crash_log_count, 5},
%% Whether to redirect error_logger messages into lager - defaults to true
%% Whether to redirect error_logger messages into the default lager_event sink - defaults to true
{error_logger_redirect, true},
%% How many messages per second to allow from error_logger before we start dropping them
{error_logger_hwm, 50},
%% How big the gen_event mailbox can get before it is switched into sync mode
%% How big the gen_event mailbox can get before it is
%% switched into sync mode. This value only applies to
%% the default sink; extra sinks can supply their own.
{async_threshold, 20},
%% Switch back to async mode, when gen_event mailbox size decrease from `async_threshold'
%% to async_threshold - async_threshold_window
%% Switch back to async mode, when gen_event mailbox size
%% decrease from `async_threshold' to async_threshold -
%% async_threshold_window. This value only applies to the
%% default sink; extra sinks can supply their own.
{async_threshold_window, 5}
]}
]}.

+250 -101  src/lager.erl

@ -21,17 +21,19 @@
-include("lager.hrl").
-define(LAGER_MD_KEY, '__lager_metadata').
-define(TRACE_SINK, '__trace_sink').
%% API
-export([start/0,
log/3, log/4,
log/3, log/4, log/5,
log_unsafe/4,
md/0, md/1,
trace/2, trace/3, trace_file/2, trace_file/3, trace_file/4, trace_console/1, trace_console/2,
clear_all_traces/0, stop_trace/1, stop_trace/3, status/0,
get_loglevel/1, set_loglevel/2, set_loglevel/3, get_loglevels/0,
update_loglevel_config/0, posix_error/1,
safe_format/3, safe_format_chop/3, dispatch_log/5, dispatch_log/9,
do_log/9, pr/2]).
list_all_sinks/0, clear_all_traces/0, stop_trace/1, stop_trace/3, status/0,
get_loglevel/1, get_loglevel/2, set_loglevel/2, set_loglevel/3, set_loglevel/4, get_loglevels/1,
update_loglevel_config/1, posix_error/1, set_loghwm/2, set_loghwm/3, set_loghwm/4,
safe_format/3, safe_format_chop/3, unsafe_format/2, dispatch_log/5, dispatch_log/7, dispatch_log/9,
do_log/9, do_log/10, do_log_unsafe/10, pr/2, pr/3]).
-type log_level() :: debug | info | notice | warning | error | critical | alert | emergency.
-type log_level_number() :: 0..7.
@ -81,51 +83,85 @@ md(NewMD) when is_list(NewMD) ->
md(_) ->
erlang:error(badarg).
-spec dispatch_log(log_level(), list(), string(), list() | none, pos_integer()) -> ok | {error, lager_not_running}.
-spec dispatch_log(atom(), log_level(), list(), string(), list() | none, pos_integer(), safe | unsafe) -> ok | {error, lager_not_running} | {error, {sink_not_configured, atom()}}.
%% this is the same check that the parse transform bakes into the module at compile time
dispatch_log(Severity, Metadata, Format, Args, Size) when is_atom(Severity)->
%% see lager_transform (lines 173-216)
dispatch_log(Sink, Severity, Metadata, Format, Args, Size, Safety) when is_atom(Severity)->
SeverityAsInt=lager_util:level_to_num(Severity),
case {whereis(lager_event), lager_config:get(loglevel, {?LOG_NONE, []})} of
{undefined, _} ->
{error, lager_not_running};
{Pid, {Level, Traces}} when (Level band SeverityAsInt) /= 0 orelse Traces /= [] ->
do_log(Severity, Metadata, Format, Args, Size, SeverityAsInt, Level, Traces, Pid);
_ ->
ok
case {whereis(Sink), whereis(?DEFAULT_SINK), lager_config:get({Sink, loglevel}, {?LOG_NONE, []})} of
{undefined, undefined, _} -> {error, lager_not_running};
{undefined, _LagerEventPid0, _} -> {error, {sink_not_configured, Sink}};
{SinkPid, _LagerEventPid1, {Level, Traces}} when Safety =:= safe andalso ( (Level band SeverityAsInt) /= 0 orelse Traces /= [] ) ->
do_log(Severity, Metadata, Format, Args, Size, SeverityAsInt, Level, Traces, Sink, SinkPid);
{SinkPid, _LagerEventPid1, {Level, Traces}} when Safety =:= unsafe andalso ( (Level band SeverityAsInt) /= 0 orelse Traces /= [] ) ->
do_log_unsafe(Severity, Metadata, Format, Args, Size, SeverityAsInt, Level, Traces, Sink, SinkPid);
_ -> ok
end.
%% @private Should only be called externally from code generated from the parse transform
do_log(Severity, Metadata, Format, Args, Size, SeverityAsInt, LevelThreshold, TraceFilters, Pid) when is_atom(Severity) ->
Destinations = case TraceFilters of
do_log(Severity, Metadata, Format, Args, Size, SeverityAsInt, LevelThreshold, TraceFilters, Sink, SinkPid) when is_atom(Severity) ->
FormatFun = fun() -> safe_format_chop(Format, Args, Size) end,
do_log_impl(Severity, Metadata, Format, Args, SeverityAsInt, LevelThreshold, TraceFilters, Sink, SinkPid, FormatFun).
do_log_impl(Severity, Metadata, Format, Args, SeverityAsInt, LevelThreshold, TraceFilters, Sink, SinkPid, FormatFun) ->
{Destinations, TraceSinkPid} = case TraceFilters of
[] ->
[];
{[], undefined};
_ ->
lager_util:check_traces(Metadata,SeverityAsInt,TraceFilters,[])
{lager_util:check_traces(Metadata,SeverityAsInt,TraceFilters,[]), whereis(?TRACE_SINK)}
end,
case (LevelThreshold band SeverityAsInt) /= 0 orelse Destinations /= [] of
true ->
Msg = case Args of
A when is_list(A) ->
safe_format_chop(Format,Args,Size);
FormatFun();
_ ->
Format
end,
LagerMsg = lager_msg:new(Msg,
Severity, Metadata, Destinations),
case lager_config:get(async, false) of
case lager_config:get({Sink, async}, false) of
true ->
gen_event:notify(SinkPid, {log, LagerMsg});
false ->
gen_event:sync_notify(SinkPid, {log, LagerMsg})
end,
case TraceSinkPid /= undefined of
true ->
gen_event:notify(Pid, {log, LagerMsg});
gen_event:notify(TraceSinkPid, {log, LagerMsg});
false ->
gen_event:sync_notify(Pid, {log, LagerMsg})
ok
end;
false ->
ok
end.
%% @private Should only be called externally from code generated from the parse transform
%% Specifically, it would be level ++ `_unsafe' as in `info_unsafe'.
do_log_unsafe(Severity, Metadata, Format, Args, _Size, SeverityAsInt, LevelThreshold, TraceFilters, Sink, SinkPid) when is_atom(Severity) ->
FormatFun = fun() -> unsafe_format(Format, Args) end,
do_log_impl(Severity, Metadata, Format, Args, SeverityAsInt, LevelThreshold, TraceFilters, Sink, SinkPid, FormatFun).
%% backwards compatible with beams compiled with lager 1.x
dispatch_log(Severity, _Module, _Function, _Line, _Pid, Metadata, Format, Args, Size) ->
dispatch_log(Severity, Metadata, Format, Args, Size).
%% backwards compatible with beams compiled with lager 2.x
dispatch_log(Severity, Metadata, Format, Args, Size) ->
dispatch_log(?DEFAULT_SINK, Severity, Metadata, Format, Args, Size, safe).
%% backwards compatible with beams compiled with lager 2.x
do_log(Severity, Metadata, Format, Args, Size, SeverityAsInt, LevelThreshold, TraceFilters, SinkPid) ->
do_log(Severity, Metadata, Format, Args, Size, SeverityAsInt,
LevelThreshold, TraceFilters, ?DEFAULT_SINK, SinkPid).
%% TODO:
%% Consider making log2/4 that takes the Level, Pid and Message params of log/3
%% along with a Sink param??
%% @doc Manually log a message into lager without using the parse transform.
-spec log(log_level(), pid() | atom() | [tuple(),...], list()) -> ok | {error, lager_not_running}.
log(Level, Pid, Message) when is_pid(Pid); is_atom(Pid) ->
@ -140,6 +176,27 @@ log(Level, Pid, Format, Args) when is_pid(Pid); is_atom(Pid) ->
log(Level, Metadata, Format, Args) when is_list(Metadata) ->
dispatch_log(Level, Metadata, Format, Args, ?DEFAULT_TRUNCATION).
log_unsafe(Level, Metadata, Format, Args) when is_list(Metadata) ->
dispatch_log(?DEFAULT_SINK, Level, Metadata, Format, Args, ?DEFAULT_TRUNCATION, unsafe).
%% @doc Manually log a message into lager without using the parse transform.
-spec log(atom(), log_level(), pid() | atom() | [tuple(),...], string(), list()) -> ok | {error, lager_not_running}.
log(Sink, Level, Pid, Format, Args) when is_pid(Pid); is_atom(Pid) ->
dispatch_log(Sink, Level, [{pid,Pid}], Format, Args, ?DEFAULT_TRUNCATION, safe);
log(Sink, Level, Metadata, Format, Args) when is_list(Metadata) ->
dispatch_log(Sink, Level, Metadata, Format, Args, ?DEFAULT_TRUNCATION, safe).
validate_trace_filters(Filters, Level, Backend) ->
Sink = proplists:get_value(sink, Filters, ?DEFAULT_SINK),
{Sink,
lager_util:validate_trace({
proplists:delete(sink, Filters),
Level,
Backend
})
}.
trace_file(File, Filter) ->
trace_file(File, Filter, debug, []).
@ -150,28 +207,38 @@ trace_file(File, Filter, Options) when is_list(Options) ->
trace_file(File, Filter, debug, Options).
trace_file(File, Filter, Level, Options) ->
Trace0 = {Filter, Level, {lager_file_backend, File}},
case lager_util:validate_trace(Trace0) of
{ok, Trace} ->
Handlers = gen_event:which_handlers(lager_event),
FileName = lager_util:expand_path(File),
case validate_trace_filters(Filter, Level, {lager_file_backend, FileName}) of
{Sink, {ok, Trace}} ->
Handlers = lager_config:global_get(handlers, []),
%% check if this file backend is already installed
Res = case lists:member({lager_file_backend, File}, Handlers) of
false ->
%% install the handler
LogFileConfig = lists:keystore(level, 1, lists:keystore(file, 1, Options, {file, File}), {level, none}),
supervisor:start_child(lager_handler_watcher_sup,
[lager_event, {lager_file_backend, File}, LogFileConfig]);
_ ->
{ok, exists}
Res = case lists:keyfind({lager_file_backend, FileName}, 1, Handlers) of
false ->
%% install the handler
LogFileConfig =
lists:keystore(level, 1,
lists:keystore(file, 1,
Options,
{file, FileName}),
{level, none}),
HandlerInfo =
lager_app:start_handler(Sink, {lager_file_backend, FileName},
LogFileConfig),
lager_config:global_set(handlers, [HandlerInfo|Handlers]),
{ok, installed};
{_Watcher, _Handler, Sink} ->
{ok, exists};
{_Watcher, _Handler, _OtherSink} ->
{error, file_in_use}
end,
case Res of
{ok, _} ->
add_trace_to_loglevel_config(Trace),
{ok, {{lager_file_backend, File}, Filter, Level}};
add_trace_to_loglevel_config(Trace, Sink),
{ok, {{lager_file_backend, FileName}, Filter, Level}};
{error, _} = E ->
E
end;
Error ->
{_Sink, Error} ->
Error
end.
@ -188,39 +255,37 @@ trace({lager_file_backend, File}, Filter, Level) ->
trace_file(File, Filter, Level);
trace(Backend, Filter, Level) ->
Trace0 = {Filter, Level, Backend},
case lager_util:validate_trace(Trace0) of
{ok, Trace} ->
add_trace_to_loglevel_config(Trace),
case validate_trace_filters(Filter, Level, Backend) of
{Sink, {ok, Trace}} ->
add_trace_to_loglevel_config(Trace, Sink),
{ok, {Backend, Filter, Level}};
Error ->
{_Sink, Error} ->
Error
end.
stop_trace(Backend, Filter, Level) ->
Trace0 = {Filter, Level, Backend},
case lager_util:validate_trace(Trace0) of
{ok, Trace} ->
stop_trace_int(Trace);
Error ->
case validate_trace_filters(Filter, Level, Backend) of
{Sink, {ok, Trace}} ->
stop_trace_int(Trace, Sink);
{_Sink, Error} ->
Error
end.
stop_trace({Backend, Filter, Level}) ->
stop_trace(Backend, Filter, Level).
stop_trace_int({Backend, _Filter, _Level} = Trace) ->
{Level, Traces} = lager_config:get(loglevel),
stop_trace_int({Backend, _Filter, _Level} = Trace, Sink) ->
{Level, Traces} = lager_config:get({Sink, loglevel}),
NewTraces = lists:delete(Trace, Traces),
_ = lager_util:trace_filter([ element(1, T) || T <- NewTraces ]),
%MinLevel = minimum_loglevel(get_loglevels() ++ get_trace_levels(NewTraces)),
lager_config:set(loglevel, {Level, NewTraces}),
case get_loglevel(Backend) of
lager_config:set({Sink, loglevel}, {Level, NewTraces}),
case get_loglevel(Sink, Backend) of
none ->
%% check no other traces point here
case lists:keyfind(Backend, 3, NewTraces) of
false ->
gen_event:delete_handler(lager_event, Backend, []);
gen_event:delete_handler(Sink, Backend, []);
_ ->
ok
end;
@ -229,37 +294,69 @@ stop_trace_int({Backend, _Filter, _Level} = Trace) ->
end,
ok.
list_all_sinks() ->
sets:to_list(
lists:foldl(fun({_Watcher, _Handler, Sink}, Set) ->
sets:add_element(Sink, Set)
end,
sets:new(),
lager_config:global_get(handlers, []))).
clear_traces_by_sink(Sinks) ->
lists:foreach(fun(S) ->
{Level, _Traces} =
lager_config:get({S, loglevel}),
lager_config:set({S, loglevel},
{Level, []})
end,
Sinks).
clear_all_traces() ->
{Level, _Traces} = lager_config:get(loglevel),
Handlers = lager_config:global_get(handlers, []),
clear_traces_by_sink(list_all_sinks()),
_ = lager_util:trace_filter(none),
lager_config:set(loglevel, {Level, []}),
lists:foreach(fun(Handler) ->
case get_loglevel(Handler) of
none ->
gen_event:delete_handler(lager_event, Handler, []);
_ ->
ok
end
end, gen_event:which_handlers(lager_event)).
lager_config:global_set(handlers,
lists:filter(
fun({Handler, _Watcher, Sink}) ->
case get_loglevel(Sink, Handler) of
none ->
gen_event:delete_handler(Sink, Handler, []),
false;
_ ->
true
end
end, Handlers)).
find_traces(Sinks) ->
lists:foldl(fun(S, Acc) ->
{_Level, Traces} = lager_config:get({S, loglevel}),
Acc ++ lists:map(fun(T) -> {S, T} end, Traces)
end,
[],
Sinks).
status() ->
Handlers = gen_event:which_handlers(lager_event),
TraceCount = case length(element(2, lager_config:get(loglevel))) of
Handlers = lager_config:global_get(handlers, []),
Sinks = lists:sort(list_all_sinks()),
Traces = find_traces(Sinks),
TraceCount = case length(Traces) of
0 -> 1;
N -> N
end,
Status = ["Lager status:\n",
[begin
Level = get_loglevel(Handler),
Level = get_loglevel(Sink, Handler),
case Handler of
{lager_file_backend, File} ->
io_lib:format("File ~s at level ~p\n", [File, Level]);
io_lib:format("File ~s (~s) at level ~p\n", [File, Sink, Level]);
lager_console_backend ->
io_lib:format("Console at level ~p\n", [Level]);
io_lib:format("Console (~s) at level ~p\n", [Sink, Level]);
_ ->
[]
end
end || Handler <- Handlers],
end || {Handler, _Watcher, Sink} <- lists:sort(fun({_, _, S1},
{_, _, S2}) -> S1 =< S2 end,
Handlers)],
"Active Traces:\n",
[begin
LevelName = case Level of
@ -271,9 +368,9 @@ status() ->
Num ->
lager_util:num_to_level(Num)
end,
io_lib:format("Tracing messages matching ~p at level ~p to ~p\n",
[Filter, LevelName, Destination])
end || {Filter, Level, Destination} <- element(2, lager_config:get(loglevel))],
io_lib:format("Tracing messages matching ~p (sink ~s) at level ~p to ~p\n",
[Filter, Sink, LevelName, Destination])
end || {Sink, {Filter, Level, Destination}} <- Traces],
[
"Tracing Reductions:\n",
case ?DEFAULT_TRACER:info('query') of
@ -283,7 +380,7 @@ status() ->
],
[
"Tracing Statistics:\n ",
[ begin
[ begin
[" ", atom_to_list(Table), ": ",
integer_to_list(?DEFAULT_TRACER:info(Table) div TraceCount),
"\n"]
@ -294,21 +391,34 @@ status() ->
%% @doc Set the loglevel for a particular backend.
set_loglevel(Handler, Level) when is_atom(Level) ->
Reply = gen_event:call(lager_event, Handler, {set_loglevel, Level}, infinity),
update_loglevel_config(),
Reply.
set_loglevel(?DEFAULT_SINK, Handler, undefined, Level).
%% @doc Set the loglevel for a particular backend that has multiple identifiers
%% (eg. the file backend).
set_loglevel(Handler, Ident, Level) when is_atom(Level) ->
Reply = gen_event:call(lager_event, {Handler, Ident}, {set_loglevel, Level}, infinity),
update_loglevel_config(),
set_loglevel(?DEFAULT_SINK, Handler, Ident, Level).
%% @doc Set the loglevel for a particular sink's backend that potentially has
%% multiple identifiers. (Use `undefined' if it doesn't have any.)
set_loglevel(Sink, Handler, Ident, Level) when is_atom(Level) ->
HandlerArg = case Ident of
undefined -> Handler;
_ -> {Handler, Ident}
end,
Reply = gen_event:call(Sink, HandlerArg, {set_loglevel, Level}, infinity),
update_loglevel_config(Sink),
Reply.
%% @doc Get the loglevel for a particular backend. In the case that the backend
%% has multiple identifiers, the lowest is returned
%% @doc Get the loglevel for a particular backend on the default sink. In the case that the backend
%% has multiple identifiers, the lowest is returned.
get_loglevel(Handler) ->
case gen_event:call(lager_event, Handler, get_loglevel, infinity) of
get_loglevel(?DEFAULT_SINK, Handler).
%% @doc Get the loglevel for a particular sink's backend. In the case that the backend
%% has multiple identifiers, the lowest is returned.
get_loglevel(Sink, Handler) ->
case gen_event:call(Sink, Handler, get_loglevel, infinity) of
{mask, Mask} ->
case lager_util:mask_to_levels(Mask) of
[] -> none;
@ -330,27 +440,39 @@ posix_error(Error) ->
safe_format_chop("~p", [Error], ?DEFAULT_TRUNCATION).
%% @private
get_loglevels() ->
[gen_event:call(lager_event, Handler, get_loglevel, infinity) ||
Handler <- gen_event:which_handlers(lager_event)].
get_loglevels(Sink) ->
[gen_event:call(Sink, Handler, get_loglevel, infinity) ||
Handler <- gen_event:which_handlers(Sink)].
%% @doc Set the loghwm for the default sink.
set_loghwm(Handler, Hwm) when is_integer(Hwm) ->
set_loghwm(?DEFAULT_SINK, Handler, Hwm).
%% @doc Set the loghwm for a particular backend.
set_loghwm(Sink, Handler, Hwm) when is_integer(Hwm) ->
gen_event:call(Sink, Handler, {set_loghwm, Hwm}, infinity).
%% @doc Set the loghwm (log high water mark) for file backends with multiple identifiers
set_loghwm(Sink, Handler, Ident, Hwm) when is_integer(Hwm) ->
gen_event:call(Sink, {Handler, Ident}, {set_loghwm, Hwm}, infinity).
%% @private
add_trace_to_loglevel_config(Trace) ->
{MinLevel, Traces} = lager_config:get(loglevel),
add_trace_to_loglevel_config(Trace, Sink) ->
{MinLevel, Traces} = lager_config:get({Sink, loglevel}),
case lists:member(Trace, Traces) of
false ->
NewTraces = [Trace|Traces],
_ = lager_util:trace_filter([ element(1, T) || T <- NewTraces]),
lager_config:set(loglevel, {MinLevel, [Trace|Traces]});
lager_config:set({Sink, loglevel}, {MinLevel, [Trace|Traces]});
_ ->
ok
end.
%% @doc recalculate min log level
update_loglevel_config() ->
{_, Traces} = lager_config:get(loglevel),
MinLog = minimum_loglevel(get_loglevels()),
lager_config:set(loglevel, {MinLog, Traces}).
update_loglevel_config(Sink) ->
{_, Traces} = lager_config:get({Sink, loglevel}, {ignore_me, []}),
MinLog = minimum_loglevel(get_loglevels(Sink)),
lager_config:set({Sink, loglevel}, {MinLog, Traces}).
%% @private
minimum_loglevel(Levels) ->
@ -381,49 +503,76 @@ safe_format(Fmt, Args, Limit, Options) ->
safe_format_chop(Fmt, Args, Limit) ->
safe_format(Fmt, Args, Limit, [{chomp, true}]).
%% @private Print the format string `Fmt' with `Args' without a size limit.
%% This is unsafe because the output of this function is unbounded.
%%
%% Log messages with unbounded size will kill your application dead as
%% OTP mechanisms struggle to cope with them. So this function is
%% intended <b>only</b> for messages which have a reasonable bounded
%% size before they're formatted.
%%
%% If the format string is invalid or not enough arguments are
%% supplied a 'FORMAT ERROR' message is printed instead with the
%% offending arguments. The caller is NOT crashed.
unsafe_format(Fmt, Args) ->
try io_lib:format(Fmt, Args)
catch
_:_ -> io_lib:format("FORMAT ERROR: ~p ~p", [Fmt, Args])
end.
%% @doc Print a record lager found during parse transform
pr(Record, Module) when is_tuple(Record), is_atom(element(1, Record)) ->
pr(Record, Module, []);
pr(Record, _) ->
Record.
%% @doc Print a record lager found during parse transform
pr(Record, Module, Options) when is_tuple(Record), is_atom(element(1, Record)), is_list(Options) ->
try
case is_record_known(Record, Module) of
false ->
Record;
{RecordName, RecordFields} ->
{'$lager_record', RecordName,
zip(RecordFields, tl(tuple_to_list(Record)), Module, [])}
zip(RecordFields, tl(tuple_to_list(Record)), Module, Options, [])}
end
catch
error:undef ->
Record
end;
pr(Record, _) ->
pr(Record, _, _) ->
Record.
zip([FieldName|RecordFields], [FieldValue|Record], Module, ToReturn) ->
zip([FieldName|RecordFields], [FieldValue|Record], Module, Options, ToReturn) ->
Compress = lists:member(compress, Options),
case is_tuple(FieldValue) andalso
tuple_size(FieldValue) > 0 andalso
is_atom(element(1, FieldValue)) andalso
is_record_known(FieldValue, Module) of
false when Compress andalso FieldValue =:= undefined ->
zip(RecordFields, Record, Module, Options, ToReturn);
false ->
zip(RecordFields, Record, Module, [{FieldName, FieldValue}|ToReturn]);
zip(RecordFields, Record, Module, Options, [{FieldName, FieldValue}|ToReturn]);
_Else ->
F = {FieldName, pr(FieldValue, Module)},
zip(RecordFields, Record, Module, [F|ToReturn])
F = {FieldName, pr(FieldValue, Module, Options)},
zip(RecordFields, Record, Module, Options, [F|ToReturn])
end;
zip([], [], _Module, ToReturn) ->
zip([], [], _Module, _Compress, ToReturn) ->
lists:reverse(ToReturn).
is_record_known(Record, Module) ->
is_record_known(Record, Module) ->
Name = element(1, Record),
Attrs = Module:module_info(attributes),
case lists:keyfind(lager_records, 1, Attrs) of
false -> false;
{lager_records, Records} ->
{lager_records, Records} ->
case lists:keyfind(Name, 1, Records) of
false -> false;
{Name, RecordFields} ->
{Name, RecordFields} ->
case (tuple_size(Record) - 1) =:= length(RecordFields) of
false -> false;
true -> {Name, RecordFields}
end
end
end.
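
With sinks threaded through `dispatch_log` and `do_log`, messages can also be
logged manually to a specific sink via the new `log/5`. An illustrative example;
since `dispatch_log/7` calls `whereis(Sink)`, the sink argument here is assumed
to be the registered event name (the `_lager_event` form):

```erlang
%% Manual logging without the parse transform.
lager:log(info, self(), "hello from the default sink"),
lager:log(audit_lager_event, info, self(), "audit: ~p", [login_ok]).
```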

+195 -69  src/lager_app.erl

@ -27,93 +27,193 @@
-endif.
-export([start/0,
start/2,
start_handler/3,
stop/1]).
-define(FILENAMES, '__lager_file_backend_filenames').
-define(THROTTLE, lager_backend_throttle).
-define(DEFAULT_HANDLER_CONF,
[{lager_console_backend, info},
{lager_file_backend,
[{file, "log/error.log"}, {level, error},
{size, 10485760}, {date, "$D0"}, {count, 5}]
},
{lager_file_backend,
[{file, "log/console.log"}, {level, info},
{size, 10485760}, {date, "$D0"}, {count, 5}]
}
]).
start() ->
application:start(lager).
start(_StartType, _StartArgs) ->
{ok, Pid} = lager_sup:start_link(),
start_throttle(Sink, Threshold, Window) ->
_ = supervisor:start_child(lager_handler_watcher_sup,
[Sink, ?THROTTLE, [Threshold, Window]]),
ok.
determine_async_behavior(_Sink, {ok, undefined}, _Window) ->
ok;
determine_async_behavior(_Sink, undefined, _Window) ->
ok;
determine_async_behavior(_Sink, {ok, Threshold}, _Window) when not is_integer(Threshold) orelse Threshold < 0 ->
error_logger:error_msg("Invalid value for 'async_threshold': ~p~n",
[Threshold]),
throw({error, bad_config});
determine_async_behavior(Sink, {ok, Threshold}, undefined) ->
start_throttle(Sink, Threshold, erlang:trunc(Threshold * 0.2));
determine_async_behavior(_Sink, {ok, Threshold}, {ok, Window}) when not is_integer(Window) orelse Window > Threshold orelse Window < 0 ->
error_logger:error_msg(
"Invalid value for 'async_threshold_window': ~p~n", [Window]),
throw({error, bad_config});
determine_async_behavior(Sink, {ok, Threshold}, {ok, Window}) ->
start_throttle(Sink, Threshold, Window).
case application:get_env(lager, async_threshold) of
undefined ->
ok;
{ok, undefined} ->
undefined;
{ok, Threshold} when is_integer(Threshold), Threshold >= 0 ->
DefWindow = erlang:trunc(Threshold * 0.2), % maybe 0?
ThresholdWindow =
case application:get_env(lager, async_threshold_window) of
start_handlers(_Sink, undefined) ->
ok;
start_handlers(_Sink, Handlers) when not is_list(Handlers) ->
error_logger:error_msg(
"Invalid value for 'handlers' (must be list): ~p~n", [Handlers]),
throw({error, bad_config});
start_handlers(Sink, Handlers) ->
%% handlers failing to start are handled in the handler_watcher
lager_config:global_set(handlers,
lager_config:global_get(handlers, []) ++
lists:map(fun({Module, Config}) ->
check_handler_config(Module, Config),
start_handler(Sink, Module, Config);
(_) ->
throw({error, bad_config})
end,
expand_handlers(Handlers))),
ok.
start_handler(Sink, Module, Config) ->
{ok, Watcher} = supervisor:start_child(lager_handler_watcher_sup,
[Sink, Module, Config]),
{Module, Watcher, Sink}.
check_handler_config({lager_file_backend, F}, Config) when is_list(Config) ->
Fs = case get(?FILENAMES) of
undefined -> ordsets:new();
X -> X
end,
case ordsets:is_element(F, Fs) of
true ->
error_logger:error_msg(
"Cannot have same file (~p) in multiple file backends~n", [F]),
throw({error, bad_config});
false ->
put(?FILENAMES,
ordsets:add_element(F, Fs))
end,
ok;
check_handler_config(_Handler, Config) when is_list(Config) orelse is_atom(Config) ->
ok;
check_handler_config(Handler, _BadConfig) ->
throw({error, {bad_config, Handler}}).
clean_up_config_checks() ->
erase(?FILENAMES).
interpret_hwm(undefined) ->
undefined;
interpret_hwm({ok, undefined}) ->
undefined;
interpret_hwm({ok, HWM}) when not is_integer(HWM) orelse HWM < 0 ->
_ = lager:log(warning, self(), "Invalid error_logger high water mark: ~p, disabling", [HWM]),
undefined;
interpret_hwm({ok, HWM}) ->
HWM.
start_error_logger_handler({ok, false}, _HWM, _Whitelist) ->
[];
start_error_logger_handler(_, HWM, undefined) ->
start_error_logger_handler(ignore_me, HWM, {ok, []});
start_error_logger_handler(_, HWM, {ok, WhiteList}) ->
GlStrategy = case application:get_env(lager, error_logger_groupleader_strategy) of
undefined ->
DefWindow;
{ok, Window} when is_integer(Window), Window < Threshold, Window >= 0 ->
Window;
{ok, BadWindow} ->
handle;
{ok, GlStrategy0} when
GlStrategy0 =:= handle;
GlStrategy0 =:= ignore;
GlStrategy0 =:= mirror ->
GlStrategy0;
{ok, BadGlStrategy} ->
error_logger:error_msg(
"Invalid value for 'async_threshold_window': ~p~n", [BadWindow]),
"Invalid value for 'error_logger_groupleader_strategy': ~p~n",
[BadGlStrategy]),
throw({error, bad_config})
end,
_ = supervisor:start_child(lager_handler_watcher_sup,
[lager_event, lager_backend_throttle, [Threshold, ThresholdWindow]]),
ok;
{ok, BadThreshold} ->
error_logger:error_msg("Invalid value for 'async_threshold': ~p~n", [BadThreshold]),
throw({error, bad_config})
end,
Handlers = case application:get_env(lager, handlers) of
undefined ->
[{lager_console_backend, info},
{lager_file_backend, [{file, "log/error.log"}, {level, error}, {size, 10485760}, {date, "$D0"}, {count, 5}]},
{lager_file_backend, [{file, "log/console.log"}, {level, info}, {size, 10485760}, {date, "$D0"}, {count, 5}]}];
{ok, Val} ->
Val
end,
case supervisor:start_child(lager_handler_watcher_sup, [error_logger, error_logger_lager_h, [HWM, GlStrategy]]) of
{ok, _} ->
[begin error_logger:delete_report_handler(X), X end ||
X <- gen_event:which_handlers(error_logger) -- [error_logger_lager_h | WhiteList]];
{error, _} ->
[]
end.
%% handlers failing to start are handled in the handler_watcher
_ = [supervisor:start_child(lager_handler_watcher_sup, [lager_event, Module, Config]) ||
{Module, Config} <- expand_handlers(Handlers)],
%% `determine_async_behavior/3' is called with the results from either
%% `application:get_env/2' and `proplists:get_value/2'. Since
%% `application:get_env/2' wraps a successful retrieval in an `{ok,
%% Value}' tuple, do the same for the result from
%% `proplists:get_value/2'.
wrap_proplist_value(undefined) ->
undefined;
wrap_proplist_value(Value) ->
{ok, Value}.
configure_sink(Sink, SinkDef) ->
lager_config:new_sink(Sink),
ChildId = lager_util:make_internal_sink_name(Sink),
_ = supervisor:start_child(lager_sup,
{ChildId,
{gen_event, start_link,
[{local, Sink}]},
permanent, 5000, worker, dynamic}),
determine_async_behavior(Sink,
wrap_proplist_value(
proplists:get_value(async_threshold, SinkDef)),
wrap_proplist_value(
proplists:get_value(async_threshold_window, SinkDef))
),
start_handlers(Sink,
proplists:get_value(handlers, SinkDef, [])),
lager:update_loglevel_config(Sink).
configure_extra_sinks(Sinks) ->
lists:foreach(fun({Sink, Proplist}) -> configure_sink(Sink, Proplist) end,
Sinks).
start(_StartType, _StartArgs) ->
{ok, Pid} = lager_sup:start_link(),
%% Handle the default sink.
determine_async_behavior(?DEFAULT_SINK,
application:get_env(lager, async_threshold),
application:get_env(lager, async_threshold_window)),
start_handlers(?DEFAULT_SINK,
application:get_env(lager, handlers, ?DEFAULT_HANDLER_CONF)),
ok = add_configured_traces(),
%% mask the messages we have no use for
lager:update_loglevel_config(),
HighWaterMark = case application:get_env(lager, error_logger_hwm) of
{ok, undefined} ->
undefined;
{ok, HwmVal} when is_integer(HwmVal), HwmVal > 0 ->
HwmVal;
{ok, BadVal} ->
_ = lager:log(warning, self(), "Invalid error_logger high water mark: ~p, disabling", [BadVal]),
undefined;
undefined ->
undefined
end,
lager:update_loglevel_config(?DEFAULT_SINK),
SavedHandlers =
case application:get_env(lager, error_logger_redirect) of
{ok, false} ->
[];
_ ->
WhiteList = case application:get_env(lager, error_logger_whitelist) of
undefined ->
[];
{ok, WhiteList0} ->
WhiteList0
end,
SavedHandlers = start_error_logger_handler(
application:get_env(lager, error_logger_redirect),
interpret_hwm(application:get_env(lager, error_logger_hwm)),
application:get_env(lager, error_logger_whitelist)
),
case supervisor:start_child(lager_handler_watcher_sup, [error_logger, error_logger_lager_h, [HighWaterMark]]) of
{ok, _} ->
[begin error_logger:delete_report_handler(X), X end ||
X <- gen_event:which_handlers(error_logger) -- [error_logger_lager_h | WhiteList]];
{error, _} ->
[]
end
end,
_ = lager_util:trace_filter(none),
%% Now handle extra sinks
configure_extra_sinks(application:get_env(lager, extra_sinks, [])),
_ = lager_util:trace_filter(none),
clean_up_config_checks(),
{ok, Pid, SavedHandlers}.
@ -153,7 +253,7 @@ add_configured_traces() ->
maybe_make_handler_id(Mod, Config) ->
%% Allow the backend to generate a gen_event handler id, if it wants to.
%% We don't use erlang:function_exported here because that requires the module
%% We don't use erlang:function_exported here because that requires the module
%% already be loaded, which is unlikely at this phase of startup. Using code:load
%% caused undesirable side-effects with generating code-coverage reports.
try Mod:config_to_id(Config) of
@ -229,4 +329,30 @@ application_config_mangling_test_() ->
)
}
].
check_handler_config_test_() ->
Good = expand_handlers(?DEFAULT_HANDLER_CONF),
Bad = expand_handlers([{lager_console_backend, info},
{lager_file_backend, [{file, "same_file.log"}]},
{lager_file_backend, [{file, "same_file.log"}, {level, info}]}]),
AlsoBad = [{lager_logstash_backend,
{level, info},
{output, {udp, "localhost", 5000}},
{format, json},
{json_encoder, jiffy}}],
BadToo = [{fail, {fail}}],
[
{"lager_file_backend_good",
?_assertEqual([ok, ok, ok], [ check_handler_config(M,C) || {M,C} <- Good ])
},
{"lager_file_backend_bad",
?_assertThrow({error, bad_config}, [ check_handler_config(M,C) || {M,C} <- Bad ])
},
{"Invalid config dies",
?_assertThrow({error, bad_config}, start_handlers(foo, AlsoBad))
},
{"Invalid config dies",
?_assertThrow({error, {bad_config, _}}, start_handlers(foo, BadToo))
}
].
-endif.
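
One of the new env values read here is `error_logger_groupleader_strategy`,
which becomes the `GlStrategy` argument to `error_logger_lager_h`. A possible
`app.config` entry, with the three values accepted by
`start_error_logger_handler/3` above (behavior as implemented in `eval_gl/2`
in this diff):

```erlang
[{lager, [
    %% handle (default): log the event locally regardless of the group leader's node
    %% ignore: events with a remote group leader are forwarded to that node only
    %% mirror: forwarded to the remote node and also logged locally
    {error_logger_groupleader_strategy, mirror}
]}].
```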

+32 -5  src/lager_backend_throttle.erl

@ -29,15 +29,27 @@
-export([init/1, handle_call/2, handle_event/2, handle_info/2, terminate/2,
code_change/3]).
%%
%% Allow test code to verify that we're doing the needful.
-ifdef(TEST).
-define(ETS_TABLE, async_threshold_test).
-define(TOGGLE_SYNC(), test_increment(sync_toggled)).
-define(TOGGLE_ASYNC(), test_increment(async_toggled)).
-else.
-define(TOGGLE_SYNC(), true).
-define(TOGGLE_ASYNC(), true).
-endif.
-record(state, {
sink :: atom(),
hwm :: non_neg_integer(),
window_min :: non_neg_integer(),
async = true :: boolean()
}).
init([Hwm, Window]) ->
lager_config:set(async, true),
{ok, #state{hwm=Hwm, window_min=Hwm - Window}}.
init([{sink, Sink}, Hwm, Window]) ->
lager_config:set({Sink, async}, true),
{ok, #state{sink=Sink, hwm=Hwm, window_min=Hwm - Window}}.
handle_call(get_loglevel, State) ->
@ -52,11 +64,13 @@ handle_event({log, _Message},State) ->
case {Len > State#state.hwm, Len < State#state.window_min, State#state.async} of
{true, _, true} ->
%% need to flip to sync mode
lager_config:set(async, false),
?TOGGLE_SYNC(),
lager_config:set({State#state.sink, async}, false),
{ok, State#state{async=false}};
{_, true, false} ->
%% need to flip to async mode
lager_config:set(async, true),
?TOGGLE_ASYNC(),
lager_config:set({State#state.sink, async}, true),
{ok, State#state{async=true}};
_ ->
%% nothing needs to change
@ -76,3 +90,16 @@ terminate(_Reason, _State) ->
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
-ifdef(TEST).
test_get(Key) ->
get_default(ets:lookup(?ETS_TABLE, Key)).
test_increment(Key) ->
ets:insert(?ETS_TABLE,
{Key, test_get(Key) + 1}).
get_default([]) ->
0;
get_default([{_Key, Value}]) ->
Value.
-endif.

+39 -16  src/lager_config.erl

@ -20,9 +20,16 @@
-include("lager.hrl").
-export([new/0, get/1, get/2, set/2]).
-export([new/0, new_sink/1, get/1, get/2, set/2,
global_get/1, global_get/2, global_set/2]).
-define(TBL, lager_config).
-define(GLOBAL, '_global').
%% For multiple sinks, the key is now the registered event name and the old key
%% as a tuple.
%%
%% {{lager_event, loglevel}, Value} instead of {loglevel, Value}
new() ->
%% set up the ETS configuration table
@ -33,32 +40,48 @@ new() ->
error:badarg ->
?INT_LOG(warning, "Table ~p already exists", [?TBL])
end,
new_sink(?DEFAULT_SINK),
%% Need to be able to find the `lager_handler_watcher' for all handlers
ets:insert_new(?TBL, {{?GLOBAL, handlers}, []}),
ok.
new_sink(Sink) ->
%% use insert_new here so that if we're in an appup we don't mess anything up
%%
%% until lager is completely started, allow all messages to go through
ets:insert_new(?TBL, {loglevel, {element(2, lager_util:config_to_mask(debug)), []}}),
ok.
ets:insert_new(?TBL, {{Sink, loglevel}, {element(2, lager_util:config_to_mask(debug)), []}}).
global_get(Key) ->
global_get(Key, undefined).
global_get(Key, Default) ->
get({?GLOBAL, Key}, Default).
global_set(Key, Value) ->
set({?GLOBAL, Key}, Value).
get({_Sink, _Key}=FullKey) ->
get(FullKey, undefined);
get(Key) ->
case ets:lookup(?TBL, Key) of
[] ->
undefined;
[{Key, Res}] ->
Res
end.
get({?DEFAULT_SINK, Key}, undefined).
get(Key, Default) ->
try ?MODULE:get(Key) of
undefined ->
get({Sink, Key}, Default) ->
try
case ets:lookup(?TBL, {Sink, Key}) of
[] ->
Default;
Res ->
[{{Sink, Key}, Res}] ->
Res
end
catch
_:_ ->
Default
end.
end;
get(Key, Default) ->
get({?DEFAULT_SINK, Key}, Default).
set({Sink, Key}, Value) ->
ets:insert(?TBL, {{Sink, Key}, Value});
set(Key, Value) ->
ets:insert(?TBL, {Key, Value}).
set({?DEFAULT_SINK, Key}, Value).
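
Configuration keys are now `{Sink, Key}` tuples, with plain keys falling back
to the default sink and a `'_global'` namespace for data shared across sinks.
A few illustrative calls against the accessors above:

```erlang
lager_config:get({lager_event, loglevel}),           %% per-sink read
lager_config:get(loglevel),                          %% plain key falls back to the default sink
lager_config:set({audit_lager_event, async}, true),  %% per-sink write
lager_config:global_get(handlers, []).               %% data shared across sinks
```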

+9 -8  src/lager_console_backend.erl

@ -38,6 +38,8 @@
-define(TERSE_FORMAT,[time, " ", color, "[", severity,"] ", message]).
%% @private
init([Level]) when is_atom(Level) ->
init(Level);
init([Level, true]) -> % for backwards compatibility
init([Level,{lager_default_formatter,[{eol, eol()}]}]);
init([Level,false]) -> % for backwards compatibility
@ -75,7 +77,6 @@ init([Level,{Formatter,FormatterConfig}]) when is_atom(Formatter) ->
init(Level) ->
init([Level,{lager_default_formatter,?TERSE_FORMAT ++ [eol()]}]).
%% @private
handle_call(get_loglevel, #state{level=Level} = State) ->
{ok, Level, State};
@ -186,7 +187,7 @@ console_log_test_() ->
register(user, Pid),
erlang:group_leader(Pid, whereis(lager_event)),
gen_event:add_handler(lager_event, lager_console_backend, info),
lager_config:set(loglevel, {element(2, lager_util:config_to_mask(info)), []}),
lager_config:set({lager_event, loglevel}, {element(2, lager_util:config_to_mask(info)), []}),
lager:log(info, self(), "Test message"),
receive
{io_request, From, ReplyAs, {put_chars, unicode, Msg}} ->
@ -206,7 +207,7 @@ console_log_test_() ->
register(user, Pid),
erlang:group_leader(Pid, whereis(lager_event)),
gen_event:add_handler(lager_event, lager_console_backend, [info, true]),
lager_config:set(loglevel, {element(2, lager_util:config_to_mask(info)), []}),
lager_config:set({lager_event, loglevel}, {element(2, lager_util:config_to_mask(info)), []}),
lager:info("Test message"),
PidStr = pid_to_list(self()),
receive
@ -228,7 +229,7 @@ console_log_test_() ->
gen_event:add_handler(lager_event, lager_console_backend,
[info, {lager_default_formatter, [date,"#",time,"#",severity,"#",node,"#",pid,"#",
module,"#",function,"#",file,"#",line,"#",message,"\r\n"]}]),
lager_config:set(loglevel, {?INFO, []}),
lager_config:set({lager_event, loglevel}, {?INFO, []}),
lager:info("Test message"),
PidStr = pid_to_list(self()),
NodeStr = atom_to_list(node()),
@ -251,7 +252,7 @@ console_log_test_() ->
register(user, Pid),
gen_event:add_handler(lager_event, lager_console_backend, info),
erlang:group_leader(Pid, whereis(lager_event)),
lager_config:set(loglevel, {element(2, lager_util:config_to_mask(info)), []}),
lager_config:set({lager_event, loglevel}, {element(2, lager_util:config_to_mask(info)), []}),
lager:debug("Test message"),
receive
{io_request, From, ReplyAs, {put_chars, unicode, _Msg}} ->
@ -280,7 +281,7 @@ console_log_test_() ->
unregister(user),
register(user, Pid),
gen_event:add_handler(lager_event, lager_console_backend, info),
lager_config:set(loglevel, {element(2, lager_util:config_to_mask(info)), []}),
lager_config:set({lager_event, loglevel}, {element(2, lager_util:config_to_mask(info)), []}),
erlang:group_leader(Pid, whereis(lager_event)),
lager:debug("Test message"),
receive
@ -319,7 +320,7 @@ console_log_test_() ->
unregister(user),
register(user, Pid),
gen_event:add_handler(lager_event, lager_console_backend, info),
lager_config:set(loglevel, {element(2, lager_util:config_to_mask(info)), []}),
lager_config:set({lager_event, loglevel}, {element(2, lager_util:config_to_mask(info)), []}),
lager:set_loglevel(lager_console_backend, '!=info'),
erlang:group_leader(Pid, whereis(lager_event)),
lager:debug("Test message"),
@ -350,7 +351,7 @@ console_log_test_() ->
unregister(user),
register(user, Pid),
gen_event:add_handler(lager_event, lager_console_backend, info),
lager_config:set(loglevel, {element(2, lager_util:config_to_mask(info)), []}),
lager_config:set({lager_event, loglevel}, {element(2, lager_util:config_to_mask(info)), []}),
lager:set_loglevel(lager_console_backend, '=debug'),
erlang:group_leader(Pid, whereis(lager_event)),
lager:debug("Test message"),

+4 -1  src/lager_default_formatter.erl

@ -40,7 +40,7 @@
%% or refer to other properties, if desired. You can also use a {atom, semi-iolist(), semi-iolist()} formatter, which
%% acts like a ternary operator's true/false branches.
%%
%% The metadata properties date,time, message, and severity will always exist.
%% The metadata properties date,time, message, severity, and sev will always exist.
%% The properties pid, file, line, module, and function will always exist if the parser transform is used.
%%
%% Example:
@ -86,6 +86,9 @@ output(time,Msg) ->
T;
output(severity,Msg) ->
atom_to_list(lager_msg:severity(Msg));
output(sev,Msg) ->
%% Write brief acronym for the severity level (e.g. debug -> $D)
[lager_util:level_to_chr(lager_msg:severity(Msg))];
output(Prop,Msg) when is_atom(Prop) ->
Metadata = lager_msg:metadata(Msg),
make_printable(get_metadata(Prop,Metadata,<<"Undefined">>));
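
The new `sev` placeholder can be used anywhere the formatter config accepts an
atom placeholder. A possible handler entry (the exact format string is
illustrative):

```erlang
%% Console backend printing e.g. "12:34:56.789 [I] hello world"
{lager_console_backend,
 [info, {lager_default_formatter, [time, " [", sev, "] ", message, "\n"]}]}
```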

+73 -14  src/lager_file_backend.erl

@ -63,6 +63,7 @@
size = 0 :: integer(),
date :: undefined | string(),
count = 10 :: integer(),
shaper :: lager_shaper(),
formatter :: atom(),
formatter_config :: any(),
sync_on :: {'mask', integer()},
@ -74,7 +75,8 @@
-type option() :: {file, string()} | {level, lager:log_level()} |
{size, non_neg_integer()} | {date, string()} |
{count, non_neg_integer()} | {sync_interval, non_neg_integer()} |
{count, non_neg_integer()} | {high_water_mark, non_neg_integer()} |
{sync_interval, non_neg_integer()} |
{sync_size, non_neg_integer()} | {sync_on, lager:log_level()} |
{check_interval, non_neg_integer()} | {formatter, atom()} |
{formatter_config, term()}.
@ -102,11 +104,12 @@ init(LogFileConfig) when is_list(LogFileConfig) ->
{error, {fatal, bad_config}};
Config ->
%% probably a better way to do this, but whatever
[RelName, Level, Date, Size, Count, SyncInterval, SyncSize, SyncOn, CheckInterval, Formatter, FormatterConfig] =
[proplists:get_value(Key, Config) || Key <- [file, level, date, size, count, sync_interval, sync_size, sync_on, check_interval, formatter, formatter_config]],
[RelName, Level, Date, Size, Count, HighWaterMark, SyncInterval, SyncSize, SyncOn, CheckInterval, Formatter, FormatterConfig] =
[proplists:get_value(Key, Config) || Key <- [file, level, date, size, count, high_water_mark, sync_interval, sync_size, sync_on, check_interval, formatter, formatter_config]],
Name = lager_util:expand_path(RelName),
schedule_rotation(Name, Date),
State0 = #state{name=Name, level=Level, size=Size, date=Date, count=Count, formatter=Formatter,
Shaper = #lager_shaper{hwm=HighWaterMark},
State0 = #state{name=Name, level=Level, size=Size, date=Date, count=Count, shaper=Shaper, formatter=Formatter,
formatter_config=FormatterConfig, sync_on=SyncOn, sync_interval=SyncInterval, sync_size=SyncSize,
check_interval=CheckInterval},
State = case lager_util:open_logfile(Name, {SyncSize, SyncInterval}) of
@ -130,15 +133,42 @@ handle_call({set_loglevel, Level}, #state{name=Ident} = State) ->
end;
handle_call(get_loglevel, #state{level=Level} = State) ->
{ok, Level, State};
handle_call({set_loghwm, Hwm}, #state{shaper=Shaper, name=Name} = State) ->
case validate_logfile_proplist([{file, Name}, {high_water_mark, Hwm}]) of
false ->
{ok, {error, bad_log_hwm}, State};
_ ->
NewShaper = Shaper#lager_shaper{hwm=Hwm},
?INT_LOG(notice, "Changed loghwm of ~s to ~p", [Name, Hwm]),
{ok, {last_loghwm, Shaper#lager_shaper.hwm}, State#state{shaper=NewShaper}}
end;
handle_call(_Request, State) ->
{ok, ok, State}.
%% @private
handle_event({log, Message},
#state{name=Name, level=L,formatter=Formatter,formatter_config=FormatConfig} = State) ->
#state{name=Name, level=L, shaper=Shaper, formatter=Formatter,formatter_config=FormatConfig} = State) ->
case lager_util:is_loggable(Message,L,{lager_file_backend, Name}) of
true ->
{ok,write(State, lager_msg:timestamp(Message), lager_msg:severity_as_int(Message), Formatter:format(Message,FormatConfig)) };
case lager_util:check_hwm(Shaper) of
{true, Drop, #lager_shaper{hwm=Hwm} = NewShaper} ->
NewState = case Drop > 0 of
true ->
Report = io_lib:format(
"lager_file_backend dropped ~p messages in the last second that exceeded the limit of ~p messages/sec",
[Drop, Hwm]),
ReportMsg = lager_msg:new(Report, warning, [], []),
write(State, lager_msg:timestamp(ReportMsg),
lager_msg:severity_as_int(ReportMsg), Formatter:format(ReportMsg, FormatConfig));
false ->
State
end,
{ok,write(NewState#state{shaper=NewShaper},
lager_msg:timestamp(Message), lager_msg:severity_as_int(Message),
Formatter:format(Message,FormatConfig))};
{false, _, NewShaper} ->
{ok, State#state{shaper=NewShaper}}
end;
false ->
{ok, State}
end;
@ -164,7 +194,7 @@ terminate(_Reason, #state{fd=FD}) ->
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
%% @private convert the config into a gen_event handler ID
%% Convert the config into a gen_event handler ID
config_to_id({Name,_Severity}) when is_list(Name) ->
{?MODULE, Name};
config_to_id({Name,_Severity,_Size,_Rotation,_Count}) ->
@ -237,7 +267,7 @@ do_write(#state{fd=FD, name=Name, flap=Flap} = State, Level, Msg) ->
Flap
end,
State#state{flap=Flap2};
_ ->
_ ->
State
end.
@ -301,6 +331,13 @@ validate_logfile_proplist([{count, Count}|Tail], Acc) ->
_ ->
throw({bad_config, "Invalid rotation count", Count})
end;
validate_logfile_proplist([{high_water_mark, HighWaterMark}|Tail], Acc) ->
case HighWaterMark of
Hwm when is_integer(Hwm), Hwm >= 0 ->
validate_logfile_proplist(Tail, [{high_water_mark, Hwm}|Acc]);
_ ->
throw({bad_config, "Invalid high water mark", HighWaterMark})
end;
validate_logfile_proplist([{date, Date}|Tail], Acc) ->
case lager_util:parse_rotation_date_spec(Date) of
{ok, Spec} ->
@ -666,8 +703,8 @@ filesystem_test_() ->
{"test.log", critical}),
lager:error("Test message"),
?assertEqual({ok, <<>>}, file:read_file("test.log")),
{Level, _} = lager_config:get(loglevel),
lager_config:set(loglevel, {Level, [{[{module,
{Level, _} = lager_config:get({lager_event, loglevel}),
lager_config:set({lager_event, loglevel}, {Level, [{[{module,
?MODULE}], ?DEBUG,
{lager_file_backend, "test.log"}}]}),
lager:error("Test message"),
@ -684,8 +721,8 @@ filesystem_test_() ->
{ok, Bin1} = file:read_file("test.log"),
?assertMatch([_, _, "[critical]", _, "Test message\n"], re:split(Bin1, " ", [{return, list}, {parts, 5}])),
ok = file:delete("test.log"),
{Level, _} = lager_config:get(loglevel),
lager_config:set(loglevel, {Level, [{[{module,
{Level, _} = lager_config:get({lager_event, loglevel}),
lager_config:set({lager_event, loglevel}, {Level, [{[{module,
?MODULE}], ?DEBUG,
{lager_file_backend, "test.log"}}]}),
lager:critical("Test message"),
@ -708,12 +745,31 @@ filesystem_test_() ->
?assertMatch([_, _, "[error]", _, "Test message\n"], re:split(Bin3, " ", [{return, list}, {parts, 5}]))
end
},
{"tracing to a dedicated file should work even if root_log is set",
fun() ->
{ok, P} = file:get_cwd(),
file:delete(P ++ "/test_root_log/foo.log"),
application:set_env(lager, log_root, P++"/test_root_log"),
{ok, _} = lager:trace_file("foo.log", [{module, ?MODULE}]),
lager:error("Test message"),
%% not eligible for trace
lager:log(error, self(), "Test message"),
{ok, Bin3} = file:read_file(P++"/test_root_log/foo.log"),
application:unset_env(lager, log_root),
?assertMatch([_, _, "[error]", _, "Test message\n"], re:split(Bin3, " ", [{return, list}, {parts, 5}]))
end
},
{"tracing with options should work",
fun() ->
file:delete("foo.log"),
{ok, _} = lager:trace_file("foo.log", [{module, ?MODULE}], [{size, 20}, {check_interval, 1}]),
{ok, _} = lager:trace_file("foo.log", [{module, ?MODULE}], [{size, 20}, {check_interval, 1}]),
lager:error("Test message"),
?assertNot(filelib:is_regular("foo.log.0")),
%% rotation is sensitive to intervals between
%% writes so we sleep to exceed the 1
%% millisecond interval specified by
%% check_interval above
timer:sleep(2),
lager:error("Test message"),
timer:sleep(10),
?assert(filelib:is_regular("foo.log.0"))
@ -769,6 +825,10 @@ config_validation_test_() ->
?_assertEqual(false,
validate_logfile_proplist([{file, "test.log"}, {count, infinity}]))
},
{"bad high water mark",
?_assertEqual(false,
validate_logfile_proplist([{file, "test.log"}, {high_water_mark, infinity}]))
},
{"bad date",
?_assertEqual(false,
validate_logfile_proplist([{file, "test.log"}, {date, "midnight"}]))
@ -809,4 +869,3 @@ config_validation_test_() ->
-endif.
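
The new `high_water_mark` option slots into the same proplist as the other file backend settings and is validated like them; a sketch of a handler entry with illustrative values:

```erlang
%% Illustrative handler entry: allow at most 100 messages/sec into this
%% file; excess messages within a second are dropped and summarised by
%% the backend's own warning report.
{lager_file_backend, [{file, "error.log"}, {level, error},
                      {size, 10485760}, {date, "$D0"}, {count, 5},
                      {high_water_mark, 100}]}
```

The limit can also be changed on a running handler through the new `set_loghwm` call, addressing the handler by its id, e.g. `gen_event:call(lager_event, {lager_file_backend, "error.log"}, {set_loghwm, 250})`.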

+ 29
- 19
src/lager_handler_watcher.erl View File

@ -38,18 +38,18 @@
-record(state, {
module :: atom(),
config :: any(),
event :: pid() | atom()
sink :: pid() | atom()
}).
start_link(Event, Module, Config) ->
gen_server:start_link(?MODULE, [Event, Module, Config], []).
start_link(Sink, Module, Config) ->
gen_server:start_link(?MODULE, [Sink, Module, Config], []).
start(Event, Module, Config) ->
gen_server:start(?MODULE, [Event, Module, Config], []).
start(Sink, Module, Config) ->
gen_server:start(?MODULE, [Sink, Module, Config], []).
init([Event, Module, Config]) ->
install_handler(Event, Module, Config),
{ok, #state{event=Event, module=Module, config=Config}}.
init([Sink, Module, Config]) ->
install_handler(Sink, Module, Config),
{ok, #state{sink=Sink, module=Module, config=Config}}.
handle_call(_Call, _From, State) ->
{reply, ok, State}.
@ -62,18 +62,18 @@ handle_info({gen_event_EXIT, Module, normal}, #state{module=Module} = State) ->
handle_info({gen_event_EXIT, Module, shutdown}, #state{module=Module} = State) ->
{stop, normal, State};
handle_info({gen_event_EXIT, Module, Reason}, #state{module=Module,
config=Config, event=Event} = State) ->
config=Config, sink=Sink} = State) ->
case lager:log(error, self(), "Lager event handler ~p exited with reason ~s",
[Module, error_logger_lager_h:format_reason(Reason)]) of
ok ->
install_handler(Event, Module, Config);
install_handler(Sink, Module, Config);
{error, _} ->
%% lager is not working, so installing a handler won't work
ok
end,
{noreply, State};
handle_info(reinstall_handler, #state{module=Module, config=Config, event=Event} = State) ->
install_handler(Event, Module, Config),
handle_info(reinstall_handler, #state{module=Module, config=Config, sink=Sink} = State) ->
install_handler(Sink, Module, Config),
{noreply, State};
handle_info(stop, State) ->
{stop, normal, State};
@ -87,23 +87,33 @@ code_change(_OldVsn, State, _Extra) ->
{ok, State}.
%% internal
install_handler(Event, Module, Config) ->
case gen_event:add_sup_handler(Event, Module, Config) of
install_handler(Sink, lager_backend_throttle, Config) ->
%% The lager_backend_throttle needs to know to which sink it is
%% attached, hence this admittedly ugly workaround. Handlers are
%% sensitive to the structure of the configuration sent to `init',
%% sadly, so it's not trivial to add a configuration item to be
%% ignored to backends without breaking 3rd party handlers.
install_handler2(Sink, lager_backend_throttle, [{sink, Sink}|Config]);
install_handler(Sink, Module, Config) ->
install_handler2(Sink, Module, Config).
%% private
install_handler2(Sink, Module, Config) ->
case gen_event:add_sup_handler(Sink, Module, Config) of
ok ->
?INT_LOG(debug, "Lager installed handler ~p into ~p", [Module, Event]),
lager:update_loglevel_config(),
?INT_LOG(debug, "Lager installed handler ~p into ~p", [Module, Sink]),
lager:update_loglevel_config(Sink),
ok;
{error, {fatal, Reason}} ->
?INT_LOG(error, "Lager fatally failed to install handler ~p into"
" ~p, NOT retrying: ~p", [Module, Event, Reason]),
" ~p, NOT retrying: ~p", [Module, Sink, Reason]),
%% tell ourselves to stop
self() ! stop,
ok;
Error ->
%% try to reinstall it later
?INT_LOG(error, "Lager failed to install handler ~p into"
" ~p, retrying later : ~p", [Module, Event, Error]),
" ~p, retrying later : ~p", [Module, Sink, Error]),
erlang:send_after(5000, self(), reinstall_handler),
ok
end.

+ 6
- 1
src/lager_sup.erl View File

@ -34,9 +34,14 @@ start_link() ->
init([]) ->
%% set up the config, is safe even during relups
lager_config:new(),
%% TODO:
%% Always start lager_event as the default and make sure that
%% other gen_event stuff can start up as needed
%%
%% Maybe a new API to handle the sink and its policy?
Children = [
{lager, {gen_event, start_link, [{local, lager_event}]},
permanent, 5000, worker, [dynamic]},
permanent, 5000, worker, dynamic},
{lager_handler_watcher_sup, {lager_handler_watcher_sup, start_link, []},
permanent, 5000, supervisor, [lager_handler_watcher_sup]}],

+ 153
- 126
src/lager_transform.erl View File

@ -31,8 +31,10 @@
parse_transform(AST, Options) ->
TruncSize = proplists:get_value(lager_truncation_size, Options, ?DEFAULT_TRUNCATION),
Enable = proplists:get_value(lager_print_records_flag, Options, true),
Sinks = [lager] ++ proplists:get_value(lager_extra_sinks, Options, []),
put(print_records_flag, Enable),
put(truncation_size, TruncSize),
put(sinks, Sinks),
erlang:put(records, []),
%% .app file should either be in the outdir, or the same dir as the source file
guess_application(proplists:get_value(outdir, Options), hd(AST)),
@ -75,138 +77,163 @@ walk_clauses(Acc, [{clause, Line, Arguments, Guards, Body}|T]) ->
walk_body(Acc, []) ->
lists:reverse(Acc);
walk_body(Acc, [H|T]) ->
walk_body([transform_statement(H)|Acc], T).
walk_body([transform_statement(H, get(sinks))|Acc], T).
transform_statement({call, Line, {remote, _Line1, {atom, _Line2, lager},
{atom, _Line3, Severity}}, Arguments0} = Stmt) ->
case lists:member(Severity, ?LEVELS) of
transform_statement({call, Line, {remote, _Line1, {atom, _Line2, Module},
{atom, _Line3, Function}}, Arguments0} = Stmt,
Sinks) ->
case lists:member(Module, Sinks) of
true ->
SeverityAsInt=lager_util:level_to_num(Severity),
DefaultAttrs0 = {cons, Line, {tuple, Line, [
{atom, Line, module}, {atom, Line, get(module)}]},
{cons, Line, {tuple, Line, [
{atom, Line, function}, {atom, Line, get(function)}]},
{cons, Line, {tuple, Line, [
{atom, Line, line},
{integer, Line, Line}]},
{cons, Line, {tuple, Line, [
{atom, Line, pid},
{call, Line, {atom, Line, pid_to_list}, [
{call, Line, {atom, Line ,self}, []}]}]},
{cons, Line, {tuple, Line, [
{atom, Line, node},
{call, Line, {atom, Line, node}, []}]},
%% get the metadata with lager:md(), this will always return a list so we can use it as the tail here
{call, Line, {remote, Line, {atom, Line, lager}, {atom, Line, md}}, []}}}}}},
%{nil, Line}}}}}}},
DefaultAttrs = case erlang:get(application) of
undefined ->
DefaultAttrs0;
App ->
%% stick the application in the attribute list
concat_lists({cons, Line, {tuple, Line, [
{atom, Line, application},
{atom, Line, App}]},
{nil, Line}}, DefaultAttrs0)
end,
{Traces, Message, Arguments} = case Arguments0 of
[Format] ->
{DefaultAttrs, Format, {atom, Line, none}};
[Arg1, Arg2] ->
%% some ambiguity here, figure out if these arguments are
%% [Format, Args] or [Attr, Format].
%% The trace attributes will be a list of tuples, so check
%% for that.
case {element(1, Arg1), Arg1} of
{_, {cons, _, {tuple, _, _}, _}} ->
{concat_lists(Arg1, DefaultAttrs),
Arg2, {atom, Line, none}};
{Type, _} when Type == var;
Type == lc;
Type == call;
Type == record_field ->
%% crap, its not a literal. look at the second
%% argument to see if it is a string
case Arg2 of
{string, _, _} ->
{concat_lists(Arg1, DefaultAttrs),
Arg2, {atom, Line, none}};
_ ->
%% not a string, going to have to guess
%% it's the argument list
{DefaultAttrs, Arg1, Arg2}
end;
_ ->
{DefaultAttrs, Arg1, Arg2}
end;
[Attrs, Format, Args] ->
{concat_lists(Attrs, DefaultAttrs), Format, Args}
end,
%% Generate some unique variable names so we don't accidentally export from case clauses.
%% Note that these are not actual atoms, but the AST treats variable names as atoms.
LevelVar = make_varname("__Level", Line),
TracesVar = make_varname("__Traces", Line),
PidVar = make_varname("__Pid", Line),
%% Wrap the call to lager_dispatch log in a case that will avoid doing any work if this message is not eligible for logging
%% case {whereis(lager_event(lager_event), lager_config:get(loglevel, {?LOG_NONE, []})} of
{'case', Line,
{tuple, Line,
[{call, Line, {atom, Line, whereis}, [{atom, Line, lager_event}]},
{call, Line, {remote, Line, {atom, Line, lager_config}, {atom, Line, get}}, [{atom, Line, loglevel}, {tuple, Line, [{integer, Line, 0},{nil, Line}]}]}]},
[
%% {undefined, _} -> {error, lager_not_running}
{clause, Line,
[{tuple, Line, [{atom, Line, undefined}, {var, Line, '_'}]}],
[],
%% trick the linter into avoiding a 'term constructed but not used' error:
%% (fun() -> {error, lager_not_running} end)();
[{call, Line, {'fun', Line, {clauses, [{clause, Line, [],[], [{tuple, Line, [{atom, Line, error},{atom, Line, lager_not_running}]}]}]}}, []}]},
%% If we care about the loglevel, or there's any traces installed, we have do more checking
%% {Level, Traces} when (Level band SeverityAsInt) /= 0 orelse Traces /= [] ->
{clause, Line,
[{tuple, Line, [{var, Line, PidVar}, {tuple, Line, [{var, Line, LevelVar}, {var, Line, TracesVar}]}]}],
[[{op, Line, 'orelse',
{op, Line, '/=', {op, Line, 'band', {var, Line, LevelVar}, {integer, Line, SeverityAsInt}}, {integer, Line, 0}},
{op, Line, '/=', {var, Line, TracesVar}, {nil, Line}}}]],
[
%% do the call to lager:dispatch_log
{call, Line, {remote, Line, {atom, Line, lager}, {atom, Line, do_log}},
[
{atom,Line,Severity},
Traces,
Message,
Arguments,
{integer, Line, get(truncation_size)},
{integer, Line, SeverityAsInt},
{var, Line, LevelVar},
{var, Line, TracesVar},
{var, Line, PidVar}
]
}
]},
%% otherwise, do nothing
%% _ -> ok
{clause, Line, [{var, Line, '_'}],[],[{atom, Line, ok}]}
]};
case lists:member(Function, ?LEVELS) of
true ->
SinkName = lager_util:make_internal_sink_name(Module),
do_transform(Line, SinkName, Function, Arguments0);
false ->
case lists:keyfind(Function, 1, ?LEVELS_UNSAFE) of
{Function, Severity} ->
SinkName = lager_util:make_internal_sink_name(Module),
do_transform(Line, SinkName, Severity, Arguments0, unsafe);
false ->
Stmt
end
end;
false ->
Stmt
list_to_tuple(transform_statement(tuple_to_list(Stmt), Sinks))
end;
transform_statement({call, Line, {remote, Line1, {atom, Line2, boston_lager},
{atom, Line3, Severity}}, Arguments}) ->
NewArgs = case Arguments of
[{string, L, Msg}] -> [{string, L, re:replace(Msg, "r", "h", [{return, list}, global])}];
[{string, L, Format}, Args] -> [{string, L, re:replace(Format, "r", "h", [{return, list}, global])}, Args];
Other -> Other
end,
transform_statement({call, Line, {remote, Line1, {atom, Line2, lager},
{atom, Line3, Severity}}, NewArgs});
transform_statement(Stmt) when is_tuple(Stmt) ->
list_to_tuple(transform_statement(tuple_to_list(Stmt)));
transform_statement(Stmt) when is_list(Stmt) ->
[transform_statement(S) || S <- Stmt];
transform_statement(Stmt) ->
transform_statement(Stmt, Sinks) when is_tuple(Stmt) ->
list_to_tuple(transform_statement(tuple_to_list(Stmt), Sinks));
transform_statement(Stmt, Sinks) when is_list(Stmt) ->
[transform_statement(S, Sinks) || S <- Stmt];
transform_statement(Stmt, _Sinks) ->
Stmt.
do_transform(Line, SinkName, Severity, Arguments0) ->
do_transform(Line, SinkName, Severity, Arguments0, safe).
do_transform(Line, SinkName, Severity, Arguments0, Safety) ->
SeverityAsInt=lager_util:level_to_num(Severity),
DefaultAttrs0 = {cons, Line, {tuple, Line, [
{atom, Line, module}, {atom, Line, get(module)}]},
{cons, Line, {tuple, Line, [
{atom, Line, function}, {atom, Line, get(function)}]},
{cons, Line, {tuple, Line, [
{atom, Line, line},
{integer, Line, Line}]},
{cons, Line, {tuple, Line, [
{atom, Line, pid},
{call, Line, {atom, Line, pid_to_list}, [
{call, Line, {atom, Line ,self}, []}]}]},
{cons, Line, {tuple, Line, [
{atom, Line, node},
{call, Line, {atom, Line, node}, []}]},
%% get the metadata with lager:md(), this will always return a list so we can use it as the tail here
{call, Line, {remote, Line, {atom, Line, lager}, {atom, Line, md}}, []}}}}}},
%{nil, Line}}}}}}},
DefaultAttrs = case erlang:get(application) of
undefined ->
DefaultAttrs0;
App ->
%% stick the application in the attribute list
concat_lists({cons, Line, {tuple, Line, [
{atom, Line, application},
{atom, Line, App}]},
{nil, Line}}, DefaultAttrs0)
end,
{Meta, Message, Arguments} = case Arguments0 of
[Format] ->
{DefaultAttrs, Format, {atom, Line, none}};
[Arg1, Arg2] ->
%% some ambiguity here, figure out if these arguments are
%% [Format, Args] or [Attr, Format].
%% The trace attributes will be a list of tuples, so check
%% for that.
case {element(1, Arg1), Arg1} of
{_, {cons, _, {tuple, _, _}, _}} ->
{concat_lists(Arg1, DefaultAttrs),
Arg2, {atom, Line, none}};
{Type, _} when Type == var;
Type == lc;
Type == call;
Type == record_field ->
%% crap, its not a literal. look at the second
%% argument to see if it is a string
case Arg2 of
{string, _, _} ->
{concat_lists(Arg1, DefaultAttrs),
Arg2, {atom, Line, none}};
_ ->
%% not a string, going to have to guess
%% it's the argument list
{DefaultAttrs, Arg1, Arg2}
end;
_ ->
{DefaultAttrs, Arg1, Arg2}
end;
[Attrs, Format, Args] ->
{concat_lists(Attrs, DefaultAttrs), Format, Args}
end,
%% Generate some unique variable names so we don't accidentally export from case clauses.
%% Note that these are not actual atoms, but the AST treats variable names as atoms.
LevelVar = make_varname("__Level", Line),
TracesVar = make_varname("__Traces", Line),
PidVar = make_varname("__Pid", Line),
LogFun = case Safety of
safe ->
do_log;
unsafe ->
do_log_unsafe
end,
%% Wrap the call to lager:dispatch_log/6 in a case that will avoid doing any work if this message is not eligible for logging
%% See lager.erl (lines 89-100) for lager:dispatch_log/6
%% case {whereis(Sink), whereis(?DEFAULT_SINK), lager_config:get({Sink, loglevel}, {?LOG_NONE, []})} of
{'case',Line,
{tuple,Line,
[{call,Line,{atom,Line,whereis},[{atom,Line,SinkName}]},
{call,Line,{atom,Line,whereis},[{atom,Line,?DEFAULT_SINK}]},
{call,Line,
{remote,Line,{atom,Line,lager_config},{atom,Line,get}},
[{tuple,Line,[{atom,Line,SinkName},{atom,Line,loglevel}]},
{tuple,Line,[{integer,Line,0},{nil,Line}]}]}]},
%% {undefined, undefined, _} -> {error, lager_not_running};
[{clause,Line,
[{tuple,Line,
[{atom,Line,undefined},{atom,Line,undefined},{var,Line,'_'}]}],
[],
%% trick the linter into avoiding a 'term constructed but not used' error:
%% (fun() -> {error, lager_not_running} end)()
[{call, Line, {'fun', Line, {clauses, [{clause, Line, [],[], [{tuple, Line, [{atom, Line, error},{atom, Line, lager_not_running}]}]}]}}, []}]
},
%% {undefined, _, _} -> {error, {sink_not_configured, Sink}};
{clause,Line,
[{tuple,Line,
[{atom,Line,undefined},{var,Line,'_'},{var,Line,'_'}]}],
[],
%% same trick as above to avoid linter error
[{call, Line, {'fun', Line, {clauses, [{clause, Line, [],[], [{tuple,Line, [{atom,Line,error}, {tuple,Line,[{atom,Line,sink_not_configured},{atom,Line,SinkName}]}]}]}]}}, []}]
},
%% {SinkPid, _, {Level, Traces}} when ... -> lager:do_log/9;
{clause,Line,
[{tuple,Line,
[{var,Line,PidVar},
{var,Line,'_'},
{tuple,Line,[{var,Line,LevelVar},{var,Line,TracesVar}]}]}],
[[{op, Line, 'orelse',
{op, Line, '/=', {op, Line, 'band', {var, Line, LevelVar}, {integer, Line, SeverityAsInt}}, {integer, Line, 0}},
{op, Line, '/=', {var, Line, TracesVar}, {nil, Line}}}]],
[{call,Line,{remote, Line, {atom, Line, lager}, {atom, Line, LogFun}},
[{atom,Line,Severity},
Meta,
Message,
Arguments,
{integer, Line, get(truncation_size)},
{integer, Line, SeverityAsInt},
{var, Line, LevelVar},
{var, Line, TracesVar},
{atom, Line, SinkName},
{var, Line, PidVar}]}]},
%% _ -> ok
{clause,Line,[{var,Line,'_'}],[],[{atom,Line,ok}]}]}.
make_varname(Prefix, Line) ->
list_to_atom(Prefix ++ atom_to_list(get(module)) ++ integer_to_list(Line)).
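
The `unsafe` branch selected through `?LEVELS_UNSAFE` is what backs the `_unsafe` logging variants exercised in the test suite further down; a sketch of a call site (the module below is hypothetical):

```erlang
%% Hypothetical module: lager:info_unsafe/2 skips lager_trunc_io size
%% truncation, so only use it when the formatted term is known to be
%% of bounded size.
-module(unsafe_call_sketch).
-compile([{parse_transform, lager_transform}]).
-export([dump/1]).

dump(BigTerm) ->
    lager:info_unsafe("full dump: ~p", [BigTerm]).
```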

+ 19
- 19
src/lager_trunc_io.erl View File

@ -3,12 +3,12 @@
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with your Erlang distribution. If not, it can be
%% retrieved via the world wide web at http://www.erlang.org/.
%%
%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
%%
%%
%% The Initial Developer of the Original Code is Corelatus AB.
%% Portions created by Corelatus are Copyright 2003, Corelatus
%% AB. All Rights Reserved.''
@ -32,7 +32,7 @@
-module(lager_trunc_io).
-author('matthias@corelatus.se').
%% And thanks to Chris Newcombe for a bug fix
%% And thanks to Chris Newcombe for a bug fix
-export([format/3, format/4, print/2, print/3, fprint/2, fprint/3, safe/2]). % interface functions
-version("$Id: trunc_io.erl,v 1.11 2009-02-23 12:01:06 matthias Exp $").
@ -76,11 +76,11 @@ fprint(Term, Max) ->
%% @doc Returns an flattened list containing the ASCII representation of the given
%% term.
-spec fprint(term(), pos_integer(), options()) -> string().
fprint(T, Max, Options) ->
fprint(T, Max, Options) ->
{L, _} = print(T, Max, prepare_options(Options, #print_options{})),
lists:flatten(L).
%% @doc Same as print, but never crashes.
%% @doc Same as print, but never crashes.
%%
%% This is a tradeoff. Print might conceivably crash if it's asked to
%% print something it doesn't understand, for example some new data
@ -88,7 +88,7 @@ fprint(T, Max, Options) ->
%% to io_lib to format the term, but then the formatting is
%% depth-limited instead of length limited, so you might run out
%% memory printing it. Out of the frying pan and into the fire.
%%
%%
-spec safe(term(), pos_integer()) -> {string(), pos_integer()} | {string()}.
safe(What, Len) ->
case catch print(What, Len) of
@ -114,8 +114,8 @@ print(_, Max, _Options) when Max < 0 -> {"...", 3};
print(_, _, #print_options{depth=0}) -> {"...", 3};
%% @doc We assume atoms, floats, funs, integers, PIDs, ports and refs never need
%% to be truncated. This isn't strictly true, someone could make an
%% @doc We assume atoms, floats, funs, integers, PIDs, ports and refs never need
%% to be truncated. This isn't strictly true, someone could make an
%% arbitrarily long bignum. Let's assume that won't happen unless someone
%% is being malicious.
%%
@ -214,15 +214,15 @@ print({inline_bitstring, B}, _Max, _Options) when is_bitstring(B) ->
SizeStr = integer_to_list(Size),
{[ValueStr, $:, SizeStr], length(ValueStr) + length(SizeStr) +1};
print(BitString, Max, Options) when is_bitstring(BitString) ->
case byte_size(BitString) > Max of
BL = case byte_size(BitString) > Max of
true ->
BL = binary_to_list(BitString, 1, Max);
binary_to_list(BitString, 1, Max);
_ ->
R = erlang:bitstring_to_list(BitString),
{Bytes, [Bits]} = lists:splitwith(fun erlang:is_integer/1, R),
%% tag the trailing bits with a special tuple we catch when
%% list_body calls print again
BL = Bytes ++ [{inline_bitstring, Bits}]
Bytes ++ [{inline_bitstring, Bits}]
end,
{X, Len0} = list_body(BL, Max - 4, dec_depth(Options), true),
{["<<", X, ">>"], Len0 + 4};
@ -265,7 +265,7 @@ print({'$lager_record', Name, Fields}, Max, Options) ->
{RC, Len} = record_fields(Fields, Max - length(Leader) + 1, dec_depth(Options)),
{[Leader, RC, "}"], Len + length(Leader) + 1};
print(Tuple, Max, Options) when is_tuple(Tuple) ->
print(Tuple, Max, Options) when is_tuple(Tuple) ->
{TC, Len} = tuple_contents(Tuple, Max-2, Options),
{[${, TC, $}], Len + 2};
@ -307,7 +307,7 @@ list_body([H|_], Max, Options=#print_options{depth=1}, Tuple) ->
false -> $|
end,
{[List ++ [Sep | "..."]], Len + 4};
list_body([H|T], Max, Options, Tuple) ->
list_body([H|T], Max, Options, Tuple) ->
{List, Len} = print(H, Max, Options),
{Final, FLen} = list_bodyc(T, Max - Len, Options, Tuple),
{[List|Final], FLen + Len};
@ -319,7 +319,7 @@ list_bodyc([], _Max, _Options, _Tuple) -> {[], 0};
list_bodyc(_, Max, _Options, _Tuple) when Max < 5 -> {",...", 4};
list_bodyc(_, _Max, #print_options{depth=1}, true) -> {",...", 4};
list_bodyc(_, _Max, #print_options{depth=1}, false) -> {"|...", 4};
list_bodyc([H|T], Max, #print_options{depth=Depth} = Options, Tuple) ->
list_bodyc([H|T], Max, #print_options{depth=Depth} = Options, Tuple) ->
{List, Len} = print(H, Max, dec_depth(Options)),
{Final, FLen} = list_bodyc(T, Max - Len - 1, dec_depth(Options), Tuple),
Sep = case Depth == 1 andalso not Tuple of
@ -553,7 +553,7 @@ perf(M, F, Reps) when Reps > 0 ->
test(M,F),
perf(M,F,Reps-1);
perf(_,_,_) ->
done.
done.
%% Performance test. Needs a particularly large term I saved as a binary...
-spec perf1() -> {non_neg_integer(), non_neg_integer()}.
@ -570,7 +570,7 @@ format_test() ->
?assertEqual("[\"foo\",98,97,114]", lists:flatten(format("~p", [["foo", $b, $a, $r]], 50))),
?assertEqual("[\"foo\",98,97,114]", lists:flatten(format("~P", [["foo", $b, $a, $r], 10], 50))),
?assertEqual("[[102,111,111],98,97,114]", lists:flatten(format("~w", [["foo", $b, $a, $r]], 50))),
%% complex ones
?assertEqual(" foobar", lists:flatten(format("~10s", [["foo", $b, $a, $r]], 50))),
?assertEqual("f", lists:flatten(format("~1s", [["foo", $b, $a, $r]], 50))),
@ -836,7 +836,7 @@ depth_limit_test() ->
?assertEqual("[1|...]", lists:flatten(format("~P", [[1, 2, 3], 2], 50))),
?assertEqual("[1,2|...]", lists:flatten(format("~P", [[1, 2, 3], 3], 50))),
?assertEqual("[1,2,3]", lists:flatten(format("~P", [[1, 2, 3], 4], 50))),
?assertEqual("{1,...}", lists:flatten(format("~P", [{1, 2, 3}, 2], 50))),
?assertEqual("{1,2,...}", lists:flatten(format("~P", [{1, 2, 3}, 3], 50))),
?assertEqual("{1,2,3}", lists:flatten(format("~P", [{1, 2, 3}, 4], 50))),
@ -845,13 +845,13 @@ depth_limit_test() ->
?assertEqual("[1,2|...]", lists:flatten(format("~P", [[1, 2, <<3>>], 3], 50))),
?assertEqual("[1,2,<<...>>]", lists:flatten(format("~P", [[1, 2, <<3>>], 4], 50))),
?assertEqual("[1,2,<<3>>]", lists:flatten(format("~P", [[1, 2, <<3>>], 5], 50))),
?assertEqual("<<...>>", lists:flatten(format("~P", [<<0, 0, 0, 0>>, 1], 50))),
?assertEqual("<<0,...>>", lists:flatten(format("~P", [<<0, 0, 0, 0>>, 2], 50))),
?assertEqual("<<0,0,...>>", lists:flatten(format("~P", [<<0, 0, 0, 0>>, 3], 50))),
?assertEqual("<<0,0,0,...>>", lists:flatten(format("~P", [<<0, 0, 0, 0>>, 4], 50))),
?assertEqual("<<0,0,0,0>>", lists:flatten(format("~P", [<<0, 0, 0, 0>>, 5], 50))),
%% this is a seriously weird edge case
?assertEqual("<<\" \"...>>", lists:flatten(format("~P", [<<32, 32, 32, 0>>, 2], 50))),
?assertEqual("<<\" \"...>>", lists:flatten(format("~P", [<<32, 32, 32, 0>>, 3], 50))),

+ 82
- 20
src/lager_util.erl View File

@ -18,11 +18,12 @@
-include_lib("kernel/include/file.hrl").
-export([levels/0, level_to_num/1, num_to_level/1, config_to_mask/1, config_to_levels/1, mask_to_levels/1,
-export([levels/0, level_to_num/1, level_to_chr/1,
num_to_level/1, config_to_mask/1, config_to_levels/1, mask_to_levels/1,
open_logfile/2, ensure_logfile/4, rotate_logfile/2, format_time/0, format_time/1,
localtime_ms/0, localtime_ms/1, maybe_utc/1, parse_rotation_date_spec/1,
calculate_next_rotation/1, validate_trace/1, check_traces/4, is_loggable/3,
trace_filter/1, trace_filter/2, expand_path/1]).
trace_filter/1, trace_filter/2, expand_path/1, check_hwm/1, make_internal_sink_name/1]).
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
@ -33,25 +34,35 @@
levels() ->
[debug, info, notice, warning, error, critical, alert, emergency, none].
level_to_num(debug) -> ?DEBUG;
level_to_num(info) -> ?INFO;
level_to_num(notice) -> ?NOTICE;
level_to_num(warning) -> ?WARNING;
level_to_num(error) -> ?ERROR;
level_to_num(critical) -> ?CRITICAL;
level_to_num(alert) -> ?ALERT;
level_to_num(emergency) -> ?EMERGENCY;
level_to_num(none) -> ?LOG_NONE.
num_to_level(?DEBUG) -> debug;
num_to_level(?INFO) -> info;
num_to_level(?NOTICE) -> notice;
num_to_level(?WARNING) -> warning;
num_to_level(?ERROR) -> error;
num_to_level(?CRITICAL) -> critical;
num_to_level(?ALERT) -> alert;
level_to_num(debug) -> ?DEBUG;
level_to_num(info) -> ?INFO;
level_to_num(notice) -> ?NOTICE;
level_to_num(warning) -> ?WARNING;
level_to_num(error) -> ?ERROR;
level_to_num(critical) -> ?CRITICAL;
level_to_num(alert) -> ?ALERT;
level_to_num(emergency) -> ?EMERGENCY;
level_to_num(none) -> ?LOG_NONE.
level_to_chr(debug) -> $D;
level_to_chr(info) -> $I;
level_to_chr(notice) -> $N;
level_to_chr(warning) -> $W;
level_to_chr(error) -> $E;
level_to_chr(critical) -> $C;
level_to_chr(alert) -> $A;
level_to_chr(emergency) -> $M;
level_to_chr(none) -> $ .
num_to_level(?DEBUG) -> debug;
num_to_level(?INFO) -> info;
num_to_level(?NOTICE) -> notice;
num_to_level(?WARNING) -> warning;
num_to_level(?ERROR) -> error;
num_to_level(?CRITICAL) -> critical;
num_to_level(?ALERT) -> alert;
num_to_level(?EMERGENCY) -> emergency;
num_to_level(?LOG_NONE) -> none.
num_to_level(?LOG_NONE) -> none.
-spec config_to_mask(atom()|string()) -> {'mask', integer()}.
config_to_mask(Conf) ->
@ -476,6 +487,51 @@ expand_path(RelPath) ->
RelPath
end.
%% Log rate limit, i.e. high water mark for incoming messages
check_hwm(Shaper = #lager_shaper{hwm = undefined}) ->
{true, 0, Shaper};
check_hwm(Shaper = #lager_shaper{mps = Mps, hwm = Hwm}) when Mps < Hwm ->
%% haven't hit high water mark yet, just log it
{true, 0, Shaper#lager_shaper{mps=Mps+1}};
check_hwm(Shaper = #lager_shaper{lasttime = Last, dropped = Drop}) ->
%% are we still in the same second?
{M, S, _} = Now = os:timestamp(),
case Last of
{M, S, _} ->
%% still in same second, but have exceeded the high water mark
NewDrops = discard_messages(Now, 0),
{false, 0, Shaper#lager_shaper{dropped=Drop+NewDrops}};
_ ->
%% different second, reset all counters and allow it
{true, Drop, Shaper#lager_shaper{dropped = 0, mps=1, lasttime = Now}}
end.
discard_messages(Second, Count) ->
{M, S, _} = os:timestamp(),
case Second of
{M, S, _} ->
receive
%% we only discard gen_event notifications, because
%% otherwise we might discard gen_event internal
%% messages, such as trapped EXITs
{notify, _Event} ->
discard_messages(Second, Count+1)
after 0 ->
Count
end;
_ ->
Count
end.
%% @private Build an atom for the gen_event process based on a sink name.
%% For historical reasons, the default gen_event process for lager itself is named
%% `lager_event'. For all other sinks, it is SinkName++`_lager_event'
make_internal_sink_name(lager) ->
?DEFAULT_SINK;
make_internal_sink_name(Sink) ->
list_to_atom(atom_to_list(Sink) ++ "_lager_event").
-ifdef(TEST).
parse_test() ->
@ -733,4 +789,10 @@ expand_path_test() ->
end,
ok.
sink_name_test_() ->
[
?_assertEqual(lager_event, make_internal_sink_name(lager)),
?_assertEqual(audit_lager_event, make_internal_sink_name(audit))
].
-endif.
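
Third-party backends can get the same per-second rate limiting by driving `check_hwm/1` themselves, mirroring what `lager_file_backend` does above; a minimal sketch, assuming the `#lager_shaper{}` record from `lager.hrl` (everything else here, including the module and `emit/1`, is hypothetical):

```erlang
-module(shaped_backend_sketch).
-export([maybe_write/2]).

-include_lib("lager/include/lager.hrl").

%% The state is assumed to carry a shaper initialised as
%% #lager_shaper{hwm = HighWaterMark}; hwm = undefined disables shaping.
-record(state, {shaper :: #lager_shaper{}}).

maybe_write(Message, #state{shaper = Shaper} = State) ->
    case lager_util:check_hwm(Shaper) of
        {true, _Dropped, NewShaper} ->
            %% under the high water mark (or a fresh second); _Dropped is
            %% how many messages were discarded in the previous second,
            %% which lager_file_backend turns into a warning report.
            emit(Message),
            State#state{shaper = NewShaper};
        {false, _, NewShaper} ->
            %% over the limit for the current second: drop silently
            State#state{shaper = NewShaper}
    end.

%% Stand-in for whatever the backend does with an accepted message.
emit(_Message) -> ok.
```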

+ 16
- 0
test/compress_pr_record_test.erl View File

@ -0,0 +1,16 @@
-module(compress_pr_record_test).
-compile([{parse_transform, lager_transform}]).
-record(a, {field1, field2, foo, bar, baz, zyu, zix}).
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-endif.
nested_record_test() ->
A = #a{field1 = "Notice me senpai"},
Pr_A = lager:pr(A, ?MODULE),
Pr_A_Comp = lager:pr(A, ?MODULE, [compress]),
?assertMatch({'$lager_record', a, [{field1, "Notice me senpai"}, {field2, undefined} | _]}, Pr_A),
?assertEqual({'$lager_record', a, [{field1, "Notice me senpai"}]}, Pr_A_Comp).

+ 2
- 2
test/crash.erl View File

@ -10,8 +10,8 @@
-export([start/0]).
-record(state, {
host,
port
host :: term(),
port :: term()
}).
start() ->

+ 274
- 35
test/lager_test_backend.erl View File

@ -1,4 +1,6 @@
%% Copyright (c) 2011-2012 Basho Technologies, Inc. All Rights Reserved.
%% -------------------------------------------------------------------
%%
%% Copyright (c) 2011-2015 Basho Technologies, Inc.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
@ -13,6 +15,8 @@
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
-module(lager_test_backend).
@ -23,12 +27,15 @@
-export([init/1, handle_call/2, handle_event/2, handle_info/2, terminate/2,
code_change/3]).
-record(state, {level, buffer, ignored}).
-record(test, {attrs, format, args}).
-compile([{parse_transform, lager_transform}]).
-define(TEST_SINK_NAME, '__lager_test_sink'). %% <-- used by parse transform
-define(TEST_SINK_EVENT, '__lager_test_sink_lager_event'). %% <-- used by lager API calls and internals for gen_event
-record(state, {level :: list(), buffer :: list(), ignored :: term()}).
-compile({parse_transform, lager_transform}).
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-record(test, {attrs :: list(), format :: list(), args :: list()}).
-export([pop/0, count/0, count_ignored/0, flush/0, print_state/0]).
-endif.
@ -89,28 +96,63 @@ code_change(_OldVsn, State, _Extra) ->
-ifdef(TEST).
pop() ->
gen_event:call(lager_event, ?MODULE, pop).
pop(lager_event).
count() ->
gen_event:call(lager_event, ?MODULE, count).
count(lager_event).
count_ignored() ->
gen_event:call(lager_event, ?MODULE, count_ignored).
count_ignored(lager_event).
flush() ->
gen_event:call(lager_event, ?MODULE, flush).
flush(lager_event).
print_state() ->
gen_event:call(lager_event, ?MODULE, print_state).
print_state(lager_event).
print_bad_state() ->
gen_event:call(lager_event, ?MODULE, print_bad_state).
print_bad_state(lager_event).
pop(Sink) ->
gen_event:call(Sink, ?MODULE, pop).
count(Sink) ->
gen_event:call(Sink, ?MODULE, count).
count_ignored(Sink) ->
gen_event:call(Sink, ?MODULE, count_ignored).
flush(Sink) ->
gen_event:call(Sink, ?MODULE, flush).
print_state(Sink) ->
gen_event:call(Sink, ?MODULE, print_state).
print_bad_state(Sink) ->
gen_event:call(Sink, ?MODULE, print_bad_state).
has_line_numbers() ->
%% are we R15 or greater
Rel = erlang:system_info(otp_release),
{match, [Major]} = re:run(Rel, "(?|(^R(\\d+)[A|B](|0(\\d)))|(^(\\d+)$))", [{capture, [2], list}]),
list_to_integer(Major) >= 15.
% this gets called a LOT - cache the answer
case erlang:get({?MODULE, has_line_numbers}) of
undefined ->
R = otp_version() >= 15,
erlang:put({?MODULE, has_line_numbers}, R),
R;
Bool ->
Bool
end.
otp_version() ->
otp_version(erlang:system_info(otp_release)).
otp_version([$R | Rel]) ->
{Ver, _} = string:to_integer(Rel),
Ver;
otp_version(Rel) ->
{Ver, _} = string:to_integer(Rel),
Ver.
not_running_test() ->
?assertEqual({error, lager_not_running}, lager:log(info, self(), "not running")).
@ -126,6 +168,11 @@ lager_test_() ->
?assertEqual(0, count())
end
},
{"test sink not running",
fun() ->
?assertEqual({error, {sink_not_configured, test}}, lager:log(test, info, self(), "~p", "not running"))
end
},
{"logging works",
fun() ->
lager:warning("test message"),
@ -136,6 +183,16 @@ lager_test_() ->
ok
end
},
{"unsafe logging works",
fun() ->
lager:warning_unsafe("test message"),
?assertEqual(1, count()),
{Level, _Time, Message, _Metadata} = pop(),
?assertMatch(Level, lager_util:level_to_num(warning)),
?assertEqual("test message", Message),
ok
end
},
{"logging with arguments works",
fun() ->
lager:warning("test message ~p", [self()]),
@ -146,6 +203,16 @@ lager_test_() ->
ok
end
},
{"unsafe logging with args works",
fun() ->
lager:warning("test message ~p", [self()]),
?assertEqual(1, count()),
{Level, _Time, Message,_Metadata} = pop(),
?assertMatch(Level, lager_util:level_to_num(warning)),
?assertEqual(lists:flatten(io_lib:format("test message ~p", [self()])), lists:flatten(Message)),
ok
end
},
{"logging works from inside a begin/end block",
fun() ->
?assertEqual(0, count()),
@ -545,6 +612,13 @@ lager_test_() ->
ok
end
},
{"unsafe messages really are not truncated",
fun() ->
lager:info_unsafe("doom, doom has come upon you all ~p", [string:copies("doom", 1500)]),
{_, _, Msg,_Metadata} = pop(),
?assert(length(lists:flatten(Msg)) == 6035)
end
},
{"can't store invalid metadata",
fun() ->
?assertEqual(ok, lager:md([{platypus, gravid}, {sloth, hirsute}, {duck, erroneous}])),
@ -556,6 +630,99 @@ lager_test_() ->
]
}.
extra_sinks_test_() ->
{foreach,
fun setup_sink/0,
fun cleanup/1,
[
{"observe that there is nothing up my sleeve",
fun() ->
?assertEqual(undefined, pop(?TEST_SINK_EVENT)),
?assertEqual(0, count(?TEST_SINK_EVENT))
end
},
{"logging works",
fun() ->
?TEST_SINK_NAME:warning("test message"),
?assertEqual(1, count(?TEST_SINK_EVENT)),
{Level, _Time, Message, _Metadata} = pop(?TEST_SINK_EVENT),
?assertMatch(Level, lager_util:level_to_num(warning)),
?assertEqual("test message", Message),
ok
end
},
{"logging with arguments works",
fun() ->
?TEST_SINK_NAME:warning("test message ~p", [self()]),
?assertEqual(1, count(?TEST_SINK_EVENT)),
{Level, _Time, Message,_Metadata} = pop(?TEST_SINK_EVENT),
?assertMatch(Level, lager_util:level_to_num(warning)),
?assertEqual(lists:flatten(io_lib:format("test message ~p", [self()])), lists:flatten(Message)),
ok
end
},
{"variables inplace of literals in logging statements work",
fun() ->
?assertEqual(0, count(?TEST_SINK_EVENT)),
Attr = [{a, alpha}, {b, beta}],
Fmt = "format ~p",
Args = [world],
?TEST_SINK_NAME:info(Attr, "hello"),
?TEST_SINK_NAME:info(Attr, "hello ~p", [world]),
?TEST_SINK_NAME:info(Fmt, [world]),
?TEST_SINK_NAME:info("hello ~p", Args),
?TEST_SINK_NAME:info(Attr, "hello ~p", Args),
?TEST_SINK_NAME:info([{d, delta}, {g, gamma}], Fmt, Args),
?assertEqual(6, count(?TEST_SINK_EVENT)),
{_Level, _Time, Message, Metadata} = pop(?TEST_SINK_EVENT),
?assertMatch([{a, alpha}, {b, beta}|_], Metadata),
?assertEqual("hello", lists:flatten(Message)),
{_Level, _Time2, Message2, _Metadata2} = pop(?TEST_SINK_EVENT),
?assertEqual("hello world", lists:flatten(Message2)),
{_Level, _Time3, Message3, _Metadata3} = pop(?TEST_SINK_EVENT),
?assertEqual("format world", lists:flatten(Message3)),
{_Level, _Time4, Message4, _Metadata4} = pop(?TEST_SINK_EVENT),
?assertEqual("hello world", lists:flatten(Message4)),
{_Level, _Time5, Message5, _Metadata5} = pop(?TEST_SINK_EVENT),
?assertEqual("hello world", lists:flatten(Message5)),
{_Level, _Time6, Message6, Metadata6} = pop(?TEST_SINK_EVENT),
?assertMatch([{d, delta}, {g, gamma}|_], Metadata6),
?assertEqual("format world", lists:flatten(Message6)),
ok
end
},
{"log messages below the threshold are ignored",
fun() ->
?assertEqual(0, count(?TEST_SINK_EVENT)),
?TEST_SINK_NAME:debug("this message will be ignored"),
?assertEqual(0, count(?TEST_SINK_EVENT)),
?assertEqual(0, count_ignored(?TEST_SINK_EVENT)),
lager_config:set({?TEST_SINK_EVENT, loglevel}, {element(2, lager_util:config_to_mask(debug)), []}),
?TEST_SINK_NAME:debug("this message should be ignored"),
?assertEqual(0, count(?TEST_SINK_EVENT)),
?assertEqual(1, count_ignored(?TEST_SINK_EVENT)),
lager:set_loglevel(?TEST_SINK_EVENT, ?MODULE, undefined, debug),
?assertEqual({?DEBUG bor ?INFO bor ?NOTICE bor ?WARNING bor ?ERROR bor ?CRITICAL bor ?ALERT bor ?EMERGENCY, []}, lager_config:get({?TEST_SINK_EVENT, loglevel})),
?TEST_SINK_NAME:debug("this message should be logged"),
?assertEqual(1, count(?TEST_SINK_EVENT)),
?assertEqual(1, count_ignored(?TEST_SINK_EVENT)),
?assertEqual(debug, lager:get_loglevel(?TEST_SINK_EVENT, ?MODULE)),
ok
end
}
]
}.
setup_sink() ->
error_logger:tty(false),
application:load(lager),
application:set_env(lager, handlers, []),
application:set_env(lager, error_logger_redirect, false),
application:set_env(lager, extra_sinks, [{?TEST_SINK_EVENT, [{handlers, [{?MODULE, info}]}]}]),
lager:start(),
gen_event:call(lager_event, ?MODULE, flush),
gen_event:call(?TEST_SINK_EVENT, ?MODULE, flush).
setup() ->
error_logger:tty(false),
application:load(lager),
@ -579,20 +746,42 @@ crash(Type) ->
test_body(Expected, Actual) ->
case has_line_numbers() of
true ->
FileLine = string:substr(Actual, length(Expected)+1),
Body = string:substr(Actual, 1, length(Expected)),
ExLen = length(Expected),
{Body, Rest} = case length(Actual) > ExLen of
true ->
{string:substr(Actual, 1, ExLen),
string:substr(Actual, (ExLen + 1))};
_ ->
{Actual, []}
end,
?assertEqual(Expected, Body),
case string:substr(FileLine, 1, 6) of
% OTP-17 (and maybe later releases) may tack on additional info
% about the failure, so if Actual starts with Expected (already
% confirmed by having gotten past assertEqual above) and ends
% with " line NNN" we can ignore what's in-between. By extension,
% since there may not be line information appended at all, any
% text we DO find is reportable, but not a test failure.
case Rest of
[] ->
%% sometimes there's no line information...
?assert(true);
" line " ->
?assert(true);
Other ->
?debugFmt("unexpected trailing data ~p", [Other]),
?assert(false)
ok;
_ ->
% isolate the extra data and report it if it's not just
% a line number indicator
case re:run(Rest, "^.*( line \\d+)$", [{capture, [1]}]) of
nomatch ->
?debugFmt(
"Trailing data \"~s\" following \"~s\"",
[Rest, Expected]);
{match, [{0, _}]} ->
% the whole string is " line NNN"
ok;
{match, [{Off, _}]} ->
?debugFmt(
"Trailing data \"~s\" following \"~s\"",
[string:substr(Rest, 1, Off), Expected])
end
end;
false ->
_ ->
?assertEqual(Expected, Actual)
end.
@ -607,7 +796,7 @@ error_logger_redirect_crash_test_() ->
?assertEqual(Pid,proplists:get_value(pid,Metadata)),
?assertEqual(lager_util:level_to_num(error),Level)
end
}
}
end,
{foreach,
fun() ->
@ -709,6 +898,16 @@ error_logger_redirect_test_() ->
?assertEqual(Expected, lists:flatten(Msg))
end
},
{"error messages with unicode characters in Args are printed",
fun() ->
sync_error_logger:error_msg("~ts", ["Привет!"]),
_ = gen_event:which_handlers(error_logger),
{Level, _, Msg,Metadata} = pop(),
?assertEqual(lager_util:level_to_num(error),Level),
?assertEqual(self(),proplists:get_value(pid,Metadata)),
?assertEqual("Привет!", lists:flatten(Msg))
end
},
{"error messages are truncated at 4096 characters",
fun() ->
sync_error_logger:error_msg("doom, doom has come upon you all ~p", [string:copies("doom", 10000)]),
@ -717,6 +916,7 @@ error_logger_redirect_test_() ->
?assert(length(lists:flatten(Msg)) < 5100)
end
},
{"info reports are printed",
fun() ->
sync_error_logger:info_report([{this, is}, a, {silly, format}]),
@ -808,6 +1008,27 @@ error_logger_redirect_test_() ->
?assert(length(lists:flatten(Msg)) < 5100)
end
},
{"info messages with unicode characters in Args are printed",
fun() ->
sync_error_logger:info_msg("~ts", ["Привет!"]),
_ = gen_event:which_handlers(error_logger),
{Level, _, Msg,Metadata} = pop(),
?assertEqual(lager_util:level_to_num(info),Level),
?assertEqual(self(),proplists:get_value(pid,Metadata)),
?assertEqual("Привет!", lists:flatten(Msg))
end
},
{"warning messages with unicode characters in Args are printed",
fun() ->
sync_error_logger:warning_msg("~ts", ["Привет!"]),
Map = error_logger:warning_map(),
_ = gen_event:which_handlers(error_logger),
{Level, _, Msg,Metadata} = pop(),
?assertEqual(lager_util:level_to_num(Map),Level),
?assertEqual(self(),proplists:get_value(pid,Metadata)),
?assertEqual("Привет!", lists:flatten(Msg))
end
},
{"warning messages are printed at the correct level",
fun() ->
@ -1004,7 +1225,7 @@ error_logger_redirect_test_() ->
?assert(length(lists:flatten(Msg)) < 600)
end
},
{"crash reports for 'special processes' should be handled right - function_clause",
{"crash reports for 'special processes' should be handled right - function_clause",
fun() ->
{ok, Pid} = special_process:start(),
unlink(Pid),
@ -1017,7 +1238,7 @@ error_logger_redirect_test_() ->
test_body(Expected, lists:flatten(Msg))
end
},
{"crash reports for 'special processes' should be handled right - case_clause",
{"crash reports for 'special processes' should be handled right - case_clause",
fun() ->
{ok, Pid} = special_process:start(),
unlink(Pid),
@ -1030,7 +1251,7 @@ error_logger_redirect_test_() ->
test_body(Expected, lists:flatten(Msg))
end
},
{"crash reports for 'special processes' should be handled right - exit",
{"crash reports for 'special processes' should be handled right - exit",
fun() ->
{ok, Pid} = special_process:start(),
unlink(Pid),
@ -1043,7 +1264,7 @@ error_logger_redirect_test_() ->
test_body(Expected, lists:flatten(Msg))
end
},
{"crash reports for 'special processes' should be handled right - error",
{"crash reports for 'special processes' should be handled right - error",
fun() ->
{ok, Pid} = special_process:start(),
unlink(Pid),
@ -1155,10 +1376,18 @@ safe_format_test() ->
?assertEqual("FORMAT ERROR: \"~p ~p ~p\" [foo,bar]", lists:flatten(lager:safe_format("~p ~p ~p", [foo, bar], 1024))),
ok.
unsafe_format_test() ->
?assertEqual("foo bar", lists:flatten(lager:unsafe_format("~p ~p", [foo, bar]))),
?assertEqual("FORMAT ERROR: \"~p ~p ~p\" [foo,bar]", lists:flatten(lager:unsafe_format("~p ~p ~p", [foo, bar]))),
ok.
async_threshold_test_() ->
{foreach,
fun() ->
error_logger:tty(false),
ets:new(async_threshold_test, [set, named_table, public]),
ets:insert_new(async_threshold_test, {sync_toggled, 0}),
ets:insert_new(async_threshold_test, {async_toggled, 0}),
application:load(lager),
application:set_env(lager, error_logger_redirect, false),
application:set_env(lager, async_threshold, 2),
@ -1170,6 +1399,7 @@ async_threshold_test_() ->
application:unset_env(lager, async_threshold),
application:stop(lager),
application:stop(goldrush),
ets:delete(async_threshold_test),
error_logger:tty(true)
end,
[
@ -1184,11 +1414,22 @@ async_threshold_test_() ->
%% serialize on mailbox
_ = gen_event:which_handlers(lager_event),
timer:sleep(500),
%% there should be a ton of outstanding messages now, so async is false
?assertEqual(false, lager_config:get(async)),
%% wait for all the workers to return, meaning that all the messages have been logged (since we're in sync mode)
%% By now the flood of messages will have
%% forced the backend throttle to turn off
%% async mode, but it's possible all
%% outstanding requests have been processed,
%% so checking the current status (sync or
%% async) is an exercise in race control.
%% Instead, we'll see whether the backend
%% throttle has toggled into sync mode at any
%% point in the past
?assertMatch([{sync_toggled, N}] when N > 0,
ets:lookup(async_threshold_test, sync_toggled)),
%% wait for all the workers to return, meaning that all the messages have been logged (since we're definitely in sync mode at the end of the run)
collect_workers(Workers),
%% serialize ont the mailbox again
%% serialize on the mailbox again
_ = gen_event:which_handlers(lager_event),
%% just in case...
timer:sleep(1000),
@ -1268,5 +1509,3 @@ high_watermark_test_() ->
}.
-endif.

+ 2
- 4
test/pr_nested_record_test.erl View File

@ -2,13 +2,11 @@
-compile([{parse_transform, lager_transform}]).
-record(a, {field1, field2}).
-record(b, {field1, field2}).
-record(a, {field1 :: term(), field2 :: term()}).
-record(b, {field1 :: term() , field2 :: term()}).
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-endif.
nested_record_test() ->
A = #a{field1 = x, field2 = y},
