
Merge branch 'master' of github.com:basho/lager

pull/160/head
Alexander Ovchinnikov 11 years ago
parent
commit
52e0fa4520
28 changed files with 2882 additions and 916 deletions
  1. +2 -0  .gitignore
  2. +2 -0  .travis.yml
  3. +3 -3  Makefile
  4. +404 -0  README.md
  5. +0 -292  README.org
  6. +12 -12  include/lager.hrl
  7. +5 -1  rebar.config
  8. +151 -37  src/error_logger_lager_h.erl
  9. +29 -9  src/lager.app.src
  10. +183 -60  src/lager.erl
  11. +140 -29  src/lager_app.erl
  12. +78 -0  src/lager_backend_throttle.erl
  13. +64 -0  src/lager_config.erl
  14. +175 -34  src/lager_console_backend.erl
  15. +1 -3  src/lager_crash_log.erl
  16. +40 -20  src/lager_default_formatter.erl
  17. +497 -129  src/lager_file_backend.erl
  18. +25 -12  src/lager_format.erl
  19. +17 -10  src/lager_handler_watcher.erl
  20. +1 -1  src/lager_handler_watcher_sup.erl
  21. +0 -107  src/lager_mochiglobal.erl
  22. +18 -5  src/lager_msg.erl
  23. +4 -0  src/lager_sup.erl
  24. +161 -17  src/lager_transform.erl
  25. +70 -8  src/lager_trunc_io.erl
  26. +293 -94  src/lager_util.erl
  27. +496 -27  test/lager_test_backend.erl
  28. +11 -6  test/trunc_io_eqc.erl

+2 -0  .gitignore

@ -5,3 +5,5 @@ doc
*.swp
erl_crash.dump
.project
log
deps

+2 -0  .travis.yml

@ -3,6 +3,8 @@ notifications:
webhooks: http://basho-engbot.herokuapp.com/travis?key=8c2550739c2f776d4b05d993aa032f0724fe5450
email: eng@basho.com
otp_release:
- R16B
- R15B03
- R15B01
- R15B
- R14B04

+3 -3  Makefile

@ -2,11 +2,11 @@
all: deps compile
compile:
compile: deps
./rebar compile
deps:
./rebar get-deps
test -d deps || ./rebar get-deps
clean:
./rebar clean
@ -24,7 +24,7 @@ docs:
./rebar doc
APPS = kernel stdlib sasl erts ssl tools os_mon runtime_tools crypto inets \
xmerl webtool snmp public_key mnesia eunit syntax_tools compiler
xmerl webtool snmp public_key mnesia eunit
COMBO_PLT = $(HOME)/.riak_combo_dialyzer_plt
check_plt: compile

+404 -0  README.md

@ -0,0 +1,404 @@
Overview
--------
Lager (as in the beer) is a logging framework for Erlang. Its purpose is
to provide a more traditional way to perform logging in an Erlang application
that plays nicely with traditional UNIX logging tools like logrotate and
syslog.
[Travis-CI](http://travis-ci.org/basho/lager) :: ![Travis-CI](https://secure.travis-ci.org/basho/lager.png)
Features
--------
* Finer grained log levels (debug, info, notice, warning, error, critical,
alert, emergency)
* Logger calls are transformed using a parse transform to allow capturing
Module/Function/Line/Pid information
* When no handler is consuming a log level (e.g. debug), no event is even sent
to the log handler
* Supports multiple backends, including console and file.
* Rewrites common OTP error messages into more readable messages
* Support for pretty printing records encountered at compile time
* Tolerant in the face of large or many log messages; it won't run the node out of memory
* Supports internal time and date based rotation, as well as external rotation tools
* Syslog style log level comparison flags
* Colored terminal output (requires R16+)
Usage
-----
To use lager in your application, you need to define it as a rebar dep or have
some other way of including it in Erlang's path. You can then add the
following option to the Erlang compiler flags:
```erlang
{parse_transform, lager_transform}
```
Alternately, you can add it to the module you wish to compile with logging
enabled:
```erlang
-compile([{parse_transform, lager_transform}]).
```
Before logging any messages, you'll need to start the lager application. The
lager module's start function takes care of loading and starting any dependencies
lager requires.
```erlang
lager:start().
```
You can also start lager on startup with a switch to `erl`:
```erlang
erl -pa path/to/lager/ebin -s lager
```
Once you have built your code with lager and started the lager application,
you can then generate log messages by doing the following:
```erlang
lager:error("Some message")
```
Or:
```erlang
lager:warning("Some message with a term: ~p", [Term])
```
The general form is lager:Severity() where Severity is one of the log levels
mentioned above.
Configuration
-------------
To configure lager's backends, you use an application variable (probably in
your app.config):
```erlang
{lager, [
{handlers, [
{lager_console_backend, info},
{lager_file_backend, [{file, "error.log"}, {level, error}]},
{lager_file_backend, [{file, "console.log"}, {level, info}]}
]}
]}.
```
The available configuration options for each backend are listed in their
module's documentation.
Custom Formatting
-----------------
All loggers have a default formatting that can be overridden. A formatter is any module that
exports format(#lager_log_message{},Config#any()). It is specified as part of the configuration
for the backend:
```erlang
{lager, [
{handlers, [
{lager_console_backend, [info, {lager_default_formatter, [time," [",severity,"] ", message, "\n"]}]},
{lager_file_backend, [{file, "error.log"}, {level, error}, {formatter, lager_default_formatter},
{formatter_config, [date, " ", time," [",severity,"] ",pid, " ", message, "\n"]}]},
{lager_file_backend, [{file, "console.log"}, {level, info}]}
]}
]}.
```
Included is lager_default_formatter. This provides a generic, default formatting for log messages using a "semi-iolist"
as configuration. Any iolist allowed elements in the configuration are printed verbatim. Atoms in the configuration
are treated as metadata properties and extracted from the log message.
The metadata properties date, time, message, and severity will always exist.
The properties pid, file, line, module, function, and node will always exist if the parse transform is used.
```
["Foo"] -> "Foo", regardless of message content.
[message] -> The content of the logged message, alone.
[{pid,"Unknown Pid"}] -> "<?.?.?>" if pid is in the metadata, "Unknown Pid" if not.
[{pid, ["My pid is ", pid], "Unknown Pid"}] -> if pid is in the metadata print "My pid is <?.?.?>", otherwise print "Unknown Pid"
```
Optionally, a tuple of {atom(),semi-iolist()}
can be used. The atom will look up the property, but if not found it will use the semi-iolist() instead. These fallbacks
can be nested or refer to other properties.
```
[{pid,"Unknown Pid"}] -> "<?.?.?>" if pid is in the metadata, "Unknown Pid" if not.
[{server,[$(,{pid,"Unknown Server"},$)]}}] -> user provided server metadata, otherwise "(<?.?.?>)", otherwise "(Unknown Server)"
```
Error logger integration
------------------------
Lager is also supplied with an error_logger handler module that translates
traditional Erlang error messages into a friendlier format and sends them into
lager itself to be treated like a regular lager log call. To disable this, set
the lager application variable `error_logger_redirect` to `false`.
The error_logger handler will also log more complete error messages (protected
with use of trunc_io) to a "crash log" which can be referred to for further
information. The location of the crash log can be specified by the crash_log
application variable. If undefined it is not written at all.
Messages in the crash log are subject to a maximum message size which can be
specified via the crash_log_msg_size application variable.
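For reference, here is a minimal sketch of how these two variables might appear in
lager's application environment (the values shown are illustrative, not prescriptive):
```erlang
{lager, [
  %% where to write the crash log; leave it undefined to disable the crash log entirely
  {crash_log, "log/crash.log"},
  %% maximum size, in bytes, of a single crash log entry
  {crash_log_msg_size, 65536}
]}.
```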
Overload Protection
-------------------
Prior to lager 2.0, the gen_event at the core of lager operated purely in
synchronous mode. Asynchronous mode is faster, but has no protection against
message queue overload. In lager 2.0, the gen_event takes a hybrid approach. It
polls its own mailbox size and toggles the messaging between synchronous and
asynchronous depending on mailbox size.
```erlang
{async_threshold, 20},
{async_threshold_window, 5}
```
This will use async messaging until the mailbox exceeds 20 messages, at which
point synchronous messaging will be used; it switches back to asynchronous once
the mailbox size drops to `20 - 5 = 15`.
If you wish to disable this behaviour, simply set it to 'undefined'. It defaults
to a low number to prevent the mailbox from growing rapidly beyond the limit and causing
problems. In general, lager should process messages as fast as they come in, so getting
20 behind should be relatively exceptional anyway.
If you want to limit the number of messages per second allowed from error_logger,
which is a good idea if you want to weather a flood of messages when lots of
related processes crash, you can set a limit:
```erlang
{error_logger_hwm, 50}
```
It is probably best to keep this number small.
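Putting these settings in context, a sketch of how they might sit alongside the handler
configuration in lager's application environment (the threshold values are illustrative):
```erlang
{lager, [
  {handlers, [{lager_console_backend, info}]},
  %% switch the lager_event gen_event to synchronous mode once its mailbox exceeds 20 messages
  {async_threshold, 20},
  %% switch back to asynchronous mode once the mailbox drains below 20 - 5 = 15 messages
  {async_threshold_window, 5},
  %% drop error_logger messages beyond 50 per second
  {error_logger_hwm, 50}
]}.
```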
Runtime loglevel changes
------------------------
You can change the log level of any lager backend at runtime by doing the
following:
```erlang
lager:set_loglevel(lager_console_backend, debug).
```
Or, for a backend with multiple handlers (the file backend, mainly):
```erlang
lager:set_loglevel(lager_file_backend, "console.log", debug).
```
Lager keeps track of the minimum log level being used by any backend and
suppresses generation of messages lower than that level. This means that debug
log messages, when no backend is consuming debug messages, are effectively
free. A simple benchmark of doing 1 million debug log messages while the
minimum threshold was above that takes less than half a second.
Syslog style loglevel comparison flags
--------------------------------------
In addition to the regular log level names, you can also do finer grained masking
of what you want to log:
```
info - info and higher (>= is implicit)
=debug - only the debug level
!=info - everything but the info level
<=notice - notice and below
<warning - anything less than warning
```
These can be used anywhere a loglevel is supplied, although they need to be either
a quoted atom or a string.
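For example, a hedged sketch of these flags used in a handler configuration (the file
name and level choices are illustrative):
```erlang
{lager, [
  {handlers, [
    %% console gets everything except info messages
    {lager_console_backend, '!=info'},
    %% this file receives only debug messages
    {lager_file_backend, [{file, "debug.log"}, {level, '=debug'}]}
  ]}
]}.
```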
Internal log rotation
---------------------
Lager can rotate its own logs or have it done via an external process. To
use internal rotation, use the 'size', 'date' and 'count' values in the file
backend's config:
```erlang
[{name, "error.log"}, {level, error}, {size, 10485760}, {date, "$D0"}, {count, 5}]
```
This tells lager to log error and above messages to "error.log" and to
rotate the file at midnight or when it reaches 10MB, whichever comes first,
and to keep 5 rotated logs in addition to the current one. Setting the
count to 0 does not disable rotation, it instead rotates the file and keeps
no previous versions around. To disable rotation set the size to 0 and the
date to "".
The "$D0" syntax is taken from the syntax newsyslog uses in newsyslog.conf.
The relevant extract follows:
```
Day, week and month time format: The lead-in character
for day, week and month specification is a `$'-sign.
The particular format of day, week and month
specification is: [Dhh], [Ww[Dhh]] and [Mdd[Dhh]],
respectively. Optional time fields default to
midnight. The ranges for day and hour specifications
are:
hh hours, range 0 ... 23
w day of week, range 0 ... 6, 0 = Sunday
dd day of month, range 1 ... 31, or the
letter L or l to specify the last day of
the month.
Some examples:
$D0 rotate every night at midnight
$D23 rotate every day at 23:00 hr
$W0D23 rotate every week on Sunday at 23:00 hr
$W5D16 rotate every week on Friday at 16:00 hr
$M1D0 rotate on the first day of every month at
midnight (i.e., the start of the day)
$M5D6 rotate on every 5th day of the month at
6:00 hr
```
To configure the crash log rotation, the following application variables are
used:
* crash_log_size
* crash_log_date
* crash_log_count
See the .app.src file for further details.
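As a sketch, assuming rotation semantics analogous to the file backend's, these variables
might be configured like so (the values are illustrative):
```erlang
{lager, [
  %% rotate the crash log at 10MB or at midnight, whichever comes first, keeping 5 rotated files
  {crash_log_size, 10485760},
  {crash_log_date, "$D0"},
  {crash_log_count, 5}
]}.
```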
Syslog Support
--------------
Lager syslog output is provided as a separate application;
[lager_syslog](https://github.com/basho/lager_syslog). It is packaged as a
separate application so Lager itself doesn't have an indirect dependency on a
port driver. Please see the lager_syslog README for configuration information.
Older Backends
--------------
Lager 2.0 changed the backend API. There are various third-party backends for
lager available, but they may not have been updated to the new API. As they
are updated, links to them can be re-added here.
Record Pretty Printing
----------------------
Lager's parse transform will keep track of any record definitions it encounters
and store them in the module's attributes. You can then, at runtime, print any
record a module compiled with the lager parse transform knows about by using the
lager:pr/2 function, which takes the record and the module that knows about the record:
```erlang
lager:info("My state is ~p", [lager:pr(State, ?MODULE)])
```
Often, ?MODULE is sufficient, but you can obviously substitute that for a literal module name.
lager:pr also works from the shell.
Colored terminal output
-----------------------
If you have Erlang R16 or higher, you can tell lager's console backend to be colored. Simply
add
```erlang
{colored, true}
```
to lager's application environment config. If you don't like the default colors, they are
also configurable, see the app.src file for more details.
Tracing
-------
Lager provides basic support for redirecting log messages based on log message
attributes. Lager automatically captures the pid, module, function and line at the
log message callsite. However, you can add any additional attributes you wish:
```erlang
lager:warning([{request, RequestID},{vhost, Vhost}], "Permission denied to ~s", [User])
```
Then, in addition to the default trace attributes, you'll be able to trace
based on request or vhost:
```erlang
lager:trace_file("logs/example.com.error", [{vhost, "example.com"}], error)
```
To persist metadata for the life of a process, you can use lager:md/1 to store metadata
in the process dictionary:
```erlang
lager:md([{zone, forbidden}])
```
Note that lager:md will *only* accept a list of key/value pairs keyed by atoms.
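Because lager:md/0 returns the metadata currently stored for the process, a hedged sketch of
adding a key without discarding entries already present is:
```erlang
%% read the process metadata, add or replace the zone key, and write the list back
lager:md(lists:keystore(zone, 1, lager:md(), {zone, forbidden})).
```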
You can also omit the final argument, and the loglevel will default to
'debug'.
Tracing to the console is similar:
```erlang
lager:trace_console([{request, 117}])
```
In the above example, the loglevel is omitted, but it can be specified as the
second argument if desired.
You can also specify multiple expressions in a filter, or use the '*' atom as
a wildcard to match any message that has that attribute, regardless of its
value.
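For instance, a sketch of a wildcard trace that matches any message carrying a vhost
attribute, whatever its value:
```erlang
lager:trace_console([{vhost, '*'}])
```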
Tracing to an existing logfile is also supported, if you wanted to log
warnings from a particular module to the default error.log:
```erlang
lager:trace_file("log/error.log", [{module, mymodule}], warning)
```
To view the active log backends and traces, you can use the lager:status()
function. To clear all active traces, you can use lager:clear_all_traces().
To delete a specific trace, store a handle for the trace when you create it,
and pass it later to lager:stop_trace/1:
```erlang
{ok, Trace} = lager:trace_file("log/error.log", [{module, mymodule}]),
...
lager:stop_trace(Trace)
```
Tracing to a pid is somewhat of a special case, since a pid is not a
data-type that serializes well. To trace by pid, use the pid as a string:
```erlang
lager:trace_console([{pid, "<0.410.0>"}])
```
As of lager 2.0, you can also use a 3-tuple while tracing, where the second
element is a comparison operator. The currently supported comparison operators
are:
* '<' - less than
* '=' - equal to
* '>' - greater than
```erlang
lager:trace_console([{request, '>' 117}, {request, '<' 120}])
```
Using '=' is equivalent to the 2-tuple form.
Setting the truncation limit at compile-time
--------------------------------------------
Lager defaults to truncating messages at 4096 bytes; you can alter this by
using the {lager_truncation_size, X} option. In rebar, you can add it to
erl_opts:
```erlang
{erl_opts, [{parse_transform, lager_transform}, {lager_truncation_size, 1024}]}.
```
You can also pass it to erlc, if you prefer:
```
erlc -pa lager/ebin +'{parse_transform, lager_transform}' +'{lager_truncation_size, 1024}' file.erl
```

+0 -292  README.org

@ -1,292 +0,0 @@
* Overview
Lager (as in the beer) is a logging framework for Erlang. Its purpose is
to provide a more traditional way to perform logging in an erlang application
that plays nicely with traditional UNIX logging tools like logrotate and
syslog. You can view the Lager EDocs [[http://basho.github.com/lager/][here]].
[[http://travis-ci.org/basho/lager][Travis-CI]] :: [[https://secure.travis-ci.org/basho/lager.png]]
* Features
- Finer grained log levels (debug, info, notice, warning, error, critical,
alert, emergency)
- Logger calls are transformed using a parse transform to allow capturing
Module/Function/Line/Pid information
- When no handler is consuming a log level (eg. debug) no event is even sent
to the log handler
- Supports multiple backends, including console and file. More are planned.
* Usage
To use lager in your application, you need to define it as a rebar dep or have
some other way of including it in erlang's path. You can then add the
following option to the erlang compiler flags
#+BEGIN_EXAMPLE
{parse_transform, lager_transform}
#+END_EXAMPLE
Alternately, you can add it to the module you wish to compile with logging
enabled:
#+BEGIN_EXAMPLE
-compile([{parse_transform, lager_transform}]).
#+END_EXAMPLE
Before logging any messages, you'll need to start the lager application. The
lager module's start function takes care of loading and starting any dependencies
lager requires.
#+BEGIN_EXAMPLE
lager:start().
#+END_EXAMPLE
Once you have built your code with lager and started the lager application,
you can then generate log messages by doing the following:
#+BEGIN_EXAMPLE
lager:error("Some message")
#+END_EXAMPLE
Or:
#+BEGIN_EXAMPLE
lager:warning("Some message with a term: ~p", [Term])
#+END_EXAMPLE
The general form is lager:Severity() where Severity is one of the log levels
mentioned above.
* Configuration
To configure lager's backends, you use an application variable (probably in
your app.config):
#+BEGIN_EXAMPLE
{lager, [
{handlers, [
{lager_console_backend, info},
{lager_file_backend, [
{"error.log", error, 10485760, "$D0", 5},
{"console.log", info, 10485760, "$D0", 5}
]}
]}
]}.
#+END_EXAMPLE
The available configuration options for each backend are listed in their
module's documentation.
* Custom Formatting
All loggers have a default formatting that can be overridden. A formatter is any module that
exports format(#lager_log_message{},Config#any()). It is specified as part of the configuration
for the backend:
#+BEGIN_EXAMPLE
{lager, [
{handlers, [
{lager_console_backend, [info, {lager_default_formatter, [time," [",severity,"] ", message, "\n"]}},
{lager_file_backend, [
[{"error.log", error, 10485760, "$D0", 5,{lager_default_formatter,[date, " ", time," [",severity,"] ",pid, " ", message, "\n"]}],
{"console.log", info, 10485760, "$D0", 5}
]}
]}
]}.
#+END_EXAMPLE
Included is lager_default_formatter. This provides a generic, default formatting for log messages using a "semi-iolist"
as configuration. Any iolist allowed elements in the configuration are printed verbatim. Atoms in the configuration
are treated as metadata properties and extracted from the log message.
The metadata properties date, time, message, and severity will always exist.
The properties pid, file, line, module, and function will always exist if the parse transform is used.
#+BEGIN_EXAMPLE
["Foo"] -> "Foo", regardless of message content.
[message] -> The content of the logged message, alone.
[{pid,"Unknown Pid"}] -> "<?.?.?>" if pid is in the metadata, "Unknown Pid" if not.
[{pid, ["My pid is ", pid], "Unknown Pid"}] -> if pid is in the metadata print "My pid is <?.?.?>", otherwise print "Unknown Pid"
#+END_EXAMPLE
Optionally, a tuple of {atom(),semi-iolist()}
can be used. The atom will look up the property, but if not found it will use the semi-iolist() instead. These fallbacks
can be nested or refer to other properties.
#+BEGIN_EXAMPLE
[{pid,"Unknown Pid"}] -> "<?.?.?>" if pid is in the metadata, "Unknown Pid" if not.
[{server,[$(,{pid,"Unknown Server"},$)]}}] -> user provided server metadata, otherwise "(<?.?.?>)", otherwise "(Unknown Server)"
#+END_EXAMPLE
* Error logger integration
Lager is also supplied with an error_logger handler module that translates
traditional erlang error messages into a friendlier format and sends them into
lager itself to be treated like a regular lager log call. To disable this, set
the lager application variable `error_logger_redirect' to `false'.
The error_logger handler will also log more complete error messages (protected
with use of trunc_io) to a "crash log" which can be referred to for further
information. The location of the crash log can be specified by the crash_log
application variable. If undefined it is not written at all.
Messages in the crash log are subject to a maximum message size which can be
specified via the crash_log_msg_size application variable.
* Runtime loglevel changes
You can change the log level of any lager backend at runtime by doing the
following:
#+BEGIN_EXAMPLE
lager:set_loglevel(lager_console_backend, debug).
#+END_EXAMPLE
Or, for the backend with multiple handles (files, mainly):
#+BEGIN_EXAMPLE
lager:set_loglevel(lager_file_backend, "console.log", debug).
#+END_EXAMPLE
Lager keeps track of the minimum log level being used by any backend and
suppresses generation of messages lower than that level. This means that debug
log messages, when no backend is consuming debug messages, are effectively
free. A simple benchmark of doing 1 million debug log messages while the
minimum threshold was above that takes less than half a second.
* Internal log rotation
Lager can rotate its own logs or have it done via an external process. To
use internal rotation, use the last 3 values in the file backend's
configuration tuple. For example
#+BEGIN_EXAMPLE
{"error.log", error, 10485760, "$D0", 5}
#+END_EXAMPLE
This tells lager to log error and above messages to "error.log" and to
rotate the file at midnight or when it reaches 10MB, whichever comes first,
and to keep 5 rotated logs, in addition to the current one. Setting the
count to 0 does not disable rotation, it instead rotates the file and keeps
no previous versions around. To disable rotation set the size to 0 and the
date to "".
The "$D0" syntax is taken from the syntax newsyslog uses in newsyslog.conf.
The relevant extract follows:
#+BEGIN_EXAMPLE
Day, week and month time format: The lead-in character
for day, week and month specification is a `$'-sign.
The particular format of day, week and month
specification is: [Dhh], [Ww[Dhh]] and [Mdd[Dhh]],
respectively. Optional time fields default to
midnight. The ranges for day and hour specifications
are:
hh hours, range 0 ... 23
w day of week, range 0 ... 6, 0 = Sunday
dd day of month, range 1 ... 31, or the
letter L or l to specify the last day of
the month.
Some examples:
$D0 rotate every night at midnight
$D23 rotate every day at 23:00 hr
$W0D23 rotate every week on Sunday at 23:00 hr
$W5D16 rotate every week on Friday at 16:00 hr
$M1D0 rotate on the first day of every month at
midnight (i.e., the start of the day)
$M5D6 rotate on every 5th day of the month at
6:00 hr
#+END_EXAMPLE
To configure the crash log rotation, the following application variables are
used:
- crash_log_size
- crash_log_date
- crash_log_count
See the .app.src file for further details.
* Syslog Support
Lager syslog output is provided as a separate application;
[[https://github.com/basho/lager_syslog][lager_syslog]]. It is packaged as a
separate application so Lager itself doesn't have an indirect dependency on a
port driver. Please see the lager_syslog README for configuration information.
* AMQP Support
Jon Brisbin has written a lager backend to send lager messages into AMQP, so
you can aggregate logs from a cluster into a central point. You can find it
under the [[https://github.com/jbrisbin/lager_amqp_backend][lager_amqp_backend]]
project on github.
* Loggly Support
The team at [[https://www.kivra.com][KIVRA]] has written a lager backend to send
lager messages into [[http://www.loggly.com][Loggly]]. You can find it
under the [[https://github.com/kivra/lager_loggly][lager_loggly]]
project on github.
* Tracing
Lager provides basic support for redirecting log messages based on log message
attributes. Lager automatically captures the pid, module, function and line at the
log message callsite. However, you can add any additional attributes you wish:
#+BEGIN_EXAMPLE
lager:warning([{request, RequestID},{vhost, Vhost}], "Permission denied to ~s", [User])
#+END_EXAMPLE
Then, in addition to the default trace attributes, you'll be able to trace
based on request or vhost:
#+BEGIN_EXAMPLE
lager:trace_file("logs/example.com.error", [{vhost, "example.com"}], error)
#+END_EXAMPLE
You can also omit the final argument, and the loglevel will default to
'debug'.
Tracing to the console is similar:
#+BEGIN_EXAMPLE
lager:trace_console([{request, 117}])
#+END_EXAMPLE
In the above example, the loglevel is omitted, but it can be specified as the
second argument if desired.
You can also specify multiple expressions in a filter, or use the '*' atom as
a wildcard to match any message that has that attribute, regardless of its
value.
Tracing to an existing logfile is also supported, if you wanted to log
warnings from a particular module to the default error.log:
#+BEGIN_EXAMPLE
lager:trace_file("log/error.log", [{module, mymodule}], warning)
#+END_EXAMPLE
To view the active log backends and traces, you can use the lager:status()
function. To clear all active traces, you can use lager:clear_all_traces().
To delete a specific trace, store a handle for the trace when you create it,
that you later pass to lager:stop_trace/1:
#+BEGIN_EXAMPLE
{ok, Trace} = lager:trace_file("log/error.log", [{module, mymodule}]),
...
lager:stop_trace(Trace)
#+END_EXAMPLE
Tracing to a pid is somewhat of a special case, since a pid is not a
data-type that serializes well. To trace by pid, use the pid as a string:
#+BEGIN_EXAMPLE
lager:trace_console([{pid, "<0.410.0>"}])
#+END_EXAMPLE
* Setting the truncation limit at compile-time
Lager defaults to truncating messages at 4096 bytes; you can alter this by
using the {lager_truncation_size, X} option. In rebar, you can add it to
erl_opts:
#+BEGIN_EXAMPLE
{erl_opts, [{parse_transform, lager_transform}, {lager_truncation_size, 1024}]}.
#+END_EXAMPLE
You can also pass it to erlc, if you prefer:
#+BEGIN_EXAMPLE
erlc -pa lager/ebin +'{parse_transform, lager_transform}' +'{lager_truncation_size, 1024}' file.erl
#+END_EXAMPLE

+12 -12  include/lager.hrl

@ -16,19 +16,20 @@
-define(DEFAULT_TRUNCATION, 4096).
-define(DEFAULT_TRACER, lager_default_tracer).
-define(LEVELS,
[debug, info, notice, warning, error, critical, alert, emergency, none]).
-define(DEBUG, 7).
-define(INFO, 6).
-define(NOTICE, 5).
-define(WARNING, 4).
-define(ERROR, 3).
-define(CRITICAL, 2).
-define(ALERT, 1).
-define(EMERGENCY, 0).
-define(LOG_NONE, -1).
-define(DEBUG, 128).
-define(INFO, 64).
-define(NOTICE, 32).
-define(WARNING, 16).
-define(ERROR, 8).
-define(CRITICAL, 4).
-define(ALERT, 2).
-define(EMERGENCY, 1).
-define(LOG_NONE, 0).
-define(LEVEL2NUM(Level),
case Level of
@ -55,11 +56,10 @@
end).
-define(SHOULD_LOG(Level),
lager_util:level_to_num(Level) =< element(1, lager_mochiglobal:get(loglevel, {?LOG_NONE, []}))).
(lager_util:level_to_num(Level) band element(1, lager_config:get(loglevel, {?LOG_NONE, []}))) /= 0).
-define(NOTIFY(Level, Pid, Format, Args),
gen_event:notify(lager_event, {log, lager_msg:new(io_lib:format(Format, Args),
lager_util:format_time(),
Level,
[{pid,Pid},{line,?LINE},{file,?FILE},{module,?MODULE}],
[])}
@ -84,7 +84,7 @@
%% from a gen_event handler
spawn(fun() ->
case catch(gen_event:which_handlers(lager_event)) of
X when X == []; X == {'EXIT', noproc} ->
X when X == []; X == {'EXIT', noproc}; X == [lager_backend_throttle] ->
%% there's no handlers yet or lager isn't running, try again
%% in half a second.
timer:sleep(500),

+5 -1  rebar.config

@ -1,5 +1,9 @@
{erl_opts, [debug_info]}.
{erl_first_files, ["src/lager_util.erl"]}.
{deps, [
{goldrush, ".*",
{git, "git://github.com/DeadZen/goldrush.git", {tag, "879c69874a"}}}
]}.
{cover_enabled, true}.
{edoc_opts, [{stylesheet_file, "./priv/edoc.css"}]}.
{edoc_opts, [{stylesheet_file, "./priv/edoc.css"}]}.

+151 -37  src/error_logger_lager_h.erl

@ -27,12 +27,24 @@
-behaviour(gen_event).
-export([set_high_water/1]).
-export([init/1, handle_call/2, handle_event/2, handle_info/2, terminate/2,
code_change/3]).
-export([format_reason/1]).
-define(LOG(Level, Pid, Msg),
-record(state, {
%% how many messages per second we try to deliver
hwm = undefined :: 'undefined' | pos_integer(),
%% how many messages we've received this second
mps = 0 :: non_neg_integer(),
%% the current second
lasttime = os:timestamp() :: erlang:timestamp(),
%% count of dropped messages this second
dropped = 0 :: non_neg_integer()
}).
-define(LOGMSG(Level, Pid, Msg),
case ?SHOULD_LOG(Level) of
true ->
_ =lager:log(Level, Pid, Msg),
@ -40,7 +52,7 @@
_ -> ok
end).
-define(LOG(Level, Pid, Fmt, Args),
-define(LOGFMT(Level, Pid, Fmt, Args),
case ?SHOULD_LOG(Level) of
true ->
_ = lager:log(Level, Pid, Fmt, Args),
@ -49,6 +61,7 @@
end).
-ifdef(TEST).
-compile(export_all).
%% Make CRASH synchronous when testing, to avoid timing headaches
-define(CRASH_LOG(Event),
catch(gen_server:call(lager_crash_log, {log, Event}))).
@ -57,14 +70,77 @@
gen_server:cast(lager_crash_log, {log, Event})).
-endif.
-spec init(any()) -> {ok, {}}.
init(_) ->
{ok, {}}.
set_high_water(N) ->
gen_event:call(error_logger, ?MODULE, {set_high_water, N}, infinity).
-spec init(any()) -> {ok, #state{}}.
init([HighWaterMark]) ->
{ok, #state{hwm=HighWaterMark}}.
handle_call({set_high_water, N}, State) ->
{ok, ok, State#state{hwm = N}};
handle_call(_Request, State) ->
{ok, ok, State}.
{ok, unknown_call, State}.
handle_event(Event, State) ->
case check_hwm(State) of
{true, NewState} ->
log_event(Event, NewState);
{false, NewState} ->
{ok, NewState}
end.
handle_info(_Info, State) ->
{ok, State}.
terminate(_Reason, _State) ->
ok.
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
%% internal functions
check_hwm(State = #state{hwm = undefined}) ->
{true, State};
check_hwm(State = #state{mps = Mps, hwm = Hwm}) when Mps < Hwm ->
%% haven't hit high water mark yet, just log it
{true, State#state{mps=Mps+1}};
check_hwm(State = #state{hwm = Hwm, lasttime = Last, dropped = Drop}) ->
%% are we still in the same second?
{M, S, _} = Now = os:timestamp(),
case Last of
{M, S, _} ->
%% still in same second, but have exceeded the high water mark
NewDrops = discard_messages(Now, 0),
{false, State#state{dropped=Drop+NewDrops}};
_ ->
%% different second, reset all counters and allow it
case Drop > 0 of
true ->
?LOGFMT(warning, self(), "lager_error_logger_h dropped ~p messages in the last second that exceeded the limit of ~p messages/sec",
[Drop, Hwm]);
false ->
ok
end,
{true, State#state{dropped = 0, mps=1, lasttime = Now}}
end.
discard_messages(Second, Count) ->
{M, S, _} = os:timestamp(),
case Second of
{M, S, _} ->
receive
_Msg ->
discard_messages(Second, Count+1)
after 0 ->
Count
end;
_ ->
Count
end.
log_event(Event, State) ->
case Event of
{error, _GL, {Pid, Fmt, Args}} ->
case Fmt of
@ -72,88 +148,105 @@ handle_event(Event, State) ->
%% gen_server terminate
[Name, _Msg, _State, Reason] = Args,
?CRASH_LOG(Event),
?LOG(error, Pid, "gen_server ~w terminated with reason: ~s",
?LOGFMT(error, Pid, "gen_server ~w terminated with reason: ~s",
[Name, format_reason(Reason)]);
"** State machine "++_ ->
%% gen_fsm terminate
[Name, _Msg, StateName, _StateData, Reason] = Args,
?CRASH_LOG(Event),
?LOG(error, Pid, "gen_fsm ~w in state ~w terminated with reason: ~s",
?LOGFMT(error, Pid, "gen_fsm ~w in state ~w terminated with reason: ~s",
[Name, StateName, format_reason(Reason)]);
"** gen_event handler"++_ ->
%% gen_event handler terminate
[ID, Name, _Msg, _State, Reason] = Args,
?CRASH_LOG(Event),
?LOG(error, Pid, "gen_event ~w installed in ~w terminated with reason: ~s",
?LOGFMT(error, Pid, "gen_event ~w installed in ~w terminated with reason: ~s",
[ID, Name, format_reason(Reason)]);
"** Cowboy handler"++_ ->
%% Cowboy HTTP server error
?CRASH_LOG(Event),
case Args of
[Module, Function, Arity, _Request, _State] ->
%% we only get the 5-element list when its a non-exported function
?LOGFMT(error, Pid,
"Cowboy handler ~p terminated with reason: call to undefined function ~p:~p/~p",
[Module, Module, Function, Arity]);
[Module, Function, Arity, _Class, Reason | Tail] ->
%% any other cowboy error_format list *always* ends with the stacktrace
StackTrace = lists:last(Tail),
?LOGFMT(error, Pid,
"Cowboy handler ~p terminated in ~p:~p/~p with reason: ~s",
[Module, Module, Function, Arity, format_reason({Reason, StackTrace})])
end;
"webmachine error"++_ ->
%% Webmachine HTTP server error
?CRASH_LOG(Event),
[Path, Error] = Args,
%% webmachine likes to mangle the stack, for some reason
StackTrace = case Error of
{error, {error, Reason, Stack}} ->
{Reason, Stack};
_ ->
Error
end,
?LOGFMT(error, Pid, "Webmachine error at path ~p : ~s", [Path, format_reason(StackTrace)]);
_ ->
?CRASH_LOG(Event),
?LOG(error, Pid, lager:safe_format(Fmt, Args, ?DEFAULT_TRUNCATION))
?LOGMSG(error, Pid, lager:safe_format(Fmt, Args, ?DEFAULT_TRUNCATION))
end;
{error_report, _GL, {Pid, std_error, D}} ->
?CRASH_LOG(Event),
?LOG(error, Pid, print_silly_list(D));
?LOGMSG(error, Pid, print_silly_list(D));
{error_report, _GL, {Pid, supervisor_report, D}} ->
?CRASH_LOG(Event),
case lists:sort(D) of
[{errorContext, Ctx}, {offender, Off}, {reason, Reason}, {supervisor, Name}] ->
Offender = format_offender(Off),
?LOG(error, Pid,
?LOGFMT(error, Pid,
"Supervisor ~w had child ~s exit with reason ~s in context ~w",
[element(2, Name), Offender, format_reason(Reason), Ctx]);
[supervisor_name(Name), Offender, format_reason(Reason), Ctx]);
_ ->
?LOG(error, Pid, "SUPERVISOR REPORT " ++ print_silly_list(D))
?LOGMSG(error, Pid, "SUPERVISOR REPORT " ++ print_silly_list(D))
end;
{error_report, _GL, {Pid, crash_report, [Self, Neighbours]}} ->
?CRASH_LOG(Event),
?LOG(error, Pid, "CRASH REPORT " ++ format_crash_report(Self, Neighbours));
?LOGMSG(error, Pid, "CRASH REPORT " ++ format_crash_report(Self, Neighbours));
{warning_msg, _GL, {Pid, Fmt, Args}} ->
?LOG(warning, Pid, lager:safe_format(Fmt, Args, ?DEFAULT_TRUNCATION));
?LOGMSG(warning, Pid, lager:safe_format(Fmt, Args, ?DEFAULT_TRUNCATION));
{warning_report, _GL, {Pid, std_warning, Report}} ->
?LOG(warning, Pid, print_silly_list(Report));
?LOGMSG(warning, Pid, print_silly_list(Report));
{info_msg, _GL, {Pid, Fmt, Args}} ->
?LOG(info, Pid, lager:safe_format(Fmt, Args, ?DEFAULT_TRUNCATION));
?LOGMSG(info, Pid, lager:safe_format(Fmt, Args, ?DEFAULT_TRUNCATION));
{info_report, _GL, {Pid, std_info, D}} when is_list(D) ->
Details = lists:sort(D),
case Details of
[{application, App}, {exited, Reason}, {type, _Type}] ->
?LOG(info, Pid, "Application ~w exited with reason: ~s",
?LOGFMT(info, Pid, "Application ~w exited with reason: ~s",
[App, format_reason(Reason)]);
_ ->
?LOG(info, Pid, print_silly_list(D))
?LOGMSG(info, Pid, print_silly_list(D))
end;
{info_report, _GL, {Pid, std_info, D}} ->
?LOG(info, Pid, "~w", [D]);
?LOGFMT(info, Pid, "~w", [D]);
{info_report, _GL, {P, progress, D}} ->
Details = lists:sort(D),
case Details of
[{application, App}, {started_at, Node}] ->
?LOG(info, P, "Application ~w started on node ~w",
?LOGFMT(info, P, "Application ~w started on node ~w",
[App, Node]);
[{started, Started}, {supervisor, Name}] ->
MFA = format_mfa(proplists:get_value(mfargs, Started)),
Pid = proplists:get_value(pid, Started),
?LOG(debug, P, "Supervisor ~w started ~s at pid ~w",
[element(2, Name), MFA, Pid]);
?LOGFMT(debug, P, "Supervisor ~w started ~s at pid ~w",
[supervisor_name(Name), MFA, Pid]);
_ ->
?LOG(info, P, "PROGRESS REPORT " ++ print_silly_list(D))
?LOGMSG(info, P, "PROGRESS REPORT " ++ print_silly_list(D))
end;
_ ->
?LOG(warning, self(), "Unexpected error_logger event ~w", [Event])
?LOGFMT(warning, self(), "Unexpected error_logger event ~w", [Event])
end,
{ok, State}.
handle_info(_Info, State) ->
{ok, State}.
terminate(_Reason, _State) ->
ok.
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
%% internal functions
format_crash_report(Report, Neighbours) ->
Name = case proplists:get_value(registered_name, Report, []) of
[] ->
@ -311,3 +404,24 @@ print_silly_list([H|T], Fmt, Acc) ->
print_val(Val) ->
{Str, _} = lager_trunc_io:print(Val, 500),
Str.
supervisor_name({local, Name}) -> Name;
supervisor_name(Name) -> Name.
-ifdef(TEST).
%% Not intended to be a fully paranoid EUnit test....
t0() ->
application:stop(lager),
application:stop(sasl),
application:start(sasl),
application:start(lager),
set_high_water(5),
[error_logger:warning_msg("Foo ~p!", [X]) || X <- lists:seq(1,10)],
timer:sleep(1000),
[error_logger:warning_msg("Bar ~p!", [X]) || X <- lists:seq(1,10)],
timer:sleep(1000),
error_logger:warning_msg("Baz!"),
ok.
-endif. % TEST

+29 -9  src/lager.app.src

@ -3,25 +3,38 @@
{application, lager,
[
{description, "Erlang logging framework"},
{vsn, "1.2.1"},
{vsn, "2.0.0"},
{modules, []},
{applications, [
kernel,
stdlib,
compiler,
syntax_tools
stdlib
]},
{registered, []},
{registered, [lager_sup, lager_event, lager_crash_log, lager_handler_watcher_sup]},
{included_applications, [goldrush]},
{mod, {lager_app, []}},
{env, [
%% What handlers to install with what arguments
{handlers, [
{lager_console_backend, info},
{lager_file_backend, [
{"log/error.log", error, 10485760, "$D0", 5},
{"log/console.log", info, 10485760, "$D0", 5}
]}
{file, "log/error.log"}, {level, error}, {size, 10485760}, {date, "$D0"}, {count, 5}]},
{lager_file_backend, [
{file, "log/console.log"}, {level, info}, {size, 10485760}, {date, "$D0"}, {count, 5}]}
]},
%% What colors to use with what log levels
{colored, false},
{colors, [
{debug, "\e[0;38m" },
{info, "\e[1;37m" },
{notice, "\e[1;36m" },
{warning, "\e[1;33m" },
{error, "\e[1;31m" },
{critical, "\e[1;35m" },
{alert, "\e[1;44m" },
{emergency, "\e[1;41m" }
]},
%% Whether to write a crash log, and where. Undefined means no crash logger.
{crash_log, "log/crash.log"},
%% Maximum size in bytes of events in the crash log - defaults to 65536
@ -36,6 +49,13 @@
%% current one - default is 0
{crash_log_count, 5},
%% Whether to redirect error_logger messages into lager - defaults to true
{error_logger_redirect, true}
{error_logger_redirect, true},
%% How many messages per second to allow from error_logger before we start dropping them
{error_logger_hwm, 50},
%% How big the gen_event mailbox can get before it is switched into sync mode
{async_threshold, 20},
%% Switch back to async mode, when gen_event mailbox size decrease from `async_threshold'
%% to async_threshold - async_threshold_window
{async_threshold_window, 5}
]}
]}.

+ 183
- 60
src/lager.erl 查看文件

@ -19,16 +19,19 @@
-module(lager).
-include("lager.hrl").
-include_lib("eunit/include/eunit.hrl").
-define(LAGER_MD_KEY, '__lager_metadata').
%% API
-export([start/0,
log/3, log/4,
trace_file/2, trace_file/3, trace_console/1, trace_console/2,
clear_all_traces/0, stop_trace/1, status/0,
md/0, md/1,
trace/2, trace/3, trace_file/2, trace_file/3, trace_console/1, trace_console/2,
clear_all_traces/0, stop_trace/1, status/0,
get_loglevel/1, set_loglevel/2, set_loglevel/3, get_loglevels/0,
minimum_loglevel/1, posix_error/1,
safe_format/3, safe_format_chop/3,dispatch_log/5]).
update_loglevel_config/0, posix_error/1,
safe_format/3, safe_format_chop/3, dispatch_log/5, dispatch_log/9,
do_log/9, pr/2]).
-type log_level() :: debug | info | notice | warning | error | critical | alert | emergency.
-type log_level_number() :: 0..7.
@ -52,35 +55,77 @@ start_ok(App, {error, {not_started, Dep}}) ->
start_ok(App, {error, Reason}) ->
erlang:error({app_start_failed, App, Reason}).
%% @doc Get lager metadata for current process
-spec md() -> [{atom(), any()}].
md() ->
case erlang:get(?LAGER_MD_KEY) of
undefined -> [];
MD -> MD
end.
%% @doc Set lager metadata for current process.
%% Will badarg if you don't supply a list of {key, value} tuples keyed by atoms.
-spec md([{atom(), any()},...]) -> ok.
md(NewMD) when is_list(NewMD) ->
%% make sure its actually a real proplist
case lists:all(
fun({Key, _Value}) when is_atom(Key) -> true;
(_) -> false
end, NewMD) of
true ->
erlang:put(?LAGER_MD_KEY, NewMD),
ok;
false ->
erlang:error(badarg)
end;
md(_) ->
erlang:error(badarg).
-spec dispatch_log(log_level(), list(), string(), list() | none, pos_integer()) -> ok | {error, lager_not_running}.
%% this is the same check that the parse transform bakes into the module at compile time
dispatch_log(Severity, Metadata, Format, Args, Size) when is_atom(Severity)->
case whereis(lager_event) of
undefined ->
%% lager isn't running
SeverityAsInt=lager_util:level_to_num(Severity),
case {whereis(lager_event), lager_config:get(loglevel, {?LOG_NONE, []})} of
{undefined, _} ->
{error, lager_not_running};
Pid ->
{LevelThreshold,TraceFilters} = lager_mochiglobal:get(loglevel,{?LOG_NONE,[]}),
SeverityAsInt=lager_util:level_to_num(Severity),
Destinations = case TraceFilters of
[] -> [];
_ ->
lager_util:check_traces(Metadata,SeverityAsInt,TraceFilters,[])
{Pid, {Level, Traces}} when (Level band SeverityAsInt) /= 0 orelse Traces /= [] ->
do_log(Severity, Metadata, Format, Args, Size, SeverityAsInt, Level, Traces, Pid);
_ ->
ok
end.
%% @private Should only be called externally from code generated from the parse transform
do_log(Severity, Metadata, Format, Args, Size, SeverityAsInt, LevelThreshold, TraceFilters, Pid) when is_atom(Severity) ->
Destinations = case TraceFilters of
[] ->
[];
_ ->
lager_util:check_traces(Metadata,SeverityAsInt,TraceFilters,[])
end,
case (LevelThreshold band SeverityAsInt) /= 0 orelse Destinations /= [] of
true ->
Msg = case Args of
A when is_list(A) ->
safe_format_chop(Format,Args,Size);
_ ->
Format
end,
case (LevelThreshold >= SeverityAsInt orelse Destinations =/= []) of
true ->
Timestamp = lager_util:format_time(),
Msg=case Args of
A when is_list(A) ->safe_format_chop(Format,Args,Size);
_ -> Format
end,
gen_event:sync_notify(Pid, {log, lager_msg:new(Msg, Timestamp,
Severity, Metadata, Destinations)});
_ ->
ok
end
LagerMsg = lager_msg:new(Msg,
Severity, Metadata, Destinations),
case lager_config:get(async, false) of
true ->
gen_event:notify(Pid, {log, LagerMsg});
false ->
gen_event:sync_notify(Pid, {log, LagerMsg})
end;
false ->
ok
end.
%% backwards compatible with beams compiled with lager 1.x
dispatch_log(Severity, _Module, _Function, _Line, _Pid, Metadata, Format, Args, Size) ->
dispatch_log(Severity, Metadata, Format, Args, Size).
%% @doc Manually log a message into lager without using the parse transform.
-spec log(log_level(), pid() | atom() | [tuple(),...], list()) -> ok | {error, lager_not_running}.
log(Level, Pid, Message) when is_pid(Pid); is_atom(Pid) ->
@ -114,14 +159,7 @@ trace_file(File, Filter, Level) ->
end,
case Res of
{ok, _} ->
%% install the trace.
{MinLevel, Traces} = lager_mochiglobal:get(loglevel),
case lists:member(Trace, Traces) of
false ->
lager_mochiglobal:put(loglevel, {MinLevel, [Trace|Traces]});
_ ->
ok
end,
add_trace_to_loglevel_config(Trace),
{ok, Trace};
{error, _} = E ->
E
@ -130,28 +168,32 @@ trace_file(File, Filter, Level) ->
Error
end.
trace_console(Filter) ->
trace_console(Filter, debug).
trace_console(Filter, Level) ->
Trace0 = {Filter, Level, lager_console_backend},
trace(lager_console_backend, Filter, Level).
trace(Backend, Filter) ->
trace(Backend, Filter, debug).
trace(Backend, Filter, Level) ->
Trace0 = {Filter, Level, Backend},
case lager_util:validate_trace(Trace0) of
{ok, Trace} ->
{MinLevel, Traces} = lager_mochiglobal:get(loglevel),
case lists:member(Trace, Traces) of
false ->
lager_mochiglobal:put(loglevel, {MinLevel, [Trace|Traces]});
_ -> ok
end,
add_trace_to_loglevel_config(Trace),
{ok, Trace};
Error ->
Error
end.
stop_trace({_Filter, _Level, Target} = Trace) ->
{MinLevel, Traces} = lager_mochiglobal:get(loglevel),
{Level, Traces} = lager_config:get(loglevel),
NewTraces = lists:delete(Trace, Traces),
lager_mochiglobal:put(loglevel, {MinLevel, NewTraces}),
lager_util:trace_filter([ element(1, T) || T <- NewTraces ]),
%MinLevel = minimum_loglevel(get_loglevels() ++ get_trace_levels(NewTraces)),
lager_config:set(loglevel, {Level, NewTraces}),
case get_loglevel(Target) of
none ->
%% check no other traces point here
@ -167,8 +209,9 @@ stop_trace({_Filter, _Level, Target} = Trace) ->
ok.
clear_all_traces() ->
{MinLevel, _Traces} = lager_mochiglobal:get(loglevel),
lager_mochiglobal:put(loglevel, {MinLevel, []}),
{Level, _Traces} = lager_config:get(loglevel),
lager_util:trace_filter(none),
lager_config:set(loglevel, {Level, []}),
lists:foreach(fun(Handler) ->
case get_loglevel(Handler) of
none ->
@ -180,6 +223,10 @@ clear_all_traces() ->
status() ->
Handlers = gen_event:which_handlers(lager_event),
TraceCount = case length(element(2, lager_config:get(loglevel))) of
0 -> 1;
N -> N
end,
Status = ["Lager status:\n",
[begin
Level = get_loglevel(Handler),
@ -194,35 +241,58 @@ status() ->
end || Handler <- Handlers],
"Active Traces:\n",
[begin
LevelName = case Level of
{mask, Mask} ->
case lager_util:mask_to_levels(Mask) of
[] -> none;
Levels -> hd(Levels)
end;
Num ->
lager_util:num_to_level(Num)
end,
io_lib:format("Tracing messages matching ~p at level ~p to ~p\n",
[Filter, lager_util:num_to_level(Level), Destination])
end || {Filter, Level, Destination} <- element(2, lager_mochiglobal:get(loglevel))]],
[Filter, LevelName, Destination])
end || {Filter, Level, Destination} <- element(2, lager_config:get(loglevel))],
[
"Tracing Reductions:\n",
case ?DEFAULT_TRACER:info('query') of
{null, false} -> "";
Query -> io_lib:format("~p~n", [Query])
end
],
[
"Tracing Statistics:\n ",
[ begin
[" ", atom_to_list(Table), ": ",
integer_to_list(?DEFAULT_TRACER:info(Table) div TraceCount),
"\n"]
end || Table <- [input, output, filter] ]
]],
io:put_chars(Status).
%% @doc Set the loglevel for a particular backend.
set_loglevel(Handler, Level) when is_atom(Level) ->
Reply = gen_event:call(lager_event, Handler, {set_loglevel, Level}, infinity),
%% recalculate min log level
MinLog = minimum_loglevel(get_loglevels()),
{_, Traces} = lager_mochiglobal:get(loglevel),
lager_mochiglobal:put(loglevel, {MinLog, Traces}),
update_loglevel_config(),
Reply.
%% @doc Set the loglevel for a particular backend that has multiple identifiers
%% (eg. the file backend).
set_loglevel(Handler, Ident, Level) when is_atom(Level) ->
io:format("handler: ~p~n", [{Handler, Ident}]),
Reply = gen_event:call(lager_event, {Handler, Ident}, {set_loglevel, Level}, infinity),
%% recalculate min log level
MinLog = minimum_loglevel(get_loglevels()),
{_, Traces} = lager_mochiglobal:get(loglevel),
lager_mochiglobal:put(loglevel, {MinLog, Traces}),
update_loglevel_config(),
Reply.
%% @doc Get the loglevel for a particular backend. In the case that the backend
%% has multiple identifiers, the lowest is returned
get_loglevel(Handler) ->
case gen_event:call(lager_event, Handler, get_loglevel, infinity) of
{mask, Mask} ->
case lager_util:mask_to_levels(Mask) of
[] -> none;
Levels -> hd(Levels)
end;
X when is_integer(X) ->
lager_util:num_to_level(X);
Y -> Y
@ -244,10 +314,33 @@ get_loglevels() ->
Handler <- gen_event:which_handlers(lager_event)].
%% @private
minimum_loglevel([]) ->
-1; %% lower than any log level, logging off
add_trace_to_loglevel_config(Trace) ->
{MinLevel, Traces} = lager_config:get(loglevel),
case lists:member(Trace, Traces) of
false ->
NewTraces = [Trace|Traces],
lager_util:trace_filter([ element(1, T) || T <- NewTraces]),
lager_config:set(loglevel, {MinLevel, [Trace|Traces]});
_ ->
ok
end.
%% @doc recalculate min log level
update_loglevel_config() ->
{_, Traces} = lager_config:get(loglevel),
MinLog = minimum_loglevel(get_loglevels()),
lager_config:set(loglevel, {MinLog, Traces}).
%% @private
minimum_loglevel(Levels) ->
erlang:hd(lists:reverse(lists:sort(Levels))).
lists:foldl(fun({mask, Mask}, Acc) ->
Mask bor Acc;
(Level, Acc) when is_integer(Level) ->
{mask, Mask} = lager_util:config_to_mask(lager_util:num_to_level(Level)),
Mask bor Acc;
(_, Acc) ->
Acc
end, 0, Levels).
%% @doc Print the format string `Fmt' with `Args' safely with a size
%% limit of `Limit'. If the format string is invalid, or not enough
@ -266,3 +359,33 @@ safe_format(Fmt, Args, Limit, Options) ->
%% @private
safe_format_chop(Fmt, Args, Limit) ->
safe_format(Fmt, Args, Limit, [{chomp, true}]).
%% @doc Print a record lager found during parse transform
pr(Record, Module) when is_tuple(Record), is_atom(element(1, Record)) ->
try Module:module_info(attributes) of
Attrs ->
case lists:keyfind(lager_records, 1, Attrs) of
false ->
Record;
{lager_records, Records} ->
RecordName = element(1, Record),
RecordSize = tuple_size(Record) - 1,
case lists:filter(fun({Name, Fields}) when Name == RecordName,
length(Fields) == RecordSize ->
true;
(_) ->
false
end, Records) of
[] ->
Record;
[{RecordName, RecordFields}|_] ->
{'$lager_record', RecordName,
lists:zip(RecordFields, tl(tuple_to_list(Record)))}
end
end
catch
error:undef ->
Record
end;
pr(Record, _) ->
Record.

+140 -29  src/lager_app.erl

@ -33,9 +33,35 @@ start() ->
application:start(lager).
start(_StartType, _StartArgs) ->
%% until lager is completely started, allow all messages to go through
lager_mochiglobal:put(loglevel, {?DEBUG, []}),
{ok, Pid} = lager_sup:start_link(),
case application:get_env(lager, async_threshold) of
undefined ->
ok;
{ok, undefined} ->
undefined;
{ok, Threshold} when is_integer(Threshold), Threshold >= 0 ->
DefWindow = erlang:trunc(Threshold * 0.2), % maybe 0?
ThresholdWindow =
case application:get_env(lager, async_threshold_window) of
undefined ->
DefWindow;
{ok, Window} when is_integer(Window), Window < Threshold, Window >= 0 ->
Window;
{ok, BadWindow} ->
error_logger:error_msg(
"Invalid value for 'async_threshold_window': ~p~n", [BadWindow]),
throw({error, bad_config})
end,
_ = supervisor:start_child(lager_handler_watcher_sup,
[lager_event, lager_backend_throttle, [Threshold, ThresholdWindow]]),
ok;
{ok, BadThreshold} ->
error_logger:error_msg("Invalid value for 'async_threshold': ~p~n", [BadThreshold]),
throw({error, bad_config})
end,
Handlers = case application:get_env(lager, handlers) of
undefined ->
[{lager_console_backend, info},
@ -49,25 +75,46 @@ start(_StartType, _StartArgs) ->
_ = [supervisor:start_child(lager_handler_watcher_sup, [lager_event, Module, Config]) ||
{Module, Config} <- expand_handlers(Handlers)],
%% mask the messages we have no use for
MinLog = lager:minimum_loglevel(lager:get_loglevels()),
{_, Traces} = lager_mochiglobal:get(loglevel),
lager_mochiglobal:put(loglevel, {MinLog, Traces}),
ok = add_configured_traces(),
SavedHandlers = case application:get_env(lager, error_logger_redirect) of
{ok, false} ->
[];
_ ->
case supervisor:start_child(lager_handler_watcher_sup, [error_logger, error_logger_lager_h, []]) of
{ok, _} ->
%% Should we allow user to whitelist handlers to not be removed?
[begin error_logger:delete_report_handler(X), X end ||
X <- gen_event:which_handlers(error_logger) -- [error_logger_lager_h]];
{error, _} ->
[]
end
%% mask the messages we have no use for
lager:update_loglevel_config(),
HighWaterMark = case application:get_env(lager, error_logger_hwm) of
{ok, undefined} ->
undefined;
{ok, HwmVal} when is_integer(HwmVal), HwmVal > 0 ->
HwmVal;
{ok, BadVal} ->
_ = lager:log(warning, self(), "Invalid error_logger high water mark: ~p, disabling", [BadVal]),
undefined;
undefined ->
undefined
end,
SavedHandlers =
case application:get_env(lager, error_logger_redirect) of
{ok, false} ->
[];
_ ->
WhiteList = case application:get_env(lager, error_logger_whitelist) of
undefined ->
[];
{ok, WhiteList0} ->
WhiteList0
end,
case supervisor:start_child(lager_handler_watcher_sup, [error_logger, error_logger_lager_h, [HighWaterMark]]) of
{ok, _} ->
[begin error_logger:delete_report_handler(X), X end ||
X <- gen_event:which_handlers(error_logger) -- [error_logger_lager_h | WhiteList]];
{error, _} ->
[]
end
end,
lager_util:trace_filter(none),
{ok, Pid, SavedHandlers}.
@ -78,24 +125,49 @@ stop(Handlers) ->
expand_handlers([]) ->
[];
expand_handlers([{lager_file_backend, [{Key, _Value}|_]=Config}|T]) when is_atom(Key) ->
%% this is definitely a new-style config, no expansion needed
[maybe_make_handler_id(lager_file_backend, Config) | expand_handlers(T)];
expand_handlers([{lager_file_backend, Configs}|T]) ->
[ to_config(Config) || Config <- Configs] ++
?INT_LOG(notice, "Deprecated lager_file_backend config detected, please consider updating it", []),
[ {lager_file_backend:config_to_id(Config), Config} || Config <- Configs] ++
expand_handlers(T);
expand_handlers([{Mod, Config}|T]) when is_atom(Mod) ->
[maybe_make_handler_id(Mod, Config) | expand_handlers(T)];
expand_handlers([H|T]) ->
[H | expand_handlers(T)].
to_config({Name,Severity}) ->
{{lager_file_backend, Name}, {Name, Severity}};
to_config({Name,_Severity,_Size,_Rotation,_Count}=Config) ->
{{lager_file_backend, Name}, Config};
to_config([{Name,_Severity,_Size,_Rotation,_Count}, _Format] = Config) ->
{{lager_file_backend, Name}, Config}.
add_configured_traces() ->
Traces = case application:get_env(lager, traces) of
undefined ->
[];
{ok, TraceVal} ->
TraceVal
end,
lists:foreach(fun({Handler, Filter, Level}) ->
{ok, _} = lager:trace(Handler, Filter, Level)
end,
Traces),
ok.
maybe_make_handler_id(Mod, Config) ->
%% Allow the backend to generate a gen_event handler id, if it wants to.
%% We don't use erlang:function_exported here because that requires the module
%% already be loaded, which is unlikely at this phase of startup. Using code:load
%% caused undesireable side-effects with generating code-coverage reports.
try Mod:config_to_id(Config) of
Id ->
{Id, Config}
catch
error:undef ->
{Mod, Config}
end.
-ifdef(TEST).
application_config_mangling_test_() ->
[{"Explode the file backend handlers",
[
{"Explode the file backend handlers",
?_assertMatch(
[{lager_console_backend, info},
{{lager_file_backend,"error.log"},{"error.log",error,10485760, "$D0",5}},
@ -106,7 +178,21 @@ application_config_mangling_test_() ->
{"error.log", error, 10485760, "$D0", 5},
{"console.log", info, 10485760, "$D0", 5}
]}]
))},
))
},
{"Explode the short form of backend file handlers",
?_assertMatch(
[{lager_console_backend, info},
{{lager_file_backend,"error.log"},{"error.log",error}},
{{lager_file_backend,"console.log"},{"console.log",info}}
],
expand_handlers([{lager_console_backend, info},
{lager_file_backend, [
{"error.log", error},
{"console.log", info}
]}]
))
},
{"Explode with formatter info",
?_assertMatch(
[{{lager_file_backend,"test.log"}, [{"test.log", debug, 10485760, "$D0", 5},{lager_default_formatter,["[",severity,"] ", message, "\n"]}]},
@ -117,5 +203,30 @@ application_config_mangling_test_() ->
]
}])
)
}].
},
{"Explode short form with short formatter info",
?_assertMatch(
[{{lager_file_backend,"test.log"}, [{"test.log", debug},{lager_default_formatter,["[",severity,"] ", message, "\n"]}]},
{{lager_file_backend,"test2.log"}, [{"test2.log",debug},{lager_default_formatter}]}],
expand_handlers([{lager_file_backend, [
[{"test.log", debug},{lager_default_formatter,["[",severity,"] ", message, "\n"]}],
[{"test2.log",debug},{lager_default_formatter}]
]
}])
)
},
{"New form needs no expansion",
?_assertMatch([
{{lager_file_backend,"test.log"}, [{file, "test.log"}]},
{{lager_file_backend,"test2.log"}, [{file, "test2.log"}, {level, info}, {sync_on, none}]},
{{lager_file_backend,"test3.log"}, [{formatter, lager_default_formatter}, {file, "test3.log"}]}
],
expand_handlers([
{lager_file_backend, [{file, "test.log"}]},
{lager_file_backend, [{file, "test2.log"}, {level, info}, {sync_on, none}]},
{lager_file_backend, [{formatter, lager_default_formatter},{file, "test3.log"}]}
])
)
}
].
-endif.

+78 -0  src/lager_backend_throttle.erl

@ -0,0 +1,78 @@
%% Copyright (c) 2011-2013 Basho Technologies, Inc. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%% @doc A simple gen_event backend used to monitor mailbox size and
%% switch log messages between synchronous and asynchronous modes.
%% A gen_event handler is used because a process getting its own mailbox
%% size doesn't involve getting a lock, and gen_event handlers run in their
%% parent's process.
-module(lager_backend_throttle).
-include("lager.hrl").
-behaviour(gen_event).
-export([init/1, handle_call/2, handle_event/2, handle_info/2, terminate/2,
code_change/3]).
-record(state, {
hwm,
window_min,
async = true
}).
init([Hwm, Window]) ->
lager_config:set(async, true),
{ok, #state{hwm=Hwm, window_min=Hwm - Window}}.
handle_call(get_loglevel, State) ->
{ok, {mask, ?LOG_NONE}, State};
handle_call({set_loglevel, _Level}, State) ->
{ok, ok, State};
handle_call(_Request, State) ->
{ok, ok, State}.
handle_event({log, _Message},State) ->
{message_queue_len, Len} = erlang:process_info(self(), message_queue_len),
case {Len > State#state.hwm, Len < State#state.window_min, State#state.async} of
{true, _, true} ->
%% need to flip to sync mode
lager_config:set(async, false),
{ok, State#state{async=false}};
{_, true, false} ->
%% need to flip to async mode
lager_config:set(async, true),
{ok, State#state{async=true}};
_ ->
%% nothing needs to change
{ok, State}
end;
handle_event(_Event, State) ->
{ok, State}.
handle_info(_Info, State) ->
{ok, State}.
%% @private
terminate(_Reason, _State) ->
ok.
%% @private
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
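The throttle only flips a flag; it is up to the logging call sites to consult it. A minimal caller-side sketch, assuming nothing beyond the `lager_config:get/2` API introduced in this commit (the module and function names here are illustrative, not part of the change):

```erlang
%% Illustrative helper: report whether a log message may be sent
%% asynchronously, based on the flag maintained by lager_backend_throttle.
-module(throttle_probe).
-export([send_mode/0]).

send_mode() ->
    case lager_config:get(async, false) of
        true  -> async; %% mailbox is below the high-water mark
        false -> sync   %% backlogged: callers should block until it drains
    end.
```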

+ 64
- 0
src/lager_config.erl View File

@ -0,0 +1,64 @@
%% Copyright (c) 2011-2012 Basho Technologies, Inc. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%% @doc Helper functions for working with lager's runtime config
-module(lager_config).
-include("lager.hrl").
-export([new/0, get/1, get/2, set/2]).
-define(TBL, lager_config).
new() ->
%% set up the ETS configuration table
_ = try ets:new(?TBL, [named_table, public, set, {keypos, 1}, {read_concurrency, true}]) of
_Result ->
ok
catch
error:badarg ->
?INT_LOG(warning, "Table ~p already exists", [?TBL])
end,
%% use insert_new here so that if we're in an appup we don't mess anything up
%%
%% until lager is completely started, allow all messages to go through
ets:insert_new(?TBL, {loglevel, {element(2, lager_util:config_to_mask(debug)), []}}),
ok.
get(Key) ->
case ets:lookup(?TBL, Key) of
[] ->
undefined;
[{Key, Res}] ->
Res
end.
get(Key, Default) ->
try ?MODULE:get(Key) of
undefined ->
Default;
Res ->
Res
catch
_:_ ->
Default
end.
set(Key, Value) ->
ets:insert(?TBL, {Key, Value}).
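A quick usage sketch for the helper above (the `some_key` key is purely illustrative; lager itself uses the table for `loglevel` and `async`):

```erlang
%% Illustrative walk-through of the lager_config API.
-module(lager_config_demo).
-export([demo/0]).

demo() ->
    ok        = lager_config:new(),                   %% idempotent; warns if the table exists
    undefined = lager_config:get(some_key),           %% unknown keys return 'undefined'
    fallback  = lager_config:get(some_key, fallback), %% or a caller-supplied default
    true      = lager_config:set(some_key, 42),       %% ets:insert/2 returns true
    42        = lager_config:get(some_key),
    ok.
```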

+ 175
- 34
src/lager_console_backend.erl View File

@ -24,7 +24,7 @@
-export([init/1, handle_call/2, handle_event/2, handle_info/2, terminate/2,
code_change/3]).
-record(state, {level, formatter,format_config}).
-record(state, {level, formatter,format_config,colors=[]}).
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
@ -32,35 +32,56 @@
-endif.
-include("lager.hrl").
-define(TERSE_FORMAT,[time, " [", severity,"] ", message, "\r\n"]).
-define(TERSE_FORMAT,[time, " ", color, "[", severity,"] ", message]).
%% @private
init(Level) when is_atom(Level) ->
init([Level,{lager_default_formatter,?TERSE_FORMAT}]);
init([Level, true]) -> % for backwards compatibility
init([Level,{lager_default_formatter,[{eol, "\r\n"}]}]);
init([Level,{lager_default_formatter,[{eol, eol()}]}]);
init([Level,false]) -> % for backwards compatibility
init([Level,{lager_default_formatter,?TERSE_FORMAT}]);
init([Level,{Formatter,FormatterConfig}]) when is_atom(Level), is_atom(Formatter)->
case lists:member(Level, ?LEVELS) of
true ->
{ok, #state{level=lager_util:level_to_num(Level),
init([Level,{lager_default_formatter,?TERSE_FORMAT ++ [eol()]}]);
init([Level,{Formatter,FormatterConfig}]) when is_atom(Formatter) ->
Colors = case application:get_env(lager, colored) of
{ok, true} ->
{ok, LagerColors} = application:get_env(lager, colors),
LagerColors;
_ -> []
end,
try {is_new_style_console_available(), lager_util:config_to_mask(Level)} of
{false, _} ->
Msg = "Lager's console backend is incompatible with the 'old' shell, not enabling it",
%% be as noisy as possible, log to every possible place
try
alarm_handler:set_alarm({?MODULE, "WARNING: " ++ Msg})
catch
_:_ ->
error_logger:warning_msg(Msg ++ "~n")
end,
io:format("WARNING: " ++ Msg ++ "~n"),
?INT_LOG(warning, Msg, []),
{error, {fatal, old_shell}};
{true, Levels} ->
{ok, #state{level=Levels,
formatter=Formatter,
format_config=FormatterConfig}};
_ ->
{error, bad_log_level}
end.
format_config=FormatterConfig,
colors=Colors}}
catch
_:_ ->
{error, {fatal, bad_log_level}}
end;
init(Level) ->
init([Level,{lager_default_formatter,?TERSE_FORMAT ++ [eol()]}]).
%% @private
handle_call(get_loglevel, #state{level=Level} = State) ->
{ok, Level, State};
handle_call({set_loglevel, Level}, State) ->
case lists:member(Level, ?LEVELS) of
true ->
{ok, ok, State#state{level=lager_util:level_to_num(Level)}};
_ ->
try lager_util:config_to_mask(Level) of
Levels ->
{ok, ok, State#state{level=Levels}}
catch
_:_ ->
{ok, {error, bad_log_level}, State}
end;
handle_call(_Request, State) ->
@ -68,10 +89,10 @@ handle_call(_Request, State) ->
%% @private
handle_event({log, Message},
#state{level=L,formatter=Formatter,format_config=FormatConfig} = State) ->
#state{level=L,formatter=Formatter,format_config=FormatConfig,colors=Colors} = State) ->
case lager_util:is_loggable(Message, L, ?MODULE) of
true ->
io:put_chars(user, Formatter:format(Message,FormatConfig)),
io:put_chars(user, Formatter:format(Message,FormatConfig,Colors)),
{ok, State};
false ->
{ok, State}
@ -91,6 +112,34 @@ terminate(_Reason, _State) ->
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
eol() ->
case application:get_env(lager, colored) of
{ok, true} ->
"\e[0m\r\n";
_ ->
"\r\n"
end.
-ifdef(TEST).
is_new_style_console_available() ->
true.
-else.
is_new_style_console_available() ->
%% Criteria:
%% 1. If the user has specified '-noshell' on the command line,
%% then we will pretend that the new-style console is available.
%% If there is no shell at all, then we don't have to worry
%% about log events being blocked by the old-style shell.
%% 2. Windows doesn't support the new shell, so all windows users
%% have is the oldshell.
%% 3. If the user_drv process is registered, all is OK.
%% 'user_drv' is a registered proc name used by the "new"
%% console driver.
init:get_argument(noshell) /= error orelse
element(1, os:type()) == win32 orelse
is_pid(whereis(user_drv)).
-endif.
-ifdef(TEST).
console_log_test_() ->
%% tiny recursive fun that pretends to be a group leader
@ -116,8 +165,6 @@ console_log_test_() ->
application:load(lager),
application:set_env(lager, handlers, []),
application:set_env(lager, error_logger_redirect, false),
application:start(compiler),
application:start(syntax_tools),
application:start(lager),
whereis(user)
end,
@ -131,16 +178,17 @@ console_log_test_() ->
{"regular console logging",
fun() ->
Pid = spawn(F(self())),
gen_event:add_handler(lager_event, lager_console_backend, info),
unregister(user),
register(user, Pid),
erlang:group_leader(Pid, whereis(lager_event)),
lager_mochiglobal:put(loglevel, {?INFO, []}),
gen_event:add_handler(lager_event, lager_console_backend, info),
lager_config:set(loglevel, {element(2, lager_util:config_to_mask(info)), []}),
lager:log(info, self(), "Test message"),
receive
{io_request, From, ReplyAs, {put_chars, unicode, Msg}} ->
From ! {io_reply, ReplyAs, ok},
?assertMatch([_, "[info]", "Test message\r\n"], re:split(Msg, " ", [{return, list}, {parts, 3}]))
TestMsg = "Test message" ++ eol(),
?assertMatch([_, "[info]", TestMsg], re:split(Msg, " ", [{return, list}, {parts, 3}]))
after
500 ->
?assert(false)
@ -154,13 +202,38 @@ console_log_test_() ->
register(user, Pid),
erlang:group_leader(Pid, whereis(lager_event)),
gen_event:add_handler(lager_event, lager_console_backend, [info, true]),
lager_mochiglobal:put(loglevel, {?INFO, []}),
lager_config:set(loglevel, {element(2, lager_util:config_to_mask(info)), []}),
lager:info("Test message"),
PidStr = pid_to_list(self()),
receive
{io_request, _, _, {put_chars, unicode, Msg}} ->
TestMsg = "Test message" ++ eol(),
?assertMatch([_, _, "[info]", PidStr, _, TestMsg], re:split(Msg, "[ @]", [{return, list}, {parts, 6}]))
after
500 ->
?assert(false)
end
end
},
{"custom format console logging",
fun() ->
Pid = spawn(F(self())),
unregister(user),
register(user, Pid),
erlang:group_leader(Pid, whereis(lager_event)),
gen_event:add_handler(lager_event, lager_console_backend,
[info, {lager_default_formatter, [date,"#",time,"#",severity,"#",node,"#",pid,"#",
module,"#",function,"#",file,"#",line,"#",message,"\r\n"]}]),
lager_config:set(loglevel, {?INFO, []}),
lager:info("Test message"),
PidStr = pid_to_list(self()),
NodeStr = atom_to_list(node()),
ModuleStr = atom_to_list(?MODULE),
receive
{io_request, _, _, {put_chars, unicode, Msg}} ->
?assertMatch([_, _, "[info]", PidStr, _,"Test message\r\n"], re:split(Msg, "[ @]", [{return, list}, {parts, 6}]))
TestMsg = "Test message" ++ eol(),
?assertMatch([_, _, "info", NodeStr, PidStr, ModuleStr, _, _, _, TestMsg],
re:split(Msg, "#", [{return, list}, {parts, 10}]))
after
500 ->
?assert(false)
@ -174,7 +247,7 @@ console_log_test_() ->
register(user, Pid),
gen_event:add_handler(lager_event, lager_console_backend, info),
erlang:group_leader(Pid, whereis(lager_event)),
lager_mochiglobal:put(loglevel, {?INFO, []}),
lager_config:set(loglevel, {element(2, lager_util:config_to_mask(info)), []}),
lager:debug("Test message"),
receive
{io_request, From, ReplyAs, {put_chars, unicode, _Msg}} ->
@ -189,7 +262,8 @@ console_log_test_() ->
receive
{io_request, From1, ReplyAs1, {put_chars, unicode, Msg1}} ->
From1 ! {io_reply, ReplyAs1, ok},
?assertMatch([_, "[debug]", "Test message\r\n"], re:split(Msg1, " ", [{return, list}, {parts, 3}]))
TestMsg = "Test message" ++ eol(),
?assertMatch([_, "[debug]", TestMsg], re:split(Msg1, " ", [{return, list}, {parts, 3}]))
after
500 ->
?assert(false)
@ -202,7 +276,7 @@ console_log_test_() ->
unregister(user),
register(user, Pid),
gen_event:add_handler(lager_event, lager_console_backend, info),
lager_mochiglobal:put(loglevel, {?INFO, []}),
lager_config:set(loglevel, {element(2, lager_util:config_to_mask(info)), []}),
erlang:group_leader(Pid, whereis(lager_event)),
lager:debug("Test message"),
receive
@ -218,7 +292,8 @@ console_log_test_() ->
receive
{io_request, From1, ReplyAs1, {put_chars, unicode, Msg1}} ->
From1 ! {io_reply, ReplyAs1, ok},
?assertMatch([_, "[error]", "Test message\r\n"], re:split(Msg1, " ", [{return, list}, {parts, 3}]))
TestMsg = "Test message" ++ eol(),
?assertMatch([_, "[error]", TestMsg], re:split(Msg1, " ", [{return, list}, {parts, 3}]))
after
1000 ->
?assert(false)
@ -233,8 +308,69 @@ console_log_test_() ->
?assert(true)
end
end
},
{"blacklisting a loglevel works",
fun() ->
Pid = spawn(F(self())),
unregister(user),
register(user, Pid),
gen_event:add_handler(lager_event, lager_console_backend, info),
lager_config:set(loglevel, {element(2, lager_util:config_to_mask(info)), []}),
lager:set_loglevel(lager_console_backend, '!=info'),
erlang:group_leader(Pid, whereis(lager_event)),
lager:debug("Test message"),
receive
{io_request, From1, ReplyAs1, {put_chars, unicode, Msg1}} ->
From1 ! {io_reply, ReplyAs1, ok},
TestMsg = "Test message" ++ eol(),
?assertMatch([_, "[debug]", TestMsg], re:split(Msg1, " ", [{return, list}, {parts, 3}]))
after
1000 ->
?assert(false)
end,
%% info is blacklisted
lager:info("Test message"),
receive
{io_request, From2, ReplyAs2, {put_chars, unicode, _Msg2}} ->
From2 ! {io_reply, ReplyAs2, ok},
?assert(false)
after
500 ->
?assert(true)
end
end
},
{"whitelisting a loglevel works",
fun() ->
Pid = spawn(F(self())),
unregister(user),
register(user, Pid),
gen_event:add_handler(lager_event, lager_console_backend, info),
lager_config:set(loglevel, {element(2, lager_util:config_to_mask(info)), []}),
lager:set_loglevel(lager_console_backend, '=debug'),
erlang:group_leader(Pid, whereis(lager_event)),
lager:debug("Test message"),
receive
{io_request, From1, ReplyAs1, {put_chars, unicode, Msg1}} ->
From1 ! {io_reply, ReplyAs1, ok},
TestMsg = "Test message" ++ eol(),
?assertMatch([_, "[debug]", TestMsg], re:split(Msg1, " ", [{return, list}, {parts, 3}]))
after
1000 ->
?assert(false)
end,
%% error is not in the '=debug' whitelist
lager:error("Test message"),
receive
{io_request, From2, ReplyAs2, {put_chars, unicode, _Msg2}} ->
From2 ! {io_reply, ReplyAs2, ok},
?assert(false)
after
500 ->
?assert(true)
end
end
}
]
}.
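The color handling above is driven purely by the application environment: `colored` switches it on and `colors` maps severities to the escape string emitted in front of `[severity]`, with `eol/0` appending the reset sequence. A hedged `sys.config` sketch (the escape codes are examples, not lager's shipped defaults):

```erlang
%% Illustrative sys.config fragment for colored console output.
[{lager, [
    {colored, true},
    {colors, [
        {debug, "\e[0;38m"},
        {info,  "\e[1;37m"},
        {error, "\e[1;31m"}  %% each line is reset by the "\e[0m" that eol/0 appends
    ]},
    {handlers, [{lager_console_backend, info}]}
]}].
```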
@ -256,7 +392,12 @@ set_loglevel_test_() ->
fun() ->
?assertEqual(info, lager:get_loglevel(lager_console_backend)),
lager:set_loglevel(lager_console_backend, debug),
?assertEqual(debug, lager:get_loglevel(lager_console_backend))
?assertEqual(debug, lager:get_loglevel(lager_console_backend)),
lager:set_loglevel(lager_console_backend, '!=debug'),
?assertEqual(info, lager:get_loglevel(lager_console_backend)),
lager:set_loglevel(lager_console_backend, '!=info'),
?assertEqual(debug, lager:get_loglevel(lager_console_backend)),
ok
end
},
{"Get/set invalid loglevel test",

+ 1
- 3
src/lager_crash_log.erl View File

@ -207,7 +207,7 @@ do_log({log, Event}, #state{name=Name, fd=FD, inode=Inode, flap=Flap,
Time = [Date, " ", TS," =", ReportStr, "====\n"],
NodeSuffix = other_node_suffix(Pid),
Msg = io_lib:format("~s~s~s", [Time, MsgStr, NodeSuffix]),
case file:write(NewFD, Msg) of
case file:write(NewFD, unicode:characters_to_binary(Msg)) of
{error, Reason} when Flap == false ->
?INT_LOG(error, "Failed to write log message to file ~s: ~s",
[Name, file:format_error(Reason)]),
@ -241,8 +241,6 @@ filesystem_test_() ->
application:set_env(lager, handlers, [{lager_test_backend, info}]),
application:set_env(lager, error_logger_redirect, true),
application:unset_env(lager, crash_log),
application:start(compiler),
application:start(syntax_tools),
application:start(lager),
timer:sleep(100),
lager_test_backend:flush()

+ 40
- 20
src/lager_default_formatter.erl View File

@ -27,7 +27,7 @@
%%
%% Exported Functions
%%
-export([format/2]).
-export([format/2, format/3]).
%%
%% API Functions
@ -53,30 +53,36 @@
%%
%% `[{pid, ["My pid is ", pid], "Unknown Pid"}]' -> if pid is in the metadata print "My pid is ?.?.?", otherwise print "Unknown Pid"
%% @end
-spec format(lager_msg:lager_msg(),list()) -> any().
format(Msg,[]) ->
format(Msg, [{eol, "\n"}]);
format(Msg,[{eol, EOL}]) ->
-spec format(lager_msg:lager_msg(),list(),list()) -> any().
format(Msg,[], Colors) ->
format(Msg, [{eol, "\n"}], Colors);
format(Msg,[{eol, EOL}], Colors) ->
format(Msg,
[date, " ", time, " [", severity, "] ",
[date, " ", time, " ", color, "[", severity, "] ",
{pid, ""},
{module, [
{pid, ["@"], ""},
module,
{function, [":", function], ""},
{line, [":",line], ""}], ""},
" ", message, EOL]);
format(Message,Config) ->
[ output(V,Message) || V <- Config ].
" ", message, EOL], Colors);
format(Message,Config,Colors) ->
[ case V of
color -> output_color(Message,Colors);
_ -> output(V,Message)
end || V <- Config ].
-spec format(lager_msg:lager_msg(),list()) -> any().
format(Msg, Config) ->
format(Msg, Config, []).
-spec output(term(),lager_msg:lager_msg()) -> iolist().
output(message,Msg) -> lager_msg:message(Msg);
output(date,Msg) ->
{D, _T} = lager_msg:timestamp(Msg),
{D, _T} = lager_msg:datetime(Msg),
D;
output(time,Msg) ->
{_D, T} = lager_msg:timestamp(Msg),
{_D, T} = lager_msg:datetime(Msg),
T;
output(node,Msg) ->
atom_to_list(node());
@ -99,6 +105,14 @@ output({Prop, Present, Absent}, Msg) when is_atom(Prop) ->
end;
output(Other,_) -> make_printable(Other).
output_color(_Msg,[]) -> [];
output_color(Msg,Colors) ->
Level = lager_msg:severity(Msg),
case lists:keyfind(Level, 1, Colors) of
{_, Color} -> Color;
_ -> []
end.
-spec make_printable(any()) -> iolist().
make_printable(A) when is_atom(A) -> atom_to_list(A);
make_printable(P) when is_pid(P) -> pid_to_list(P);
@ -117,11 +131,17 @@ get_metadata(Key, Metadata, Default) ->
end.
-ifdef(TEST).
date_time_now() ->
Now = os:timestamp(),
{Date, Time} = lager_util:format_time(lager_util:maybe_utc(lager_util:localtime_ms(Now))),
{Date, Time, Now}.
basic_test_() ->
{Date, Time, Now} = date_time_now(),
[{"Default formatting test",
?_assertEqual(iolist_to_binary([<<"Day Time [error] ">>, pid_to_list(self()), <<" Message\n">>]),
?_assertEqual(iolist_to_binary([Date, " ", Time, " [error] ", pid_to_list(self()), " Message\n"]),
iolist_to_binary(format(lager_msg:new("Message",
{"Day", "Time"},
Now,
error,
[{pid, self()}],
[]),
@ -130,16 +150,16 @@ basic_test_() ->
{"Basic Formatting",
?_assertEqual(<<"Simplist Format">>,
iolist_to_binary(format(lager_msg:new("Message",
{"Day", "Time"},
Now,
error,
[{pid, self()}],
[]),
["Simplist Format"])))
},
{"Default equivalent formatting test",
?_assertEqual(iolist_to_binary([<<"Day Time [error] ">>, pid_to_list(self()), <<" Message\n";>>;]),
?_assertEqual(iolist_to_binary([Date, " ", Time, " [error] ", pid_to_list(self()), " Message\n"]),
iolist_to_binary(format(lager_msg:new("Message",
{"Day", "Time"},
Now,
error,
[{pid, self()}],
[]),
@ -147,9 +167,9 @@ basic_test_() ->
)))
},
{"Non existant metadata can default to string",
?_assertEqual(iolist_to_binary([<<"Day Time [error] Fallback Message\n">>;]),
?_assertEqual(iolist_to_binary([Date, " ", Time, " [error] Fallback Message\n";]),
iolist_to_binary(format(lager_msg:new("Message",
{"Day", "Time"},
Now,
error,
[{pid, self()}],
[]),
@ -157,9 +177,9 @@ basic_test_() ->
)))
},
{"Non existant metadata can default to other metadata",
?_assertEqual(iolist_to_binary([<<"Day Time [error] Fallback Message\n">>;]),
?_assertEqual(iolist_to_binary([Date, " ", Time, " [error] Fallback Message\n";]),
iolist_to_binary(format(lager_msg:new("Message",
{"Day", "Time"},
Now,
error,
[{pid, "Fallback"}],
[]),

+ 497
- 129
src/lager_file_backend.erl View File

@ -17,14 +17,16 @@
%% @doc File backend for lager, with multiple file support.
%% Multiple files are supported, each with the path and the loglevel being
%% configurable. The configuration parameter for this backend is a list of
%% 5-tuples of the form
%% `{FileName, Level, RotationSize, RotationDate, RotationCount}'.
%% key-value 2-tuples. See the init() function for the available options.
%% This backend supports external and internal log
%% rotation and will re-open handles to files if the inode changes. It will
%% also rotate the files itself if the size of the file exceeds the
%% `RotationSize' and keep `RotationCount' rotated files. `RotationDate' is
%% `size' and keep `count' rotated files. `date' is
%% an alternate rotation trigger, based on time. See the README for
%% documentation.
%% For performance, the file backend does delayed writes, although it will
%% sync at specific log levels, configured via the `sync_on' option. By default
%% the error level or above will trigger a sync.
-module(lager_file_backend).
@ -41,9 +43,20 @@
-export([init/1, handle_call/2, handle_event/2, handle_info/2, terminate/2,
code_change/3]).
-export([config_to_id/1]).
-define(DEFAULT_LOG_LEVEL, info).
-define(DEFAULT_ROTATION_SIZE, 10485760). %% 10mb
-define(DEFAULT_ROTATION_DATE, "$D0"). %% midnight
-define(DEFAULT_ROTATION_COUNT, 5).
-define(DEFAULT_SYNC_LEVEL, error).
-define(DEFAULT_SYNC_INTERVAL, 1000).
-define(DEFAULT_SYNC_SIZE, 1024*64). %% 64kb
-define(DEFAULT_CHECK_INTERVAL, 1000).
-record(state, {
name :: string(),
level :: integer(),
level :: {'mask', integer()},
fd :: file:io_device(),
inode :: integer(),
flap=false :: boolean(),
@ -51,40 +64,69 @@
date,
count = 10,
formatter,
formatter_config
formatter_config,
sync_on,
check_interval = ?DEFAULT_CHECK_INTERVAL,
sync_interval = ?DEFAULT_SYNC_INTERVAL,
sync_size = ?DEFAULT_SYNC_SIZE,
last_check = os:timestamp()
}).
%% @private
-spec init([{string(), lager:log_level()},...]) -> {ok, #state{}}.
-type option() :: {file, string()} | {level, lager:log_level()} |
{size, non_neg_integer()} | {date, string()} |
{count, non_neg_integer()} | {sync_interval, non_neg_integer()} |
{sync_size, non_neg_integer()} | {sync_on, lager:log_level()} |
{check_interval, non_neg_integer()} | {formatter, atom()} |
{formatter_config, term()}.
-spec init([option(),...]) -> {ok, #state{}} | {error, bad_config}.
init({FileName, LogLevel}) when is_list(FileName), is_atom(LogLevel) ->
%% backwards compatability hack
init([{file, FileName}, {level, LogLevel}]);
init({FileName, LogLevel, Size, Date, Count}) when is_list(FileName), is_atom(LogLevel) ->
%% backwards compatability hack
init([{file, FileName}, {level, LogLevel}, {size, Size}, {date, Date}, {count, Count}]);
init([{FileName, LogLevel, Size, Date, Count}, {Formatter,FormatterConfig}]) when is_list(FileName), is_atom(LogLevel), is_atom(Formatter) ->
%% backwards compatability hack
init([{file, FileName}, {level, LogLevel}, {size, Size}, {date, Date}, {count, Count}, {formatter, Formatter}, {formatter_config, FormatterConfig}]);
init([LogFile,{Formatter}]) ->
%% backwards compatability hack
init([LogFile,{Formatter,[]}]);
init([LogFile,{Formatter,FormatterConfig}]) ->
case validate_logfile(LogFile) of
{Name, Level, Size, Date, Count} ->
init([{FileName, LogLevel}, {Formatter,FormatterConfig}]) when is_list(FileName), is_atom(LogLevel), is_atom(Formatter) ->
%% backwards compatability hack
init([{file, FileName}, {level, LogLevel}, {formatter, Formatter}, {formatter_config, FormatterConfig}]);
init(LogFileConfig) when is_list(LogFileConfig) ->
case validate_logfile_proplist(LogFileConfig) of
false ->
%% failed to validate config
{error, {fatal, bad_config}};
Config ->
%% probably a better way to do this, but whatever
[Name, Level, Date, Size, Count, SyncInterval, SyncSize, SyncOn, CheckInterval, Formatter, FormatterConfig] =
[proplists:get_value(Key, Config) || Key <- [file, level, date, size, count, sync_interval, sync_size, sync_on, check_interval, formatter, formatter_config]],
schedule_rotation(Name, Date),
State = case lager_util:open_logfile(Name, true) of
State0 = #state{name=Name, level=Level, size=Size, date=Date, count=Count, formatter=Formatter,
formatter_config=FormatterConfig, sync_on=SyncOn, sync_interval=SyncInterval, sync_size=SyncSize,
check_interval=CheckInterval},
State = case lager_util:open_logfile(Name, {SyncSize, SyncInterval}) of
{ok, {FD, Inode, _}} ->
#state{name=Name, level=lager_util:level_to_num(Level),
fd=FD, inode=Inode, size=Size, date=Date, count=Count, formatter=Formatter, formatter_config=FormatterConfig};
State0#state{fd=FD, inode=Inode};
{error, Reason} ->
?INT_LOG(error, "Failed to open log file ~s with error ~s",
[Name, file:format_error(Reason)]),
#state{name=Name, level=lager_util:level_to_num(Level),
flap=true, size=Size, date=Date, count=Count, formatter=Formatter, formatter_config=FormatterConfig}
?INT_LOG(error, "Failed to open log file ~s with error ~s", [Name, file:format_error(Reason)]),
State0#state{flap=true}
end,
{ok, State};
false ->
ignore
end;
init(LogFile) ->
init([LogFile,{lager_default_formatter,[]}]).
{ok, State}
end.
%% @private
handle_call({set_loglevel, Level}, #state{name=Ident} = State) ->
?INT_LOG(notice, "Changed loglevel of ~s to ~p", [Ident, Level]),
{ok, ok, State#state{level=lager_util:level_to_num(Level)}};
case validate_loglevel(Level) of
false ->
{ok, {error, bad_loglevel}, State};
Levels ->
?INT_LOG(notice, "Changed loglevel of ~s to ~p", [Ident, Level]),
{ok, ok, State#state{level=Levels}}
end;
handle_call(get_loglevel, #state{level=Level} = State) ->
{ok, Level, State};
handle_call(_Request, State) ->
@ -95,7 +137,7 @@ handle_event({log, Message},
#state{name=Name, level=L,formatter=Formatter,formatter_config=FormatConfig} = State) ->
case lager_util:is_loggable(Message,L,{lager_file_backend, Name}) of
true ->
{ok,write(State, lager_msg:severity_as_int(Message), Formatter:format(Message,FormatConfig)) };
{ok,write(State, lager_msg:timestamp(Message), lager_msg:severity_as_int(Message), Formatter:format(Message,FormatConfig)) };
false ->
{ok, State}
end;
@ -121,85 +163,199 @@ terminate(_Reason, #state{fd=FD}) ->
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
%% @private convert the config into a gen_event handler ID
config_to_id({Name,_Severity}) when is_list(Name) ->
{?MODULE, Name};
config_to_id({Name,_Severity,_Size,_Rotation,_Count}) ->
{?MODULE, Name};
config_to_id([{Name,_Severity,_Size,_Rotation,_Count}, _Format]) ->
{?MODULE, Name};
config_to_id([{Name,_Severity}, _Format]) when is_list(Name) ->
{?MODULE, Name};
config_to_id(Config) ->
case proplists:get_value(file, Config) of
undefined ->
erlang:error(no_file);
File ->
{?MODULE, File}
end.
write(#state{name=Name, fd=FD, inode=Inode, flap=Flap, size=RotSize,
count=Count} = State, Level, Msg) ->
case lager_util:ensure_logfile(Name, FD, Inode, true) of
{ok, {_, _, Size}} when RotSize /= 0, Size > RotSize ->
lager_util:rotate_logfile(Name, Count),
write(State, Level, Msg);
{ok, {NewFD, NewInode, _}} ->
%% delayed_write doesn't report errors
_ = file:write(NewFD, Msg),
case Level of
_ when Level =< ?ERROR ->
%% force a sync on any message at error severity or above
Flap2 = case file:datasync(NewFD) of
{error, Reason2} when Flap == false ->
?INT_LOG(error, "Failed to write log message to file ~s: ~s",
[Name, file:format_error(Reason2)]),
true;
count=Count} = State, Timestamp, Level, Msg) ->
LastCheck = timer:now_diff(Timestamp, State#state.last_check) div 1000,
case LastCheck >= State#state.check_interval orelse FD == undefined of
true ->
%% need to check for rotation
case lager_util:ensure_logfile(Name, FD, Inode, {State#state.sync_size, State#state.sync_interval}) of
{ok, {_, _, Size}} when RotSize /= 0, Size > RotSize ->
case lager_util:rotate_logfile(Name, Count) of
ok ->
false;
%% go around the loop again, we'll do another rotation check and hit the next clause of ensure_logfile
write(State, Timestamp, Level, Msg);
{error, Reason} ->
case Flap of
true ->
State;
_ ->
?INT_LOG(error, "Failed to rotate log file ~s with error ~s", [Name, file:format_error(Reason)]),
State#state{flap=true}
end
end;
{ok, {NewFD, NewInode, _}} ->
%% update our last check and try again
do_write(State#state{last_check=Timestamp, fd=NewFD, inode=NewInode}, Level, Msg);
{error, Reason} ->
case Flap of
true ->
State;
_ ->
Flap
end,
State#state{fd=NewFD, inode=NewInode, flap=Flap2};
_ ->
State#state{fd=NewFD, inode=NewInode}
?INT_LOG(error, "Failed to reopen log file ~s with error ~s", [Name, file:format_error(Reason)]),
State#state{flap=true}
end
end;
{error, Reason} ->
case Flap of
true ->
State;
false ->
do_write(State, Level, Msg)
end.
do_write(#state{fd=FD, name=Name, flap=Flap} = State, Level, Msg) ->
%% delayed_write doesn't report errors
_ = file:write(FD, unicode:characters_to_binary(Msg)),
{mask, SyncLevel} = State#state.sync_on,
case (Level band SyncLevel) /= 0 of
true ->
%% force a sync on any message that matches the 'sync_on' bitmask
Flap2 = case file:datasync(FD) of
{error, Reason2} when Flap == false ->
?INT_LOG(error, "Failed to write log message to file ~s: ~s",
[Name, file:format_error(Reason2)]),
true;
ok ->
false;
_ ->
?INT_LOG(error, "Failed to reopen log file ~s with error ~s",
[Name, file:format_error(Reason)]),
State#state{flap=true}
Flap
end,
State#state{flap=Flap2};
_ ->
State
end.
validate_loglevel(Level) ->
try lager_util:config_to_mask(Level) of
Levels ->
Levels
catch
_:_ ->
false
end.
validate_logfile_proplist(List) ->
try validate_logfile_proplist(List, []) of
Res ->
case proplists:get_value(file, Res) of
undefined ->
?INT_LOG(error, "Missing required file option", []),
false;
_File ->
%% merge with the default options
{ok, DefaultRotationDate} = lager_util:parse_rotation_date_spec(?DEFAULT_ROTATION_DATE),
lists:keymerge(1, lists:sort(Res), lists:sort([
{level, validate_loglevel(?DEFAULT_LOG_LEVEL)}, {date, DefaultRotationDate},
{size, ?DEFAULT_ROTATION_SIZE}, {count, ?DEFAULT_ROTATION_COUNT},
{sync_on, validate_loglevel(?DEFAULT_SYNC_LEVEL)}, {sync_interval, ?DEFAULT_SYNC_INTERVAL},
{sync_size, ?DEFAULT_SYNC_SIZE}, {check_interval, ?DEFAULT_CHECK_INTERVAL},
{formatter, lager_default_formatter}, {formatter_config, []}
]))
end
catch
{bad_config, Msg, Value} ->
?INT_LOG(error, "~s ~p for file ~p",
[Msg, Value, proplists:get_value(file, List)]),
false
end.
validate_logfile({Name, Level}) ->
case lists:member(Level, ?LEVELS) of
true ->
{Name, Level, 0, undefined, 0};
validate_logfile_proplist([], Acc) ->
Acc;
validate_logfile_proplist([{file, File}|Tail], Acc) ->
%% is there any reasonable validation we can do here?
validate_logfile_proplist(Tail, [{file, File}|Acc]);
validate_logfile_proplist([{level, Level}|Tail], Acc) ->
case validate_loglevel(Level) of
false ->
throw({bad_config, "Invalid loglevel", Level});
Res ->
validate_logfile_proplist(Tail, [{level, Res}|Acc])
end;
validate_logfile_proplist([{size, Size}|Tail], Acc) ->
case Size of
S when is_integer(S), S >= 0 ->
validate_logfile_proplist(Tail, [{size, Size}|Acc]);
_ ->
?INT_LOG(error, "Invalid log level of ~p for ~s.",
[Level, Name]),
false
throw({bad_config, "Invalid rotation size", Size})
end;
validate_logfile({Name, Level, Size, Date, Count}) ->
ValidLevel = (lists:member(Level, ?LEVELS)),
ValidSize = (is_integer(Size) andalso Size >= 0),
ValidCount = (is_integer(Count) andalso Count >= 0),
case {ValidLevel, ValidSize, ValidCount} of
{false, _, _} ->
?INT_LOG(error, "Invalid log level of ~p for ~s.",
[Level, Name]),
false;
{_, false, _} ->
?INT_LOG(error, "Invalid rotation size of ~p for ~s.",
[Size, Name]),
false;
{_, _, false} ->
?INT_LOG(error, "Invalid rotation count of ~p for ~s.",
[Count, Name]),
false;
{true, true, true} ->
case lager_util:parse_rotation_date_spec(Date) of
{ok, Spec} ->
{Name, Level, Size, Spec, Count};
{error, _} when Date == "" ->
%% blank ones are fine.
{Name, Level, Size, undefined, Count};
{error, _} ->
?INT_LOG(error, "Invalid rotation date of ~p for ~s.",
[Date, Name]),
false
end
validate_logfile_proplist([{count, Count}|Tail], Acc) ->
case Count of
C when is_integer(C), C >= 0 ->
validate_logfile_proplist(Tail, [{count, Count}|Acc]);
_ ->
throw({bad_config, "Invalid rotation count", Count})
end;
validate_logfile_proplist([{date, Date}|Tail], Acc) ->
case lager_util:parse_rotation_date_spec(Date) of
{ok, Spec} ->
validate_logfile_proplist(Tail, [{date, Spec}|Acc]);
{error, _} when Date == "" ->
%% legacy config allowed blanks
validate_logfile_proplist(Tail, [{date, undefined}|Acc]);
{error, _} ->
throw({bad_config, "Invalid rotation date", Date})
end;
validate_logfile_proplist([{sync_interval, SyncInt}|Tail], Acc) ->
case SyncInt of
Val when is_integer(Val), Val >= 0 ->
validate_logfile_proplist(Tail, [{sync_interval, Val}|Acc]);
_ ->
throw({bad_config, "Invalid sync interval", SyncInt})
end;
validate_logfile_proplist([{sync_size, SyncSize}|Tail], Acc) ->
case SyncSize of
Val when is_integer(Val), Val >= 0 ->
validate_logfile_proplist(Tail, [{sync_size, Val}|Acc]);
_ ->
throw({bad_config, "Invalid sync size", SyncSize})
end;
validate_logfile_proplist([{check_interval, CheckInt}|Tail], Acc) ->
case CheckInt of
Val when is_integer(Val), Val >= 0 ->
validate_logfile_proplist(Tail, [{check_interval, Val}|Acc]);
always ->
validate_logfile_proplist(Tail, [{check_interval, 0}|Acc]);
_ ->
throw({bad_config, "Invalid check interval", CheckInt})
end;
validate_logfile(H) ->
?INT_LOG(error, "Invalid log file config ~p.", [H]),
false.
validate_logfile_proplist([{sync_on, Level}|Tail], Acc) ->
case validate_loglevel(Level) of
false ->
throw({bad_config, "Invalid sync on level", Level});
Res ->
validate_logfile_proplist(Tail, [{sync_on, Res}|Acc])
end;
validate_logfile_proplist([{formatter, Fmt}|Tail], Acc) ->
case is_atom(Fmt) of
true ->
validate_logfile_proplist(Tail, [{formatter, Fmt}|Acc]);
false ->
throw({bad_config, "Invalid formatter module", Fmt})
end;
validate_logfile_proplist([{formatter_config, FmtCfg}|Tail], Acc) ->
case is_list(FmtCfg) of
true ->
validate_logfile_proplist(Tail, [{formatter_config, FmtCfg}|Acc]);
false ->
throw({bad_config, "Invalid formatter config", FmtCfg})
end;
validate_logfile_proplist([Other|_Tail], _Acc) ->
throw({bad_config, "Invalid option", Other}).
schedule_rotation(_, undefined) ->
ok;
@ -211,27 +367,97 @@ schedule_rotation(Name, Date) ->
get_loglevel_test() ->
{ok, Level, _} = handle_call(get_loglevel,
#state{name="bar", level=lager_util:level_to_num(info), fd=0, inode=0}),
?assertEqual(Level, lager_util:level_to_num(info)),
#state{name="bar", level=lager_util:config_to_mask(info), fd=0, inode=0}),
?assertEqual(Level, lager_util:config_to_mask(info)),
{ok, Level2, _} = handle_call(get_loglevel,
#state{name="foo", level=lager_util:level_to_num(warning), fd=0, inode=0}),
?assertEqual(Level2, lager_util:level_to_num(warning)).
rotation_test() ->
{ok, {FD, Inode, _}} = lager_util:open_logfile("test.log", true),
?assertMatch(#state{name="test.log", level=?DEBUG, fd=FD, inode=Inode},
write(#state{name="test.log", level=?DEBUG, fd=FD, inode=Inode}, 0, "hello world")),
file:delete("test.log"),
Result = write(#state{name="test.log", level=?DEBUG, fd=FD, inode=Inode}, 0, "hello world"),
%% assert file has changed
?assert(#state{name="test.log", level=?DEBUG, fd=FD, inode=Inode} =/= Result),
?assertMatch(#state{name="test.log", level=?DEBUG}, Result),
file:rename("test.log", "test.log.1"),
Result2 = write(Result, 0, "hello world"),
%% assert file has changed
?assert(Result =/= Result2),
?assertMatch(#state{name="test.log", level=?DEBUG}, Result2),
ok.
#state{name="foo", level=lager_util:config_to_mask(warning), fd=0, inode=0}),
?assertEqual(Level2, lager_util:config_to_mask(warning)).
rotation_test_() ->
{foreach,
fun() ->
[file:delete(F)||F <- filelib:wildcard("test.log*")],
SyncLevel = validate_loglevel(?DEFAULT_SYNC_LEVEL),
SyncSize = ?DEFAULT_SYNC_SIZE,
SyncInterval = ?DEFAULT_SYNC_INTERVAL,
CheckInterval = 0, %% hard to test delayed mode
#state{name="test.log", level=?DEBUG, sync_on=SyncLevel,
sync_size=SyncSize, sync_interval=SyncInterval, check_interval=CheckInterval}
end,
fun(_) ->
[file:delete(F)||F <- filelib:wildcard("test.log*")]
end,
[fun(DefaultState = #state{sync_size=SyncSize, sync_interval = SyncInterval}) ->
{"External rotation should work",
fun() ->
{ok, {FD, Inode, _}} = lager_util:open_logfile("test.log", {SyncSize, SyncInterval}),
State0 = DefaultState#state{fd=FD, inode=Inode},
?assertMatch(#state{name="test.log", level=?DEBUG, fd=FD, inode=Inode},
write(State0, os:timestamp(), ?DEBUG, "hello world")),
file:delete("test.log"),
Result = write(State0, os:timestamp(), ?DEBUG, "hello world"),
%% assert file has changed
?assert(#state{name="test.log", level=?DEBUG, fd=FD, inode=Inode} =/= Result),
?assertMatch(#state{name="test.log", level=?DEBUG}, Result),
file:rename("test.log", "test.log.1"),
Result2 = write(Result, os:timestamp(), ?DEBUG, "hello world"),
%% assert file has changed
?assert(Result =/= Result2),
?assertMatch(#state{name="test.log", level=?DEBUG}, Result2),
ok
end}
end,
fun(DefaultState = #state{sync_size=SyncSize, sync_interval = SyncInterval}) ->
{"Internal rotation and delayed write",
fun() ->
CheckInterval = 3000, % 3 sec
RotationSize = 15,
PreviousCheck = os:timestamp(),
{ok, {FD, Inode, _}} = lager_util:open_logfile("test.log", {SyncSize, SyncInterval}),
State0 = DefaultState#state{
fd=FD, inode=Inode, size=RotationSize,
check_interval=CheckInterval, last_check=PreviousCheck},
%% new message within check interval with sync_on level
Msg1Timestamp = add_secs(PreviousCheck, 1),
State0 = State1 = write(State0, Msg1Timestamp, ?ERROR, "big big message 1"),
%% new message within check interval under sync_on level
%% not written to disk yet
Msg2Timestamp = add_secs(PreviousCheck, 2),
State0 = State2 = write(State1, Msg2Timestamp, ?DEBUG, "buffered message 2"),
%% although file size is big enough...
{ok, FInfo} = file:read_file_info("test.log"),
?assert(RotationSize < FInfo#file_info.size),
%% ...no rotation yet
?assertEqual(PreviousCheck, State2#state.last_check),
?assertNot(filelib:is_regular("test.log.0")),
%% new message after check interval
Msg3Timestamp = add_secs(PreviousCheck, 4),
_State3 = write(State2, Msg3Timestamp, ?DEBUG, "message 3"),
%% rotation happened
?assert(filelib:is_regular("test.log.0")),
{ok, Bin1} = file:read_file("test.log.0"),
{ok, Bin2} = file:read_file("test.log"),
%% messages 1 and 2 written to the rotated file
?assertEqual(<<"big big message 1buffered message 2">>, Bin1),
%% message 3 buffered, not yet written to file
?assertEqual(<<"">>, Bin2),
ok
end}
end
]}.
add_secs({Mega, Secs, Micro}, Add) ->
NewSecs = Secs + Add,
{Mega + NewSecs div 1000000, NewSecs rem 1000000, Micro}.
filesystem_test_() ->
{foreach,
@ -241,30 +467,51 @@ filesystem_test_() ->
application:load(lager),
application:set_env(lager, handlers, [{lager_test_backend, info}]),
application:set_env(lager, error_logger_redirect, false),
application:start(compiler),
application:start(syntax_tools),
application:set_env(lager, async_threshold, undefined),
application:start(lager)
end,
fun(_) ->
file:delete("test.log"),
file:delete("test.log.0"),
file:delete("test.log.1"),
application:stop(lager),
error_logger:tty(true)
end,
[
{"under normal circumstances, file should be opened",
fun() ->
gen_event:add_handler(lager_event, lager_file_backend, {"test.log", info}),
gen_event:add_handler(lager_event, lager_file_backend, [{"test.log", info}, {lager_default_formatter}]),
lager:log(error, self(), "Test message"),
{ok, Bin} = file:read_file("test.log"),
Pid = pid_to_list(self()),
?assertMatch([_, _, "[error]", Pid, "Test message\n"], re:split(Bin, " ", [{return, list}, {parts, 5}]))
end
},
{"don't choke on unicode",
fun() ->
gen_event:add_handler(lager_event, lager_file_backend, [{"test.log", info}, {lager_default_formatter}]),
lager:log(error, self(),"~ts", [[20013,25991,27979,35797]]),
{ok, Bin} = file:read_file("test.log"),
Pid = pid_to_list(self()),
?assertMatch([_, _, "[error]", Pid, [228,184,173,230,150,135,230,181,139,232,175,149, $\n]], re:split(Bin, " ", [{return, list}, {parts, 5}]))
end
},
{"don't choke on latin-1",
fun() ->
%% XXX if this test fails, check that this file is encoded latin-1, not utf-8!
gen_event:add_handler(lager_event, lager_file_backend, [{"test.log", info}, {lager_default_formatter}]),
lager:log(error, self(),"~ts", ["LÆÝÎN-ï"]),
{ok, Bin} = file:read_file("test.log"),
Pid = pid_to_list(self()),
Res = re:split(Bin, " ", [{return, list}, {parts, 5}]),
?assertMatch([_, _, "[error]", Pid, [76,195,134,195,157,195,142,78,45,195,175,$\n]], Res)
end
},
{"file can't be opened on startup triggers an error message",
fun() ->
{ok, FInfo} = file:read_file_info("test.log"),
file:write_file_info("test.log", FInfo#file_info{mode = 0}),
gen_event:add_handler(lager_event, lager_file_backend, {"test.log", info}),
gen_event:add_handler(lager_event, lager_file_backend, {"test.log", info, 10*1024*1024, "$D0", 5}),
?assertEqual(1, lager_test_backend:count()),
{_Level, _Time,Message,_Metadata} = lager_test_backend:pop(),
?assertEqual("Failed to open log file test.log with error permission denied", lists:flatten(Message))
@ -272,7 +519,7 @@ filesystem_test_() ->
},
{"file that becomes unavailable at runtime should trigger an error message",
fun() ->
gen_event:add_handler(lager_event, lager_file_backend, {"test.log", info}),
gen_event:add_handler(lager_event, lager_file_backend, [{file, "test.log"}, {level, info}, {check_interval, 0}]),
?assertEqual(0, lager_test_backend:count()),
lager:log(error, self(), "Test message"),
?assertEqual(1, lager_test_backend:count()),
@ -293,7 +540,7 @@ filesystem_test_() ->
{ok, FInfo} = file:read_file_info("test.log"),
OldPerms = FInfo#file_info.mode,
file:write_file_info("test.log", FInfo#file_info{mode = 0}),
gen_event:add_handler(lager_event, lager_file_backend, {"test.log", info}),
gen_event:add_handler(lager_event, lager_file_backend, [{file, "test.log"},{check_interval, 0}]),
?assertEqual(1, lager_test_backend:count()),
{_Level, _Time, Message,_Metadata} = lager_test_backend:pop(),
?assertEqual("Failed to open log file test.log with error permission denied", lists:flatten(Message)),
@ -306,7 +553,7 @@ filesystem_test_() ->
},
{"external logfile rotation/deletion should be handled",
fun() ->
gen_event:add_handler(lager_event, lager_file_backend, {"test.log", info}),
gen_event:add_handler(lager_event, lager_file_backend, [{file, "test.log"}, {level, info}, {check_interval, 0}]),
?assertEqual(0, lager_test_backend:count()),
lager:log(error, self(), "Test message1"),
?assertEqual(1, lager_test_backend:count()),
@ -322,6 +569,68 @@ filesystem_test_() ->
?assertMatch([_, _, "[error]", Pid, "Test message3\n"], re:split(Bin2, " ", [{return, list}, {parts, 5}]))
end
},
{"internal size rotation should work",
fun() ->
gen_event:add_handler(lager_event, lager_file_backend, [{file, "test.log"}, {level, info}, {check_interval, 0}, {size, 10}]),
lager:log(error, self(), "Test message1"),
lager:log(error, self(), "Test message1"),
?assert(filelib:is_regular("test.log.0"))
end
},
{"internal time rotation should work",
fun() ->
gen_event:add_handler(lager_event, lager_file_backend, [{file, "test.log"}, {level, info}, {check_interval, 1000}]),
lager:log(error, self(), "Test message1"),
lager:log(error, self(), "Test message1"),
whereis(lager_event) ! {rotate, "test.log"},
lager:log(error, self(), "Test message1"),
?assert(filelib:is_regular("test.log.0"))
end
},
{"sync_on option should work",
fun() ->
gen_event:add_handler(lager_event, lager_file_backend, [{file, "test.log"}, {level, info}, {sync_on, "=info"}, {check_interval, 5000}, {sync_interval, 5000}]),
lager:log(error, self(), "Test message1"),
lager:log(error, self(), "Test message1"),
?assertEqual({ok, <<>>}, file:read_file("test.log")),
lager:log(info, self(), "Test message1"),
{ok, Bin} = file:read_file("test.log"),
?assert(<<>> /= Bin)
end
},
{"sync_on none option should work (also tests sync_interval)",
fun() ->
gen_event:add_handler(lager_event, lager_file_backend, [{file, "test.log"}, {level, info}, {sync_on, "none"}, {check_interval, 5000}, {sync_interval, 1000}]),
lager:log(error, self(), "Test message1"),
lager:log(error, self(), "Test message1"),
?assertEqual({ok, <<>>}, file:read_file("test.log")),
lager:log(info, self(), "Test message1"),
?assertEqual({ok, <<>>}, file:read_file("test.log")),
timer:sleep(1000),
{ok, Bin} = file:read_file("test.log"),
?assert(<<>> /= Bin)
end
},
{"sync_size option should work",
fun() ->
gen_event:add_handler(lager_event, lager_file_backend, [{file, "test.log"}, {level, info}, {sync_on, "none"}, {check_interval, 5001}, {sync_size, 640}, {sync_interval, 5001}]),
lager:log(error, self(), "Test messageis64bytes"),
lager:log(error, self(), "Test messageis64bytes"),
lager:log(error, self(), "Test messageis64bytes"),
lager:log(error, self(), "Test messageis64bytes"),
lager:log(error, self(), "Test messageis64bytes"),
?assertEqual({ok, <<>>}, file:read_file("test.log")),
lager:log(error, self(), "Test messageis64bytes"),
lager:log(error, self(), "Test messageis64bytes"),
lager:log(error, self(), "Test messageis64bytes"),
lager:log(error, self(), "Test messageis64bytes"),
?assertEqual({ok, <<>>}, file:read_file("test.log")),
%% now we've written enough bytes
lager:log(error, self(), "Test messageis64bytes"),
{ok, Bin} = file:read_file("test.log"),
?assert(<<>> /= Bin)
end
},
{"runtime level changes",
fun() ->
gen_event:add_handler(lager_event, {lager_file_backend, "test.log"}, {"test.log", info}),
@ -341,7 +650,7 @@ filesystem_test_() ->
},
{"invalid runtime level changes",
fun() ->
gen_event:add_handler(lager_event, lager_file_backend, {"test.log", info}),
gen_event:add_handler(lager_event, lager_file_backend, [{"test.log", info, 10*1024*1024, "$D0", 5}, {lager_default_formatter}]),
gen_event:add_handler(lager_event, lager_file_backend, {"test3.log", info}),
?assertEqual({error, bad_module}, lager:set_loglevel(lager_file_backend, "test.log", warning))
end
@ -352,8 +661,8 @@ filesystem_test_() ->
{"test.log", critical}),
lager:error("Test message"),
?assertEqual({ok, <<>>}, file:read_file("test.log")),
{Level, _} = lager_mochiglobal:get(loglevel),
lager_mochiglobal:put(loglevel, {Level, [{[{module,
{Level, _} = lager_config:get(loglevel),
lager_config:set(loglevel, {Level, [{[{module,
?MODULE}], ?DEBUG,
{lager_file_backend, "test.log"}}]}),
lager:error("Test message"),
@ -365,13 +674,13 @@ filesystem_test_() ->
{"tracing should not duplicate messages",
fun() ->
gen_event:add_handler(lager_event, lager_file_backend,
{"test.log", critical}),
[{file, "test.log"}, {level, critical}, {check_interval, always}]),
lager:critical("Test message"),
{ok, Bin1} = file:read_file("test.log"),
?assertMatch([_, _, "[critical]", _, "Test message\n"], re:split(Bin1, " ", [{return, list}, {parts, 5}])),
ok = file:delete("test.log"),
{Level, _} = lager_mochiglobal:get(loglevel),
lager_mochiglobal:put(loglevel, {Level, [{[{module,
{Level, _} = lager_config:get(loglevel),
lager_config:set(loglevel, {Level, [{[{module,
?MODULE}], ?DEBUG,
{lager_file_backend, "test.log"}}]}),
lager:critical("Test message"),
@ -388,6 +697,8 @@ filesystem_test_() ->
file:delete("foo.log"),
{ok, _} = lager:trace_file("foo.log", [{module, ?MODULE}]),
lager:error("Test message"),
%% not eligible for trace
lager:log(error, self(), "Test message"),
{ok, Bin3} = file:read_file("foo.log"),
?assertMatch([_, _, "[error]", _, "Test message\n"], re:split(Bin3, " ", [{return, list}, {parts, 5}]))
end
@ -423,5 +734,62 @@ formatting_test_() ->
}
]}.
config_validation_test_() ->
[
{"missing file",
?_assertEqual(false,
validate_logfile_proplist([{level, info},{size, 10}]))
},
{"bad level",
?_assertEqual(false,
validate_logfile_proplist([{file, "test.log"}, {level, blah},{size, 10}]))
},
{"bad size",
?_assertEqual(false,
validate_logfile_proplist([{file, "test.log"}, {size, infinity}]))
},
{"bad count",
?_assertEqual(false,
validate_logfile_proplist([{file, "test.log"}, {count, infinity}]))
},
{"bad date",
?_assertEqual(false,
validate_logfile_proplist([{file, "test.log"}, {date, "midnight"}]))
},
{"blank date is ok",
?_assertMatch([_|_],
validate_logfile_proplist([{file, "test.log"}, {date, ""}]))
},
{"bad sync_interval",
?_assertEqual(false,
validate_logfile_proplist([{file, "test.log"}, {sync_interval, infinity}]))
},
{"bad sync_size",
?_assertEqual(false,
validate_logfile_proplist([{file, "test.log"}, {sync_size, infinity}]))
},
{"bad check_interval",
?_assertEqual(false,
validate_logfile_proplist([{file, "test.log"}, {check_interval, infinity}]))
},
{"bad sync_on level",
?_assertEqual(false,
validate_logfile_proplist([{file, "test.log"}, {sync_on, infinity}]))
},
{"bad formatter module",
?_assertEqual(false,
validate_logfile_proplist([{file, "test.log"}, {formatter, "io:format"}]))
},
{"bad formatter config",
?_assertEqual(false,
validate_logfile_proplist([{file, "test.log"}, {formatter_config, blah}]))
},
{"unknown option",
?_assertEqual(false,
validate_logfile_proplist([{file, "test.log"}, {rhubarb, spicy}]))
}
].
-endif.
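Taken together, the backend is now configured with a flat proplist rather than the old 5-tuple; any option left out falls back to the `DEFAULT_*` defines above. A sketch of one handlers entry using the full option set (values are examples only):

```erlang
%% Illustrative 'handlers' entry (lager application environment) for the
%% proplist-style file backend configuration.
{handlers, [
    {lager_file_backend, [
        {file, "error.log"},
        {level, error},
        {size, 10485760},        %% rotate once the file reaches 10 MB
        {date, "$D0"},           %% and at midnight
        {count, 5},              %% keep five rotated files
        {sync_on, critical},     %% force a sync at critical and above
        {sync_interval, 1000},   %% otherwise flush buffered writes every second
        {sync_size, 65536},      %% or after 64 KB of pending output
        {check_interval, 1000},  %% ms between rotation/inode checks (or 'always')
        {formatter, lager_default_formatter},
        {formatter_config, [date, " ", time, " [", severity, "] ", message, "\n"]}
    ]}
]}
```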

+ 25
- 12
src/lager_format.erl View File

@ -29,18 +29,31 @@
format(FmtStr, Args, MaxLen) ->
format(FmtStr, Args, MaxLen, []).
format(FmtStr, Args, MaxLen, Opts) ->
Options = make_options(Opts, #options{}),
Cs = collect(FmtStr, Args),
{Cs2, MaxLen2} = build(Cs, [], MaxLen, Options),
%% count how many terms remain
{Count, StrLen} = lists:foldl(
fun({_C, _As, _F, _Adj, _P, _Pad, _Enc}, {Terms, Chars}) ->
{Terms + 1, Chars};
(_, {Terms, Chars}) ->
{Terms, Chars + 1}
end, {0, 0}, Cs2),
build2(Cs2, Count, MaxLen2 - StrLen).
format([], [], _, _) ->
"";
format(FmtStr, Args, MaxLen, Opts) when is_atom(FmtStr) ->
format(atom_to_list(FmtStr), Args, MaxLen, Opts);
format(FmtStr, Args, MaxLen, Opts) when is_binary(FmtStr) ->
format(binary_to_list(FmtStr), Args, MaxLen, Opts);
format(FmtStr, Args, MaxLen, Opts) when is_list(FmtStr) ->
case lager_stdlib:string_p(FmtStr) of
true ->
Options = make_options(Opts, #options{}),
Cs = collect(FmtStr, Args),
{Cs2, MaxLen2} = build(Cs, [], MaxLen, Options),
%% count how many terms remain
{Count, StrLen} = lists:foldl(
fun({_C, _As, _F, _Adj, _P, _Pad, _Enc}, {Terms, Chars}) ->
{Terms + 1, Chars};
(_, {Terms, Chars}) ->
{Terms, Chars + 1}
end, {0, 0}, Cs2),
build2(Cs2, Count, MaxLen2 - StrLen);
false ->
erlang:error(badarg)
end;
format(_FmtStr, _Args, _MaxLen, _Opts) ->
erlang:error(badarg).
collect([$~|Fmt0], Args0) ->
{C,Fmt1,Args1} = collect_cseq(Fmt0, Args0),

+ 17
- 10
src/lager_handler_watcher.erl View File

@ -23,6 +23,8 @@
-behaviour(gen_server).
-include("lager.hrl").
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-endif.
@ -73,6 +75,8 @@ handle_info({gen_event_EXIT, Module, Reason}, #state{module=Module,
handle_info(reinstall_handler, #state{module=Module, config=Config, event=Event} = State) ->
install_handler(Event, Module, Config),
{noreply, State};
handle_info(stop, State) ->
{stop, normal, State};
handle_info(_Info, State) ->
{noreply, State}.
@ -87,11 +91,18 @@ code_change(_OldVsn, State, _Extra) ->
install_handler(Event, Module, Config) ->
case gen_event:add_sup_handler(Event, Module, Config) of
ok ->
_ = lager:log(debug, self(), "Lager installed handler ~p into ~p", [Module, Event]),
?INT_LOG(debug, "Lager installed handler ~p into ~p", [Module, Event]),
lager:update_loglevel_config(),
ok;
{error, {fatal, Reason}} ->
?INT_LOG(error, "Lager fatally failed to install handler ~p into"
" ~p, NOT retrying: ~p", [Module, Event, Reason]),
%% tell ourselves to stop
self() ! stop,
ok;
Error ->
%% try to reinstall it later
_ = lager:log(error, self(), "Lager failed to install handler ~p into"
?INT_LOG(error, "Lager failed to install handler ~p into"
" ~p, retrying later : ~p", [Module, Event, Error]),
erlang:send_after(5000, self(), reinstall_handler),
ok
@ -112,8 +123,6 @@ reinstall_on_initial_failure_test_() ->
application:set_env(lager, handlers, [{lager_test_backend, info}, {lager_crash_backend, [from_now(2), undefined]}]),
application:set_env(lager, error_logger_redirect, false),
application:unset_env(lager, crash_log),
application:start(compiler),
application:start(syntax_tools),
application:start(lager),
try
?assertEqual(1, lager_test_backend:count()),
@ -140,18 +149,16 @@ reinstall_on_runtime_failure_test_() ->
application:set_env(lager, handlers, [{lager_test_backend, info}, {lager_crash_backend, [undefined, from_now(5)]}]),
application:set_env(lager, error_logger_redirect, false),
application:unset_env(lager, crash_log),
application:start(compiler),
application:start(syntax_tools),
application:start(lager),
try
?assertEqual(0, lager_test_backend:count()),
?assert(lists:member(lager_crash_backend, gen_event:which_handlers(lager_event))),
timer:sleep(6000),
?assertEqual(2, lager_test_backend:count()),
{_Level, _Time, Message, _Metadata} = lager_test_backend:pop(),
?assertEqual("Lager event handler lager_crash_backend exited with reason crash", lists:flatten(Message)),
{_Level2, _Time2, Message2, _Metadata} = lager_test_backend:pop(),
?assertMatch("Lager failed to install handler lager_crash_backend into lager_event, retrying later :"++_, lists:flatten(Message2)),
{_Severity, _Date, Msg, _Metadata} = lager_test_backend:pop(),
?assertEqual("Lager event handler lager_crash_backend exited with reason crash", lists:flatten(Msg)),
{_Severity2, _Date2, Msg2, _Metadata2} = lager_test_backend:pop(),
?assertMatch("Lager failed to install handler lager_crash_backend into lager_event, retrying later :"++_, lists:flatten(Msg2)),
?assertEqual(false, lists:member(lager_crash_backend, gen_event:which_handlers(lager_event)))
after
application:stop(lager),

+ 1
- 1
src/lager_handler_watcher_sup.erl View File

@ -35,5 +35,5 @@ init([]) ->
{ok, {{simple_one_for_one, 10, 60},
[
{lager_handler_watcher, {lager_handler_watcher, start_link, []},
transient, 5000, worker, [lager_handler_watcher]}
temporary, 5000, worker, [lager_handler_watcher]}
]}}.

+ 0
- 107
src/lager_mochiglobal.erl View File

@ -1,107 +0,0 @@
%% @author Bob Ippolito <bob@mochimedia.com>
%% @copyright 2010 Mochi Media, Inc.
%% @doc Abuse module constant pools as a "read-only shared heap" (since erts 5.6)
%% <a href="http://www.erlang.org/pipermail/erlang-questions/2009-March/042503.html">[1]</a>.
-module(lager_mochiglobal).
-author("Bob Ippolito <bob@mochimedia.com>").
-export([get/1, get/2, put/2, delete/1]).
-spec get(atom()) -> any() | undefined.
%% @equiv get(K, undefined)
get(K) ->
get(K, undefined).
-spec get(atom(), T) -> any() | T.
%% @doc Get the term for K or return Default.
get(K, Default) ->
get(K, Default, key_to_module(K)).
get(_K, Default, Mod) ->
try Mod:term()
catch error:undef ->
Default
end.
-spec put(atom(), any()) -> ok.
%% @doc Store term V at K, replaces an existing term if present.
put(K, V) ->
put(K, V, key_to_module(K)).
put(_K, V, Mod) ->
Bin = compile(Mod, V),
code:purge(Mod),
{module, Mod} = code:load_binary(Mod, atom_to_list(Mod) ++ ".erl", Bin),
ok.
-spec delete(atom()) -> boolean().
%% @doc Delete term stored at K, no-op if non-existent.
delete(K) ->
delete(K, key_to_module(K)).
delete(_K, Mod) ->
code:purge(Mod),
code:delete(Mod).
-spec key_to_module(atom()) -> atom().
key_to_module(K) ->
list_to_atom("lager_mochiglobal:" ++ atom_to_list(K)).
-spec compile(atom(), any()) -> binary().
compile(Module, T) ->
{ok, Module, Bin} = compile:forms(forms(Module, T),
[verbose, report_errors]),
Bin.
-spec forms(atom(), any()) -> [erl_syntax:syntaxTree()].
forms(Module, T) ->
[erl_syntax:revert(X) || X <- term_to_abstract(Module, term, T)].
-spec term_to_abstract(atom(), atom(), any()) -> [erl_syntax:syntaxTree()].
term_to_abstract(Module, Getter, T) ->
[%% -module(Module).
erl_syntax:attribute(
erl_syntax:atom(module),
[erl_syntax:atom(Module)]),
%% -export([Getter/0]).
erl_syntax:attribute(
erl_syntax:atom(export),
[erl_syntax:list(
[erl_syntax:arity_qualifier(
erl_syntax:atom(Getter),
erl_syntax:integer(0))])]),
%% Getter() -> T.
erl_syntax:function(
erl_syntax:atom(Getter),
[erl_syntax:clause([], none, [erl_syntax:abstract(T)])])].
%%
%% Tests
%%
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
get_put_delete_test() ->
K = '$$test$$mochiglobal',
delete(K),
?assertEqual(
bar,
get(K, bar)),
try
?MODULE:put(K, baz),
?assertEqual(
baz,
get(K, bar)),
?MODULE:put(K, wibble),
?assertEqual(
wibble,
?MODULE:get(K))
after
delete(K)
end,
?assertEqual(
bar,
get(K, bar)),
?assertEqual(
undefined,
?MODULE:get(K)),
ok.
-endif.

+ 18
- 5
src/lager_msg.erl View File

@ -1,8 +1,9 @@
-module(lager_msg).
-export([new/5]).
-export([new/4, new/5]).
-export([message/1]).
-export([timestamp/1]).
-export([datetime/1]).
-export([severity/1]).
-export([severity_as_int/1]).
-export([metadata/1]).
@ -12,26 +13,38 @@
destinations :: list(),
metadata :: [tuple()],
severity :: lager:log_level(),
timestamp :: {string(), string()},
datetime :: {string(), string()},
timestamp :: erlang:timestamp(),
message :: list()
}).
-opaque lager_msg() :: #lager_msg{}.
-export_type([lager_msg/0]).
-spec new(list(), {string(), string()}, atom(), [tuple()], list()) -> lager_msg().
%% create with provided timestamp, handy for testing mostly
-spec new(list(), erlang:timestamp(), lager:log_level(), [tuple()], list()) -> lager_msg().
new(Msg, Timestamp, Severity, Metadata, Destinations) ->
#lager_msg{message=Msg, timestamp=Timestamp, severity=Severity,
{Date, Time} = lager_util:format_time(lager_util:maybe_utc(lager_util:localtime_ms(Timestamp))),
#lager_msg{message=Msg, datetime={Date, Time}, timestamp=Timestamp, severity=Severity,
metadata=Metadata, destinations=Destinations}.
-spec new(list(), lager:log_level(), [tuple()], list()) -> lager_msg().
new(Msg, Severity, Metadata, Destinations) ->
Now = os:timestamp(),
new(Msg, Now, Severity, Metadata, Destinations).
-spec message(lager_msg()) -> list().
message(Msg) ->
Msg#lager_msg.message.
-spec timestamp(lager_msg()) -> {string(), string()}.
-spec timestamp(lager_msg()) -> erlang:timestamp().
timestamp(Msg) ->
Msg#lager_msg.timestamp.
-spec datetime(lager_msg()) -> {string(), string()}.
datetime(Msg) ->
Msg#lager_msg.datetime.
-spec severity(lager_msg()) -> lager:log_level().
severity(Msg) ->
Msg#lager_msg.severity.
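With the record now carrying both the raw timestamp and the formatted pair, `new/4` stamps the message itself while `new/5` accepts an explicit `erlang:timestamp()` (which is what the tests use). A small illustrative shell sketch:

```erlang
%% Illustrative: build a message and read it back through the accessors above.
Msg = lager_msg:new("hello", error, [{pid, self()}], []),
{_Mega, _Sec, _Micro} = lager_msg:timestamp(Msg), %% raw os:timestamp()
{_Date, _Time}        = lager_msg:datetime(Msg),  %% pre-formatted strings
error   = lager_msg:severity(Msg),
"hello" = lager_msg:message(Msg).
```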

+ 4
- 0
src/lager_sup.erl View File

@ -32,6 +32,8 @@ start_link() ->
supervisor:start_link({local, ?MODULE}, ?MODULE, []).
init([]) ->
%% set up the config, is safe even during relups
lager_config:new(),
Children = [
{lager, {gen_event, start_link, [{local, lager_event}]},
permanent, 5000, worker, [dynamic]},
@ -42,6 +44,8 @@ init([]) ->
Crash = case application:get_env(lager, crash_log) of
{ok, undefined} ->
[];
{ok, false} ->
[];
{ok, File} ->
MaxBytes = case application:get_env(lager, crash_log_msg_size) of
{ok, Val} when is_integer(Val) andalso Val > 0 -> Val;

+ 161
- 17
src/lager_transform.erl View File

@ -18,7 +18,7 @@
%% This parse transform rewrites functions calls to lager:Severity/1,2 into
%% a more complicated function that captures module, function, line, pid and
%% time as well. The entire function call is then wrapped in a case that
%% checks the mochiglobal 'loglevel' value, so the code isn't executed if
%% checks the lager_config 'loglevel' value, so the code isn't executed if
%% nothing wishes to consume the message.
-module(lager_transform).
@ -31,10 +31,13 @@
parse_transform(AST, Options) ->
TruncSize = proplists:get_value(lager_truncation_size, Options, ?DEFAULT_TRUNCATION),
put(truncation_size, TruncSize),
erlang:put(records, []),
%% .app file should either be in the outdir, or the same dir as the source file
guess_application(proplists:get_value(outdir, Options), hd(AST)),
walk_ast([], AST).
walk_ast(Acc, []) ->
lists:reverse(Acc);
insert_record_attribute(Acc);
walk_ast(Acc, [{attribute, _, module, {Module, _PmodArgs}}=H|T]) ->
%% A wild parameterized module appears!
put(module, Module),
@ -46,6 +49,14 @@ walk_ast(Acc, [{function, Line, Name, Arity, Clauses}|T]) ->
put(function, Name),
walk_ast([{function, Line, Name, Arity,
walk_clauses([], Clauses)}|Acc], T);
walk_ast(Acc, [{attribute, _, record, {Name, Fields}}=H|T]) ->
FieldNames = lists:map(fun({record_field, _, {atom, _, FieldName}}) ->
FieldName;
({record_field, _, {atom, _, FieldName}, _Default}) ->
FieldName
end, Fields),
stash_record({Name, FieldNames}),
walk_ast([H|Acc], T);
walk_ast(Acc, [H|T]) ->
walk_ast([H|Acc], T).
@ -63,7 +74,8 @@ transform_statement({call, Line, {remote, _Line1, {atom, _Line2, lager},
{atom, _Line3, Severity}}, Arguments0} = Stmt) ->
case lists:member(Severity, ?LEVELS) of
true ->
DefaultAttrs = {cons, Line, {tuple, Line, [
SeverityAsInt=lager_util:level_to_num(Severity),
DefaultAttrs0 = {cons, Line, {tuple, Line, [
{atom, Line, module}, {atom, Line, get(module)}]},
{cons, Line, {tuple, Line, [
{atom, Line, function}, {atom, Line, get(function)}]},
@ -74,7 +86,22 @@ transform_statement({call, Line, {remote, _Line1, {atom, _Line2, lager},
{atom, Line, pid},
{call, Line, {atom, Line, pid_to_list}, [
{call, Line, {atom, Line ,self}, []}]}]},
{nil, Line}}}}},
{cons, Line, {tuple, Line, [
{atom, Line, node},
{call, Line, {atom, Line, node}, []}]},
%% get the metadata with lager:md(); this always returns a list, so we can use it as the tail here
{call, Line, {remote, Line, {atom, Line, lager}, {atom, Line, md}}, []}}}}}},
%{nil, Line}}}}}}},
DefaultAttrs = case erlang:get(application) of
undefined ->
DefaultAttrs0;
App ->
%% stick the application in the attribute list
concat_lists({cons, Line, {tuple, Line, [
{atom, Line, application},
{atom, Line, App}]},
{nil, Line}}, DefaultAttrs0)
end,
{Traces, Message, Arguments} = case Arguments0 of
[Format] ->
{DefaultAttrs, Format, {atom, Line, none}};
@ -83,28 +110,80 @@ transform_statement({call, Line, {remote, _Line1, {atom, _Line2, lager},
%% [Format, Args] or [Attr, Format].
%% The trace attributes will be a list of tuples, so check
%% for that.
case Arg1 of
{cons, _, {tuple, _, _}, _} ->
case {element(1, Arg1), Arg1} of
{_, {cons, _, {tuple, _, _}, _}} ->
{concat_lists(Arg1, DefaultAttrs),
Arg2, {atom, Line, none}};
{Type, _} when Type == var;
Type == lc;
Type == call;
Type == record_field ->
%% crap, it's not a literal. look at the second
%% argument to see if it is a string
case Arg2 of
{string, _, _} ->
{concat_lists(Arg1, DefaultAttrs),
Arg2, {atom, Line, none}};
_ ->
%% not a string, going to have to guess
%% it's the argument list
{DefaultAttrs, Arg1, Arg2}
end;
_ ->
{DefaultAttrs, Arg1, Arg2}
end;
[Attrs, Format, Args] ->
{concat_lists(Attrs, DefaultAttrs), Format, Args}
end,
{call, Line, {remote, Line, {atom,Line,lager},{atom,Line,dispatch_log}},
%% Generate some unique variable names so we don't accidentally export from case clauses.
%% Note that these are not actual atoms, but the AST treats variable names as atoms.
LevelVar = make_varname("__Level", Line),
TracesVar = make_varname("__Traces", Line),
PidVar = make_varname("__Pid", Line),
%% Wrap the call to lager:do_log in a case that avoids doing any work if this message is not eligible for logging
%% case {whereis(lager_event), lager_config:get(loglevel, {?LOG_NONE, []})} of
{'case', Line,
{tuple, Line,
[{call, Line, {atom, Line, whereis}, [{atom, Line, lager_event}]},
{call, Line, {remote, Line, {atom, Line, lager_config}, {atom, Line, get}}, [{atom, Line, loglevel}, {tuple, Line, [{integer, Line, 0},{nil, Line}]}]}]},
[
{atom,Line,Severity},
Traces,
Message,
Arguments,
{integer, Line, get(truncation_size)}
]
};
false ->
Stmt
end;
%% {undefined, _} -> {error, lager_not_running}
{clause, Line,
[{tuple, Line, [{atom, Line, undefined}, {var, Line, '_'}]}],
[],
%% trick the linter into avoiding a 'term constructed but not used' error:
%% (fun() -> {error, lager_not_running} end)();
[{call, Line, {'fun', Line, {clauses, [{clause, Line, [],[], [{tuple, Line, [{atom, Line, error},{atom, Line, lager_not_running}]}]}]}}, []}]},
%% If we care about the loglevel, or there are any traces installed, we have to do more checking
%% {Level, Traces} when (Level band SeverityAsInt) /= 0 orelse Traces /= [] ->
{clause, Line,
[{tuple, Line, [{var, Line, PidVar}, {tuple, Line, [{var, Line, LevelVar}, {var, Line, TracesVar}]}]}],
[[{op, Line, 'orelse',
{op, Line, '/=', {op, Line, 'band', {var, Line, LevelVar}, {integer, Line, SeverityAsInt}}, {integer, Line, 0}},
{op, Line, '/=', {var, Line, TracesVar}, {nil, Line}}}]],
[
%% do the call to lager:do_log
{call, Line, {remote, Line, {atom, Line, lager}, {atom, Line, do_log}},
[
{atom,Line,Severity},
Traces,
Message,
Arguments,
{integer, Line, get(truncation_size)},
{integer, Line, SeverityAsInt},
{var, Line, LevelVar},
{var, Line, TracesVar},
{var, Line, PidVar}
]
}
]},
%% otherwise, do nothing
%% _ -> ok
{clause, Line, [{var, Line, '_'}],[],[{atom, Line, ok}]}
]};
false ->
Stmt
end;
transform_statement({call, Line, {remote, Line1, {atom, Line2, boston_lager},
{atom, Line3, Severity}}, Arguments}) ->
NewArgs = case Arguments of
@ -121,8 +200,73 @@ transform_statement(Stmt) when is_list(Stmt) ->
transform_statement(Stmt) ->
Stmt.
make_varname(Prefix, Line) ->
list_to_atom(Prefix ++ atom_to_list(get(module)) ++ integer_to_list(Line)).
%% concat 2 list ASTs by replacing the terminating [] in A with the contents of B
concat_lists({var, Line, _Name}=Var, B) ->
%% concatenating a var with a cons
{call, Line, {remote, Line, {atom, Line, lists},{atom, Line, flatten}},
[{cons, Line, Var, B}]};
concat_lists({lc, Line, _Body, _Generator} = LC, B) ->
%% concatenating a LC with a cons
{call, Line, {remote, Line, {atom, Line, lists},{atom, Line, flatten}},
[{cons, Line, LC, B}]};
concat_lists({call, Line, _Function, _Args} = Call, B) ->
%% concatenating a call with a cons
{call, Line, {remote, Line, {atom, Line, lists},{atom, Line, flatten}},
[{cons, Line, Call, B}]};
concat_lists({record_field, Line, _Var, _Record, _Field} = Rec, B) ->
%% concatenating a record_field with a cons
{call, Line, {remote, Line, {atom, Line, lists},{atom, Line, flatten}},
[{cons, Line, Rec, B}]};
concat_lists({nil, _Line}, B) ->
B;
concat_lists({cons, Line, Element, Tail}, B) ->
{cons, Line, Element, concat_lists(Tail, B)}.
stash_record(Record) ->
Records = case erlang:get(records) of
undefined ->
[];
R ->
R
end,
erlang:put(records, [Record|Records]).
insert_record_attribute(AST) ->
lists:foldl(fun({attribute, Line, module, _}=E, Acc) ->
[E, {attribute, Line, lager_records, erlang:get(records)}|Acc];
(E, Acc) ->
[E|Acc]
end, [], AST).
guess_application(Dirname, Attr) when Dirname /= undefined ->
case find_app_file(Dirname) of
no_idea ->
%% try it based on source file directory (app.src most likely)
guess_application(undefined, Attr);
_ ->
ok
end;
guess_application(undefined, {attribute, _, file, {Filename, _}}) ->
Dir = filename:dirname(Filename),
find_app_file(Dir);
guess_application(_, _) ->
ok.
find_app_file(Dir) ->
case filelib:wildcard(Dir++"/*.{app,app.src}") of
[] ->
no_idea;
[File] ->
case file:consult(File) of
{ok, [{application, Appname, _Attributes}|_]} ->
erlang:put(application, Appname);
_ ->
no_idea
end;
_ ->
%% multiple files, uh oh
no_idea
end.
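Taken together, the transform now rewrites a literal lager:info(Fmt, Args) call into roughly the shape below. This is a hand-written sketch rather than the exact generated AST: DefaultAttrs, Fmt, Args, SeverityAsInt and TruncSize stand for terms the transform splices in as literals, and the real __-prefixed variables carry the module/line suffix added by make_varname/2.

```erlang
case {whereis(lager_event), lager_config:get(loglevel, {0, []})} of
    {undefined, _} ->
        %% lager isn't running; the fun keeps the linter from complaining
        %% about a term that is constructed but never used
        (fun() -> {error, lager_not_running} end)();
    {__Pid, {__Level, __Traces}}
      when (__Level band SeverityAsInt) /= 0 orelse __Traces /= [] ->
        %% only now is any real work done
        lager:do_log(info, DefaultAttrs, Fmt, Args, TruncSize,
                     SeverityAsInt, __Level, __Traces, __Pid);
    _ ->
        ok
end
```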

+ 70
- 8
src/lager_trunc_io.erl View File

@ -149,7 +149,7 @@ print(Bin, Max, _Options) when is_binary(Bin), Max < 2 ->
{"<<...>>", 7};
print(Binary, Max, Options) when is_binary(Binary) ->
B = binary_to_list(Binary, 1, lists:min([Max, byte_size(Binary)])),
{L, Len} = case Options#print_options.lists_as_strings orelse
{Res, Length} = case Options#print_options.lists_as_strings orelse
Options#print_options.force_strings of
true ->
Depth = Options#print_options.depth,
@ -161,7 +161,14 @@ print(Binary, Max, Options) when is_binary(Binary) ->
string:substr(B, 1, MaxSize);
false -> B
end,
try alist(In, Max -1, Options) of
MaxLen = case Options#print_options.force_strings of
true ->
Max;
false ->
%% make room for the leading double quote
Max - 1
end,
try alist(In, MaxLen, Options) of
{L0, Len0} ->
case Options#print_options.force_strings of
false ->
@ -191,12 +198,6 @@ print(Binary, Max, Options) when is_binary(Binary) ->
_ ->
list_body(B, Max-4, dec_depth(Options), true)
end,
{Res, Length} = case L of
[91, X, 93] ->
{X, Len-2};
X ->
{X, Len}
end,
case Options#print_options.force_strings of
true ->
{Res, Length};
@ -262,6 +263,11 @@ print(Port, _Max, _Options) when is_port(Port) ->
L = erlang:port_to_list(Port),
{L, length(L)};
print({'$lager_record', Name, Fields}, Max, Options) ->
Leader = "#" ++ atom_to_list(Name) ++ "{",
{RC, Len} = record_fields(Fields, Max - length(Leader) + 1, dec_depth(Options)),
{[Leader, RC, "}"], Len + length(Leader) + 1};
print(Tuple, Max, Options) when is_tuple(Tuple) ->
{TC, Len} = tuple_contents(Tuple, Max-2, Options),
{[${, TC, $}], Len + 2};
@ -389,6 +395,17 @@ alist([H|T], Max, Options) when H =:= $\t; H =:= $\n; H =:= $\r; H =:= $\v; H =:
alist([H|T], Max, #print_options{force_strings=true} = Options) when is_integer(H) ->
{L, Len} = alist(T, Max-1, Options),
{[H|L], Len + 1};
alist([H|T], Max, Options = #print_options{force_strings=true}) when is_binary(H); is_list(H) ->
{List, Len} = print(H, Max, Options),
case (Max - Len) =< 0 of
true ->
%% no more room to print anything
{List, Len};
false ->
%% no need to decrement depth, as we're in printable string mode
{Final, FLen} = alist(T, Max - Len, Options),
{[List|Final], FLen+Len}
end;
alist(_, _, #print_options{force_strings=true}) ->
erlang:error(badarg);
alist([H|_L], _Max, _Options) ->
@ -436,6 +453,23 @@ escape($\f) -> "\\f";
escape($\b) -> "\\b";
escape($\v) -> "\\v".
record_fields([], _, _) ->
{"", 0};
record_fields(_, Max, #print_options{depth=D}) when Max < 4; D == 0 ->
{"...", 3};
record_fields([{Field, Value}|T], Max, Options) ->
{ExtraChars, Terminator} = case T of
[] ->
{1, []};
_ ->
{2, ","}
end,
{FieldStr, FieldLen} = print(Field, Max - ExtraChars, Options),
{ValueStr, ValueLen} = print(Value, Max - (FieldLen + ExtraChars), Options),
{Final, FLen} = record_fields(T, Max - (FieldLen + ValueLen + ExtraChars), dec_depth(Options)),
{[FieldStr++"="++ValueStr++Terminator|Final], FLen + FieldLen + ValueLen + ExtraChars}.
-ifdef(TEST).
%%--------------------
%% The start of a test suite. So far, it only checks for not crashing.
@ -571,6 +605,9 @@ binary_printing_test() ->
?assertEqual("<<\"hello\\fworld\">>", lists:flatten(format("~p", [<<"hello\fworld">>], 50))),
?assertEqual("<<\"hello\\vworld\">>", lists:flatten(format("~p", [<<"hello\vworld">>], 50))),
?assertEqual(" hello", lists:flatten(format("~10s", [<<"hello">>], 50))),
?assertEqual("[a]", lists:flatten(format("~s", [<<"[a]">>], 50))),
?assertEqual("[a]", lists:flatten(format("~s", [[<<"[a]">>]], 50))),
ok.
bitstring_printing_test() ->
@ -628,6 +665,20 @@ list_printing_test() ->
?assertEqual("[9|9]", lists:flatten(format("~p", [[9|9]], 50))),
ok.
iolist_printing_test() ->
?assertEqual("iolist: HelloIamaniolist",
lists:flatten(format("iolist: ~s", [[$H, $e, $l, $l, $o, "I", ["am", [<<"an">>], [$i, $o, $l, $i, $s, $t]]]], 1000))),
?assertEqual("123...",
lists:flatten(format("~s", [[<<"123456789">>, "HellIamaniolist"]], 6))),
?assertEqual("123456...",
lists:flatten(format("~s", [[<<"123456789">>, "HellIamaniolist"]], 9))),
?assertEqual("123456789H...",
lists:flatten(format("~s", [[<<"123456789">>, "HellIamaniolist"]], 13))),
?assertEqual("123456789HellIamaniolist",
lists:flatten(format("~s", [[<<"123456789">>, "HellIamaniolist"]], 30))),
ok.
tuple_printing_test() ->
?assertEqual("{}", lists:flatten(format("~p", [{}], 50))),
?assertEqual("{}", lists:flatten(format("~w", [{}], 50))),
@ -721,4 +772,15 @@ depth_limit_test() ->
ok.
print_terms_without_format_string_test() ->
?assertError(badarg, format({hello, world}, [], 50)),
?assertError(badarg, format([{google, bomb}], [], 50)),
?assertError(badarg, format([$h,$e,$l,$l,$o, 3594], [], 50)),
?assertEqual("helloworld", lists:flatten(format([$h,$e,$l,$l,$o, "world"], [], 50))),
?assertEqual("hello", lists:flatten(format(<<"hello">>, [], 50))),
?assertEqual("hello", lists:flatten(format('hello', [], 50))),
?assertError(badarg, format(<<1, 2, 3, 1:7>>, [], 100)),
?assertError(badarg, format(65535, [], 50)),
ok.
-endif.
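The '$lager_record' clause above is the rendering half of record pretty-printing: the parse transform stores each module's record definitions in a lager_records attribute, lager:pr/2 (exercised by the record-printing tests below) looks them up, and trunc_io prints #name{field=value,...}. A minimal sketch, assuming lager is running (pr_sketch and #user{} are illustrative):

```erlang
-module(pr_sketch).
-compile([{parse_transform, lager_transform}]).
-export([log_user/1]).

-record(user, {name, shell}).

log_user(#user{} = U) ->
    %% without lager:pr/2 this would print as a bare tuple;
    %% with it, the output reads #user{name=...,shell=...}
    lager:info("created ~p", [lager:pr(U, ?MODULE)]).
```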

+ 293
- 94
src/lager_util.erl View File

@ -18,10 +18,11 @@
-include_lib("kernel/include/file.hrl").
-export([levels/0, level_to_num/1, num_to_level/1, open_logfile/2,
ensure_logfile/4, rotate_logfile/2, format_time/0, format_time/1,
localtime_ms/0, maybe_utc/1, parse_rotation_date_spec/1,
calculate_next_rotation/1, validate_trace/1, check_traces/4, is_loggable/3]).
-export([levels/0, level_to_num/1, num_to_level/1, config_to_mask/1, config_to_levels/1, mask_to_levels/1,
open_logfile/2, ensure_logfile/4, rotate_logfile/2, format_time/0, format_time/1,
localtime_ms/0, localtime_ms/1, maybe_utc/1, parse_rotation_date_spec/1,
calculate_next_rotation/1, validate_trace/1, check_traces/4, is_loggable/3,
trace_filter/1, trace_filter/2]).
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
@ -30,34 +31,104 @@
-include("lager.hrl").
levels() ->
[debug, info, notice, warning, error, critical, alert, emergency].
level_to_num(debug) -> 7;
level_to_num(info) -> 6;
level_to_num(notice) -> 5;
level_to_num(warning) -> 4;
level_to_num(error) -> 3;
level_to_num(critical) -> 2;
level_to_num(alert) -> 1;
level_to_num(emergency) -> 0;
level_to_num(none) -> -1.
num_to_level(7) -> debug;
num_to_level(6) -> info;
num_to_level(5) -> notice;
num_to_level(4) -> warning;
num_to_level(3) -> error;
num_to_level(2) -> critical;
num_to_level(1) -> alert;
num_to_level(0) -> emergency;
num_to_level(-1) -> none.
[debug, info, notice, warning, error, critical, alert, emergency, none].
level_to_num(debug) -> ?DEBUG;
level_to_num(info) -> ?INFO;
level_to_num(notice) -> ?NOTICE;
level_to_num(warning) -> ?WARNING;
level_to_num(error) -> ?ERROR;
level_to_num(critical) -> ?CRITICAL;
level_to_num(alert) -> ?ALERT;
level_to_num(emergency) -> ?EMERGENCY;
level_to_num(none) -> ?LOG_NONE.
num_to_level(?DEBUG) -> debug;
num_to_level(?INFO) -> info;
num_to_level(?NOTICE) -> notice;
num_to_level(?WARNING) -> warning;
num_to_level(?ERROR) -> error;
num_to_level(?CRITICAL) -> critical;
num_to_level(?ALERT) -> alert;
num_to_level(?EMERGENCY) -> emergency;
num_to_level(?LOG_NONE) -> none.
-spec config_to_mask(atom()|string()) -> {'mask', integer()}.
config_to_mask(Conf) ->
Levels = config_to_levels(Conf),
{mask, lists:foldl(fun(Level, Acc) ->
level_to_num(Level) bor Acc
end, 0, Levels)}.
-spec mask_to_levels(non_neg_integer()) -> [lager:log_level()].
mask_to_levels(Mask) ->
mask_to_levels(Mask, levels(), []).
mask_to_levels(_Mask, [], Acc) ->
lists:reverse(Acc);
mask_to_levels(Mask, [Level|Levels], Acc) ->
NewAcc = case (level_to_num(Level) band Mask) /= 0 of
true ->
[Level|Acc];
false ->
Acc
end,
mask_to_levels(Mask, Levels, NewAcc).
-spec config_to_levels(atom()|string()) -> [lager:log_level()].
config_to_levels(Conf) when is_atom(Conf) ->
config_to_levels(atom_to_list(Conf));
config_to_levels([$! | Rest]) ->
levels() -- config_to_levels(Rest);
config_to_levels([$=, $< | Rest]) ->
[_|Levels] = config_to_levels_int(Rest),
lists:filter(fun(E) -> not lists:member(E, Levels) end, levels());
config_to_levels([$<, $= | Rest]) ->
[_|Levels] = config_to_levels_int(Rest),
lists:filter(fun(E) -> not lists:member(E, Levels) end, levels());
config_to_levels([$>, $= | Rest]) ->
config_to_levels_int(Rest);
config_to_levels([$=, $> | Rest]) ->
config_to_levels_int(Rest);
config_to_levels([$= | Rest]) ->
[level_to_atom(Rest)];
config_to_levels([$< | Rest]) ->
Levels = config_to_levels_int(Rest),
lists:filter(fun(E) -> not lists:member(E, Levels) end, levels());
config_to_levels([$> | Rest]) ->
[_|Levels] = config_to_levels_int(Rest),
lists:filter(fun(E) -> lists:member(E, Levels) end, levels());
config_to_levels(Conf) ->
config_to_levels_int(Conf).
%% internal function to break the recursion loop
config_to_levels_int(Conf) ->
Level = level_to_atom(Conf),
lists:dropwhile(fun(E) -> E /= Level end, levels()).
level_to_atom(String) ->
Levels = levels(),
try list_to_existing_atom(String) of
Atom ->
case lists:member(Atom, Levels) of
true ->
Atom;
false ->
erlang:error(badarg)
end
catch
_:_ ->
erlang:error(badarg)
end.
open_logfile(Name, Buffer) ->
case filelib:ensure_dir(Name) of
ok ->
Options = [append, raw] ++
if Buffer == true -> [delayed_write];
true -> []
case Buffer of
{Size, Interval} when is_integer(Interval), Interval >= 0, is_integer(Size), Size >= 0 ->
[{delayed_write, Size, Interval}];
_ -> []
end,
case file:open(Name, Options) of
{ok, FD} ->
@ -107,10 +178,15 @@ ensure_logfile(Name, FD, Inode, Buffer) ->
%% returns localtime with milliseconds included
localtime_ms() ->
{_, _, Micro} = Now = os:timestamp(),
Now = os:timestamp(),
localtime_ms(Now).
localtime_ms(Now) ->
{_, _, Micro} = Now,
{Date, {Hours, Minutes, Seconds}} = calendar:now_to_local_time(Now),
{Date, {Hours, Minutes, Seconds, Micro div 1000 rem 1000}}.
maybe_utc({Date, {H, M, S, Ms}}) ->
case lager_stdlib:maybe_utc({Date, {H, M, S}}) of
{utc, {Date1, {H1, M1, S1}}} ->
@ -119,16 +195,18 @@ maybe_utc({Date, {H, M, S, Ms}}) ->
{Date1, {H1, M1, S1, Ms}}
end.
%% renames and deletes failing are OK
%% renames failing are OK
rotate_logfile(File, 0) ->
_ = file:delete(File),
ok;
file:delete(File);
rotate_logfile(File, 1) ->
_ = file:rename(File, File++".0"),
rotate_logfile(File, 0);
case file:rename(File, File++".0") of
ok ->
ok;
_ ->
rotate_logfile(File, 0)
end;
rotate_logfile(File, Count) ->
_ =file:rename(File ++ "." ++ integer_to_list(Count - 2), File ++ "." ++
integer_to_list(Count - 1)),
_ = file:rename(File ++ "." ++ integer_to_list(Count - 2), File ++ "." ++ integer_to_list(Count - 1)),
rotate_logfile(File, Count - 1).
format_time() ->
@ -275,23 +353,34 @@ calculate_next_rotation([{date, Date}|T], {{Year, Month, Day}, _} = Now) ->
NewNow = calendar:gregorian_seconds_to_datetime(Seconds),
calculate_next_rotation(T, NewNow).
validate_trace({Filter, Level, {Destination, ID}}) when is_list(Filter), is_atom(Level), is_atom(Destination) ->
trace_filter(Query) ->
trace_filter(?DEFAULT_TRACER, Query).
%% TODO: Support multiple trace modules
trace_filter(Module, Query) when Query == none; Query == [] ->
trace_filter(Module, glc:null(false));
trace_filter(Module, Query) when is_list(Query) ->
trace_filter(Module, glc_lib:reduce(trace_any(Query)));
trace_filter(Module, Query) ->
{ok, _} = glc:compile(Module, Query).
validate_trace({Filter, Level, {Destination, ID}}) when is_tuple(Filter); is_list(Filter), is_atom(Level), is_atom(Destination) ->
case validate_trace({Filter, Level, Destination}) of
{ok, {F, L, D}} ->
{ok, {F, L, {D, ID}}};
Error ->
Error
end;
validate_trace({Filter, Level, Destination}) when is_list(Filter), is_atom(Level), is_atom(Destination) ->
try level_to_num(Level) of
validate_trace({Filter, Level, Destination}) when is_tuple(Filter); is_list(Filter), is_atom(Level), is_atom(Destination) ->
ValidFilter = validate_trace_filter(Filter),
try config_to_mask(Level) of
_ when not ValidFilter ->
{error, invalid_trace};
L when is_list(Filter) ->
{ok, {trace_all(Filter), L, Destination}};
L ->
case lists:all(fun({Key, _Value}) when is_atom(Key) -> true; (_) ->
false end, Filter) of
true ->
{ok, {Filter, L, Destination}};
_ ->
{error, invalid_filter}
end
{ok, {Filter, L, Destination}}
catch
_:_ ->
{error, invalid_level}
@ -299,37 +388,74 @@ validate_trace({Filter, Level, Destination}) when is_list(Filter), is_atom(Level
validate_trace(_) ->
{error, invalid_trace}.
validate_trace_filter(Filter) when is_tuple(Filter), is_atom(element(1, Filter)) =:= false ->
false;
validate_trace_filter(Filter) ->
case lists:all(fun({Key, '*'}) when is_atom(Key) -> true;
({Key, _Value}) when is_atom(Key) -> true;
({Key, '=', _Value}) when is_atom(Key) -> true;
({Key, '<', _Value}) when is_atom(Key) -> true;
({Key, '>', _Value}) when is_atom(Key) -> true;
(_) -> false end, Filter) of
true ->
true;
_ ->
false
end.
trace_all(Query) ->
glc:all(trace_acc(Query)).
trace_any(Query) ->
glc:any(Query).
trace_acc(Query) ->
trace_acc(Query, []).
trace_acc([], Acc) ->
lists:reverse(Acc);
trace_acc([{Key, '*'}|T], Acc) ->
trace_acc(T, [glc:wc(Key)|Acc]);
trace_acc([{Key, Val}|T], Acc) ->
trace_acc(T, [glc:eq(Key, Val)|Acc]);
trace_acc([{Key, '=', Val}|T], Acc) ->
trace_acc(T, [glc:eq(Key, Val)|Acc]);
trace_acc([{Key, '>', Val}|T], Acc) ->
trace_acc(T, [glc:gt(Key, Val)|Acc]);
trace_acc([{Key, '<', Val}|T], Acc) ->
trace_acc(T, [glc:lt(Key, Val)|Acc]).
check_traces(_, _, [], Acc) ->
lists:flatten(Acc);
check_traces(Attrs, Level, [{_, FilterLevel, _}|Flows], Acc) when Level > FilterLevel ->
check_traces(Attrs, Level, [{_, {mask, FilterLevel}, _}|Flows], Acc) when (Level band FilterLevel) == 0 ->
check_traces(Attrs, Level, Flows, Acc);
check_traces(Attrs, Level, [{Filter, _, _}|Flows], Acc) when length(Attrs) < length(Filter) ->
check_traces(Attrs, Level, Flows, Acc);
check_traces(Attrs, Level, [Flow|Flows], Acc) ->
check_traces(Attrs, Level, Flows, [check_trace(Attrs, Flow)|Acc]).
check_trace(Attrs, {Filter, _Level, Dest}) ->
case check_trace_iter(Attrs, Filter) of
true ->
Dest;
false ->
[]
end.
check_trace_iter(_, []) ->
true;
check_trace_iter(Attrs, [{Key, Match}|T]) ->
case lists:keyfind(Key, 1, Attrs) of
{Key, _} when Match == '*' ->
check_trace_iter(Attrs, T);
{Key, Match} ->
check_trace_iter(Attrs, T);
_ ->
false
check_trace(Attrs, {Filter, _Level, Dest}) when is_list(Filter) ->
check_trace(Attrs, {trace_all(Filter), _Level, Dest});
check_trace(Attrs, {Filter, _Level, Dest}) when is_tuple(Filter) ->
Made = gre:make(Attrs, [list]),
glc:handle(?DEFAULT_TRACER, Made),
Match = glc_lib:matches(Filter, Made),
case Match of
true ->
Dest;
false ->
[]
end.
-spec is_loggable(lager_msg:lager_msg(),integer(),term()) -> boolean().
-spec is_loggable(lager_msg:lager_msg(), non_neg_integer()|{'mask', non_neg_integer()}, term()) -> boolean().
is_loggable(Msg, {mask, Mask}, MyName) ->
%% using syslog style comparison flags
%S = lager_msg:severity_as_int(Msg),
%?debugFmt("comparing masks ~.2B and ~.2B -> ~p~n", [S, Mask, S band Mask]),
(lager_msg:severity_as_int(Msg) band Mask) /= 0 orelse
lists:member(MyName, lager_msg:destinations(Msg));
is_loggable(Msg ,SeverityThreshold,MyName) ->
lager_msg:severity_as_int(Msg) =< SeverityThreshold orelse
lists:member(MyName, lager_msg:destinations(Msg)).
@ -439,40 +565,76 @@ rotate_file_test() ->
rotate_logfile("rotation.log", 10)
end || N <- lists:seq(0, 20)].
rotate_file_fail_test() ->
%% make sure the directory exists
?assertEqual(ok, filelib:ensure_dir("rotation/rotation.log")),
%% fix the permissions on it
os:cmd("chown -R u+rwx rotation"),
%% delete any old files
[ok = file:delete(F) || F <- filelib:wildcard("rotation/*")],
%% write a file
file:write_file("rotation/rotation.log", "hello"),
%% hose up the permissions
os:cmd("chown u-w rotation"),
?assertMatch({error, _}, rotate_logfile("rotation.log", 10)),
?assert(filelib:is_regular("rotation/rotation.log")),
os:cmd("chown u+w rotation"),
?assertMatch(ok, rotate_logfile("rotation/rotation.log", 10)),
?assert(filelib:is_regular("rotation/rotation.log.0")),
?assertEqual(false, filelib:is_regular("rotation/rotation.log")),
ok.
check_trace_test() ->
?assertEqual([foo], check_traces([{module, ?MODULE}], 0, [{[{module, ?MODULE}],
0, foo},
{[{module, test}], 0, bar}], [])),
?assertEqual([], check_traces([{module, ?MODULE}], 0, [{[{module, ?MODULE},
{foo, bar}], 0, foo},
{[{module, test}], 0, bar}], [])),
?assertEqual([bar], check_traces([{module, ?MODULE}], 0, [{[{module, ?MODULE},
{foo, bar}], 0, foo},
{[{module, '*'}], 0, bar}], [])),
?assertEqual([bar], check_traces([{module, ?MODULE}], 0, [{[{module, '*'},
{foo, bar}], 0, foo},
{[{module, '*'}], 0, bar}], [])),
?assertEqual([bar], check_traces([{module, ?MODULE}], 0, [{[{module, '*'},
{foo, '*'}], 0, foo},
{[{module, '*'}], 0, bar}], [])),
?assertEqual([bar, foo], check_traces([{module, ?MODULE}, {foo, bar}], 0, [{[{module, '*'},
{foo, '*'}], 0, foo},
{[{module, '*'}], 0, bar}], [])),
?assertEqual([], check_traces([{module, ?MODULE}, {foo, bar}], 6, [{[{module, '*'},
{foo, '*'}], 0, foo},
{[{module, '*'}], 0, bar}], [])),
?assertEqual([foo], check_traces([{module, ?MODULE}, {foo, bar}], 6, [{[{module, '*'},
{foo, '*'}], 7, foo},
{[{module, '*'}], 0, bar}], [])),
trace_filter(none),
%% match by module
?assertEqual([foo], check_traces([{module, ?MODULE}], ?EMERGENCY, [
{[{module, ?MODULE}], config_to_mask(emergency), foo},
{[{module, test}], config_to_mask(emergency), bar}], [])),
%% match by module, but with an unsatisfiable extra attribute
?assertEqual([], check_traces([{module, ?MODULE}], ?EMERGENCY, [
{[{module, ?MODULE}, {foo, bar}], config_to_mask(emergency), foo},
{[{module, test}], config_to_mask(emergency), bar}], [])),
%% match by wildcard module
?assertEqual([bar], check_traces([{module, ?MODULE}], ?EMERGENCY, [
{[{module, ?MODULE}, {foo, bar}], config_to_mask(emergency), foo},
{[{module, '*'}], config_to_mask(emergency), bar}], [])),
%% wildcard module, one trace with an unsatisfiable attribute
?assertEqual([bar], check_traces([{module, ?MODULE}], ?EMERGENCY, [
{[{module, '*'}, {foo, bar}], config_to_mask(emergency), foo},
{[{module, '*'}], config_to_mask(emergency), bar}], [])),
%% wildcard on a custom trace attribute that is not present
?assertEqual([bar], check_traces([{module, ?MODULE}], ?EMERGENCY, [
{[{module, '*'}, {foo, '*'}], config_to_mask(emergency), foo},
{[{module, '*'}], config_to_mask(emergency), bar}], [])),
%% wildcarding a custom attribute works when it is present
?assertEqual([bar, foo], check_traces([{module, ?MODULE}, {foo, bar}], ?EMERGENCY, [
{[{module, '*'}, {foo, '*'}], config_to_mask(emergency), foo},
{[{module, '*'}], config_to_mask(emergency), bar}], [])),
%% denied by level
?assertEqual([], check_traces([{module, ?MODULE}, {foo, bar}], ?INFO, [
{[{module, '*'}, {foo, '*'}], config_to_mask(emergency), foo},
{[{module, '*'}], config_to_mask(emergency), bar}], [])),
%% allowed by level
?assertEqual([foo], check_traces([{module, ?MODULE}, {foo, bar}], ?INFO, [
{[{module, '*'}, {foo, '*'}], config_to_mask(debug), foo},
{[{module, '*'}], config_to_mask(emergency), bar}], [])),
?assertEqual([anythingbutnotice, infoandbelow, infoonly], check_traces([{module, ?MODULE}], ?INFO, [
{[{module, '*'}], config_to_mask('=debug'), debugonly},
{[{module, '*'}], config_to_mask('=info'), infoonly},
{[{module, '*'}], config_to_mask('<=info'), infoandbelow},
{[{module, '*'}], config_to_mask('!=info'), anythingbutinfo},
{[{module, '*'}], config_to_mask('!=notice'), anythingbutnotice}
], [])),
ok.
is_loggable_test_() ->
[
{"Loggable by severity only", ?_assert(is_loggable(lager_msg:new("",{"",""}, alert, [], []),2,me))},
{"Not loggable by severity only", ?_assertNot(is_loggable(lager_msg:new("",{"",""}, critical, [], []),1,me))},
{"Loggable by severity with destination", ?_assert(is_loggable(lager_msg:new("",{"",""}, alert, [], [you]),2,me))},
{"Not loggable by severity with destination", ?_assertNot(is_loggable(lager_msg:new("",{"",""}, critical, [], [you]),1,me))},
{"Loggable by destination overriding severity", ?_assert(is_loggable(lager_msg:new("",{"",""}, critical, [], [me]),1,me))}
{"Loggable by severity only", ?_assert(is_loggable(lager_msg:new("", alert, [], []),2,me))},
{"Not loggable by severity only", ?_assertNot(is_loggable(lager_msg:new("", critical, [], []),1,me))},
{"Loggable by severity with destination", ?_assert(is_loggable(lager_msg:new("", alert, [], [you]),2,me))},
{"Not loggable by severity with destination", ?_assertNot(is_loggable(lager_msg:new("", critical, [], [you]),1,me))},
{"Loggable by destination overriding severity", ?_assert(is_loggable(lager_msg:new("", critical, [], [me]),1,me))}
].
format_time_test_() ->
@ -504,4 +666,41 @@ format_time_test_() ->
end)
].
config_to_levels_test() ->
?assertEqual([none], config_to_levels('none')),
?assertEqual({mask, 0}, config_to_mask('none')),
?assertEqual([debug], config_to_levels('=debug')),
?assertEqual([debug], config_to_levels('<info')),
?assertEqual(levels() -- [debug], config_to_levels('!=debug')),
?assertEqual(levels() -- [debug], config_to_levels('>debug')),
?assertEqual(levels() -- [debug], config_to_levels('>=info')),
?assertEqual(levels() -- [debug], config_to_levels('=>info')),
?assertEqual([debug, info, notice], config_to_levels('<=notice')),
?assertEqual([debug, info, notice], config_to_levels('=<notice')),
?assertEqual([debug], config_to_levels('<info')),
?assertEqual([debug], config_to_levels('!info')),
?assertError(badarg, config_to_levels(ok)),
?assertError(badarg, config_to_levels('<=>info')),
?assertError(badarg, config_to_levels('=<=info')),
?assertError(badarg, config_to_levels('<==>=<=>info')),
%% double negatives DO work, however
?assertEqual([debug], config_to_levels('!!=debug')),
?assertEqual(levels() -- [debug], config_to_levels('!!!=debug')),
ok.
config_to_mask_test() ->
?assertEqual({mask, 0}, config_to_mask('none')),
?assertEqual({mask, ?DEBUG bor ?INFO bor ?NOTICE bor ?WARNING bor ?ERROR bor ?CRITICAL bor ?ALERT bor ?EMERGENCY}, config_to_mask('debug')),
?assertEqual({mask, ?WARNING bor ?ERROR bor ?CRITICAL bor ?ALERT bor ?EMERGENCY}, config_to_mask('warning')),
?assertEqual({mask, ?DEBUG bor ?NOTICE bor ?WARNING bor ?ERROR bor ?CRITICAL bor ?ALERT bor ?EMERGENCY}, config_to_mask('!=info')),
ok.
mask_to_levels_test() ->
?assertEqual([], mask_to_levels(0)),
?assertEqual([debug], mask_to_levels(2#10000000)),
?assertEqual([debug, info], mask_to_levels(2#11000000)),
?assertEqual([debug, info, emergency], mask_to_levels(2#11000001)),
?assertEqual([debug, notice, error], mask_to_levels(?DEBUG bor ?NOTICE bor ?ERROR)),
ok.
-endif.
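config_to_levels/1 and config_to_mask/1 are what give handler configuration its syslog-style comparison syntax. The equalities below are lifted from the unit tests above; they can be evaluated in a shell with lager on the path (the module name is illustrative):

```erlang
-module(level_expr_sketch).
-export([demo/0]).

demo() ->
    {mask, 0} = lager_util:config_to_mask(none),
    [debug] = lager_util:config_to_levels('=debug'),   %% exactly debug
    [debug] = lager_util:config_to_levels('<info'),    %% strictly below info
    [debug] = lager_util:config_to_levels('!info'),    %% negation: not at-or-above info
    [debug, info, notice] = lager_util:config_to_levels('<=notice'),
    ok.
```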

+ 496
- 27
test/lager_test_backend.erl View File

@ -24,15 +24,16 @@
code_change/3]).
-record(state, {level, buffer, ignored}).
-record(test, {attrs, format, args}).
-compile([{parse_transform, lager_transform}]).
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-export([pop/0, count/0, count_ignored/0, flush/0]).
-export([pop/0, count/0, count_ignored/0, flush/0, print_state/0]).
-endif.
init(Level) ->
{ok, #state{level=lager_util:level_to_num(Level), buffer=[], ignored=[]}}.
{ok, #state{level=lager_util:config_to_mask(Level), buffer=[], ignored=[]}}.
handle_call(count, #state{buffer=Buffer} = State) ->
{ok, length(Buffer), State};
@ -50,19 +51,29 @@ handle_call(pop, #state{buffer=Buffer} = State) ->
handle_call(get_loglevel, #state{level=Level} = State) ->
{ok, Level, State};
handle_call({set_loglevel, Level}, State) ->
{ok, ok, State#state{level=lager_util:level_to_num(Level)}};
{ok, ok, State#state{level=lager_util:config_to_mask(Level)}};
handle_call(print_state, State) ->
spawn(fun() -> lager:info("State ~p", [lager:pr(State, ?MODULE)]) end),
timer:sleep(100),
{ok, ok, State};
handle_call(print_bad_state, State) ->
spawn(fun() -> lager:info("State ~p", [lager:pr({state, 1}, ?MODULE)]) end),
timer:sleep(100),
{ok, ok, State};
handle_call(_Request, State) ->
{ok, ok, State}.
handle_event({log, Msg},
#state{level=LogLevel,buffer=Buffer,ignored=Ignored} = State) ->
case lager_util:is_loggable(Msg, LogLevel, ?MODULE) of
true -> {ok, State#state{buffer=Buffer ++
[{lager_msg:severity_as_int(Msg),
lager_msg:timestamp(Msg),
lager_msg:message(Msg), lager_msg:metadata(Msg)}]}};
_ -> {ok, State#state{ignored=Ignored ++ [ignored]}}
end;
#state{level=LogLevel,buffer=Buffer,ignored=Ignored} = State) ->
case lager_util:is_loggable(Msg, LogLevel, ?MODULE) of
true ->
{ok, State#state{buffer=Buffer ++
[{lager_msg:severity_as_int(Msg),
lager_msg:datetime(Msg),
lager_msg:message(Msg), lager_msg:metadata(Msg)}]}};
_ ->
{ok, State#state{ignored=Ignored ++ [ignored]}}
end;
handle_event(_Event, State) ->
{ok, State}.
@ -89,6 +100,12 @@ count_ignored() ->
flush() ->
gen_event:call(lager_event, ?MODULE, flush).
print_state() ->
gen_event:call(lager_event, ?MODULE, print_state).
print_bad_state() ->
gen_event:call(lager_event, ?MODULE, print_bad_state).
has_line_numbers() ->
%% are we R15 or greater
Rel = erlang:system_info(otp_release),
@ -164,17 +181,137 @@ lager_test_() ->
ok
end
},
{"variables inplace of literals in logging statements work",
fun() ->
?assertEqual(0, count()),
Attr = [{a, alpha}, {b, beta}],
Fmt = "format ~p",
Args = [world],
lager:info(Attr, "hello"),
lager:info(Attr, "hello ~p", [world]),
lager:info(Fmt, [world]),
lager:info("hello ~p", Args),
lager:info(Attr, "hello ~p", Args),
lager:info([{d, delta}, {g, gamma}], Fmt, Args),
?assertEqual(6, count()),
{_Level, _Time, Message, Metadata} = pop(),
?assertMatch([{a, alpha}, {b, beta}|_], Metadata),
?assertEqual("hello", lists:flatten(Message)),
{_Level, _Time2, Message2, _Metadata2} = pop(),
?assertEqual("hello world", lists:flatten(Message2)),
{_Level, _Time3, Message3, _Metadata3} = pop(),
?assertEqual("format world", lists:flatten(Message3)),
{_Level, _Time4, Message4, _Metadata4} = pop(),
?assertEqual("hello world", lists:flatten(Message4)),
{_Level, _Time5, Message5, _Metadata5} = pop(),
?assertEqual("hello world", lists:flatten(Message5)),
{_Level, _Time6, Message6, Metadata6} = pop(),
?assertMatch([{d, delta}, {g, gamma}|_], Metadata6),
?assertEqual("format world", lists:flatten(Message6)),
ok
end
},
{"list comprehension inplace of literals in logging statements work",
fun() ->
?assertEqual(0, count()),
Attr = [{a, alpha}, {b, beta}],
Fmt = "format ~p",
Args = [world],
lager:info([{K, atom_to_list(V)} || {K, V} <- Attr], "hello"),
lager:info([{K, atom_to_list(V)} || {K, V} <- Attr], "hello ~p", [{atom, X} || X <- Args]),
lager:info([X || X <- Fmt], [world]),
lager:info("hello ~p", [{atom, X} || X <- Args]),
lager:info([{K, atom_to_list(V)} || {K, V} <- Attr], "hello ~p", [{atom, X} || X <- Args]),
lager:info([{d, delta}, {g, gamma}], Fmt, [{atom, X} || X <- Args]),
?assertEqual(6, count()),
{_Level, _Time, Message, Metadata} = pop(),
?assertMatch([{a, "alpha"}, {b, "beta"}|_], Metadata),
?assertEqual("hello", lists:flatten(Message)),
{_Level, _Time2, Message2, _Metadata2} = pop(),
?assertEqual("hello {atom,world}", lists:flatten(Message2)),
{_Level, _Time3, Message3, _Metadata3} = pop(),
?assertEqual("format world", lists:flatten(Message3)),
{_Level, _Time4, Message4, _Metadata4} = pop(),
?assertEqual("hello {atom,world}", lists:flatten(Message4)),
{_Level, _Time5, Message5, _Metadata5} = pop(),
?assertEqual("hello {atom,world}", lists:flatten(Message5)),
{_Level, _Time6, Message6, Metadata6} = pop(),
?assertMatch([{d, delta}, {g, gamma}|_], Metadata6),
?assertEqual("format {atom,world}", lists:flatten(Message6)),
ok
end
},
{"function calls inplace of literals in logging statements work",
fun() ->
?assertEqual(0, count()),
put(attrs, [{a, alpha}, {b, beta}]),
put(format, "format ~p"),
put(args, [world]),
lager:info(get(attrs), "hello"),
lager:info(get(attrs), "hello ~p", get(args)),
lager:info(get(format), [world]),
lager:info("hello ~p", erlang:get(args)),
lager:info(fun() -> get(attrs) end(), "hello ~p", get(args)),
lager:info([{d, delta}, {g, gamma}], get(format), get(args)),
?assertEqual(6, count()),
{_Level, _Time, Message, Metadata} = pop(),
?assertMatch([{a, alpha}, {b, beta}|_], Metadata),
?assertEqual("hello", lists:flatten(Message)),
{_Level, _Time2, Message2, _Metadata2} = pop(),
?assertEqual("hello world", lists:flatten(Message2)),
{_Level, _Time3, Message3, _Metadata3} = pop(),
?assertEqual("format world", lists:flatten(Message3)),
{_Level, _Time4, Message4, _Metadata4} = pop(),
?assertEqual("hello world", lists:flatten(Message4)),
{_Level, _Time5, Message5, _Metadata5} = pop(),
?assertEqual("hello world", lists:flatten(Message5)),
{_Level, _Time6, Message6, Metadata6} = pop(),
?assertMatch([{d, delta}, {g, gamma}|_], Metadata6),
?assertEqual("format world", lists:flatten(Message6)),
ok
end
},
{"record fields inplace of literals in logging statements work",
fun() ->
?assertEqual(0, count()),
Test = #test{attrs=[{a, alpha}, {b, beta}], format="format ~p", args=[world]},
lager:info(Test#test.attrs, "hello"),
lager:info(Test#test.attrs, "hello ~p", Test#test.args),
lager:info(Test#test.format, [world]),
lager:info("hello ~p", Test#test.args),
lager:info(Test#test.attrs, "hello ~p", Test#test.args),
lager:info([{d, delta}, {g, gamma}], Test#test.format, Test#test.args),
?assertEqual(6, count()),
{_Level, _Time, Message, Metadata} = pop(),
?assertMatch([{a, alpha}, {b, beta}|_], Metadata),
?assertEqual("hello", lists:flatten(Message)),
{_Level, _Time2, Message2, _Metadata2} = pop(),
?assertEqual("hello world", lists:flatten(Message2)),
{_Level, _Time3, Message3, _Metadata3} = pop(),
?assertEqual("format world", lists:flatten(Message3)),
{_Level, _Time4, Message4, _Metadata4} = pop(),
?assertEqual("hello world", lists:flatten(Message4)),
{_Level, _Time5, Message5, _Metadata5} = pop(),
?assertEqual("hello world", lists:flatten(Message5)),
{_Level, _Time6, Message6, Metadata6} = pop(),
?assertMatch([{d, delta}, {g, gamma}|_], Metadata6),
?assertEqual("format world", lists:flatten(Message6)),
ok
end
},
{"log messages below the threshold are ignored",
fun() ->
?assertEqual(0, count()),
lager:debug("this message will be ignored"),
?assertEqual(0, count()),
?assertEqual(0, count_ignored()),
lager_mochiglobal:put(loglevel, {?DEBUG, []}),
lager_config:set(loglevel, {element(2, lager_util:config_to_mask(debug)), []}),
lager:debug("this message should be ignored"),
?assertEqual(0, count()),
?assertEqual(1, count_ignored()),
lager:set_loglevel(?MODULE, debug),
?assertEqual({?DEBUG bor ?INFO bor ?NOTICE bor ?WARNING bor ?ERROR bor ?CRITICAL bor ?ALERT bor ?EMERGENCY, []}, lager_config:get(loglevel)),
lager:debug("this message should be logged"),
?assertEqual(1, count()),
?assertEqual(1, count_ignored()),
@ -184,40 +321,229 @@ lager_test_() ->
},
{"tracing works",
fun() ->
lager_mochiglobal:put(loglevel, {?ERROR, []}),
lager_config:set(loglevel, {element(2, lager_util:config_to_mask(error)), []}),
ok = lager:info("hello world"),
?assertEqual(0, count()),
lager_mochiglobal:put(loglevel, {?ERROR, [{[{module,
?MODULE}], ?DEBUG, ?MODULE}]}),
lager:trace(?MODULE, [{module, ?MODULE}], debug),
?assertMatch({?ERROR bor ?CRITICAL bor ?ALERT bor ?EMERGENCY, _}, lager_config:get(loglevel)),
%% eligible for tracing
ok = lager:info("hello world"),
%% NOT eligible for tracing
ok = lager:log(info, [{pid, self()}], "hello world"),
?assertEqual(1, count()),
ok
end
},
{"tracing works with custom attributes",
fun() ->
lager_mochiglobal:put(loglevel, {?ERROR, []}),
lager:set_loglevel(?MODULE, error),
?assertEqual({?ERROR bor ?CRITICAL bor ?ALERT bor ?EMERGENCY, []}, lager_config:get(loglevel)),
lager_config:set(loglevel, {element(2, lager_util:config_to_mask(error)), []}),
lager:info([{requestid, 6}], "hello world"),
?assertEqual(0, count()),
lager_mochiglobal:put(loglevel, {?ERROR,
[{[{requestid, 6}], ?DEBUG, ?MODULE}]}),
lager:trace(?MODULE, [{requestid, 6}, {foo, bar}], debug),
lager:info([{requestid, 6}, {foo, bar}], "hello world"),
?assertEqual(1, count()),
lager_mochiglobal:put(loglevel, {?ERROR,
[{[{requestid, '*'}], ?DEBUG, ?MODULE}]}),
lager:trace(?MODULE, [{requestid, '*'}], debug),
lager:info([{requestid, 6}], "hello world"),
?assertEqual(2, count()),
lager:clear_all_traces(),
lager:info([{requestid, 6}], "hello world"),
?assertEqual(2, count()),
ok
end
},
{"tracing works with custom attributes and event stream processing",
fun() ->
lager:set_loglevel(?MODULE, error),
?assertEqual({?ERROR bor ?CRITICAL bor ?ALERT bor ?EMERGENCY, []}, lager_config:get(loglevel)),
lager_config:set(loglevel, {element(2, lager_util:config_to_mask(error)), []}),
lager:info([{requestid, 6}], "hello world"),
?assertEqual(0, count()),
lager:trace(?MODULE, [{requestid, '>', 5}, {requestid, '<', 7}, {foo, bar}], debug),
lager:info([{requestid, 5}, {foo, bar}], "hello world"),
lager:info([{requestid, 6}, {foo, bar}], "hello world"),
?assertEqual(1, count()),
lager:clear_all_traces(),
lager:trace(?MODULE, [{requestid, '>', 8}, {foo, bar}]),
lager:info([{foo, bar}], "hello world"),
lager:info([{requestid, 6}], "hello world"),
lager:info([{requestid, 7}], "hello world"),
lager:info([{requestid, 8}], "hello world"),
lager:info([{requestid, 9}, {foo, bar}], "hello world"),
lager:info([{requestid, 10}], "hello world"),
?assertEqual(2, count()),
lager:trace(?MODULE, [{requestid, '>', 8}]),
lager:info([{foo, bar}], "hello world"),
lager:info([{requestid, 6}], "hello world"),
lager:info([{requestid, 7}], "hello world"),
lager:info([{requestid, 8}], "hello world"),
lager:info([{requestid, 9}, {foo, bar}], "hello world"),
lager:info([{requestid, 10}], "hello world"),
?assertEqual(4, count()),
lager:trace(?MODULE, [{foo, '=', bar}]),
lager:info([{foo, bar}], "hello world"),
lager:info([{requestid, 6}], "hello world"),
lager:info([{requestid, 7}], "hello world"),
lager:info([{requestid, 8}], "hello world"),
lager:info([{requestid, 9}, {foo, bar}], "hello world"),
lager:info([{requestid, 10}], "hello world"),
?assertEqual(7, count()),
lager:clear_all_traces(),
lager:info([{requestid, 6}], "hello world"),
?assertEqual(7, count()),
ok
end
},
{"tracing custom attributes works with event stream processing statistics and reductions",
fun() ->
lager:set_loglevel(?MODULE, error),
?assertEqual({?ERROR bor ?CRITICAL bor ?ALERT bor ?EMERGENCY, []}, lager_config:get(loglevel)),
lager_config:set(loglevel, {element(2, lager_util:config_to_mask(error)), []}),
lager:info([{requestid, 6}], "hello world"),
?assertEqual(0, count()),
lager:trace(?MODULE, [{beta, '*'}]),
lager:trace(?MODULE, [{meta, "data"}]),
lager:info([{meta, "data"}], "hello world"),
lager:info([{beta, 2}], "hello world"),
lager:info([{beta, 2.1}, {foo, bar}], "hello world"),
lager:info([{meta, <<"data">>}], "hello world"),
?assertEqual(8, ?DEFAULT_TRACER:info(input)),
?assertEqual(6, ?DEFAULT_TRACER:info(output)),
?assertEqual(2, ?DEFAULT_TRACER:info(filter)),
lager:clear_all_traces(),
lager:trace(?MODULE, [{meta, "data"}]),
lager:trace(?MODULE, [{beta, '>', 2}, {beta, '<', 2.12}]),
lager:info([{meta, "data"}], "hello world"),
lager:info([{beta, 2}], "hello world"),
lager:info([{beta, 2.1}, {foo, bar}], "hello world"),
lager:info([{meta, <<"data">>}], "hello world"),
?assertEqual(8, ?DEFAULT_TRACER:info(input)),
?assertEqual(4, ?DEFAULT_TRACER:info(output)),
?assertEqual(4, ?DEFAULT_TRACER:info(filter)),
lager:clear_all_traces(),
lager:trace_console([{beta, '>', 2}, {meta, "data"}]),
lager:trace_console([{beta, '>', 2}, {beta, '<', 2.12}]),
Reduced = {all,[{any,[{beta,'<',2.12},{meta,'=',"data"}]},
{beta,'>',2}]},
?assertEqual(Reduced, ?DEFAULT_TRACER:info('query')),
lager:clear_all_traces(),
lager:info([{requestid, 6}], "hello world"),
?assertEqual(5, count()),
ok
end
},
{"persistent traces work",
fun() ->
?assertEqual(0, count()),
lager:debug([{foo, bar}], "hello world"),
?assertEqual(0, count()),
application:stop(lager),
application:set_env(lager, traces, [{lager_test_backend, [{foo, bar}], debug}]),
application:start(lager),
lager:debug([{foo, bar}], "hello world"),
?assertEqual(1, count()),
application:unset_env(lager, traces),
ok
end
},
{"tracing honors loglevel",
fun() ->
lager_mochiglobal:put(loglevel, {?ERROR, [{[{module,
?MODULE}], ?NOTICE, ?MODULE}]}),
lager:set_loglevel(?MODULE, error),
?assertEqual({?ERROR bor ?CRITICAL bor ?ALERT bor ?EMERGENCY, []}, lager_config:get(loglevel)),
{ok, T} = lager:trace(?MODULE, [{module, ?MODULE}], notice),
ok = lager:info("hello world"),
?assertEqual(0, count()),
ok = lager:notice("hello world"),
?assertEqual(1, count()),
lager:stop_trace(T),
ok = lager:notice("hello world"),
?assertEqual(1, count()),
ok
end
},
{"record printing works",
fun() ->
print_state(),
{Level, _Time, Message, _Metadata} = pop(),
?assertMatch(Level, lager_util:level_to_num(info)),
{mask, Mask} = lager_util:config_to_mask(info),
?assertEqual("State #state{level={mask,"++integer_to_list(Mask)++"},buffer=[],ignored=[]}", lists:flatten(Message)),
ok
end
},
{"record printing fails gracefully",
fun() ->
print_bad_state(),
{Level, _Time, Message, _Metadata} = pop(),
?assertMatch(Level, lager_util:level_to_num(info)),
?assertEqual("State {state,1}", lists:flatten(Message)),
ok
end
},
{"record printing fails gracefully when no lager_record attribute",
fun() ->
spawn(fun() -> lager:info("State ~p", [lager:pr({state, 1}, lager)]) end),
timer:sleep(100),
{Level, _Time, Message, _Metadata} = pop(),
?assertMatch(Level, lager_util:level_to_num(info)),
?assertEqual("State {state,1}", lists:flatten(Message)),
ok
end
},
{"record printing fails gracefully when input is not a tuple",
fun() ->
spawn(fun() -> lager:info("State ~p", [lager:pr(ok, lager)]) end),
timer:sleep(100),
{Level, _Time, Message, _Metadata} = pop(),
?assertMatch(Level, lager_util:level_to_num(info)),
?assertEqual("State ok", lists:flatten(Message)),
ok
end
},
{"record printing fails gracefully when module is invalid",
fun() ->
spawn(fun() -> lager:info("State ~p", [lager:pr({state, 1}, not_a_module)]) end),
timer:sleep(100),
{Level, _Time, Message, _Metadata} = pop(),
?assertMatch(Level, lager_util:level_to_num(info)),
?assertEqual("State {state,1}", lists:flatten(Message)),
ok
end
},
{"installing a new handler adjusts the global loglevel if necessary",
fun() ->
?assertEqual({?INFO bor ?NOTICE bor ?WARNING bor ?ERROR bor ?CRITICAL bor ?ALERT bor ?EMERGENCY, []}, lager_config:get(loglevel)),
supervisor:start_child(lager_handler_watcher_sup, [lager_event, {?MODULE, foo}, debug]),
?assertEqual({?DEBUG bor ?INFO bor ?NOTICE bor ?WARNING bor ?ERROR bor ?CRITICAL bor ?ALERT bor ?EMERGENCY, []}, lager_config:get(loglevel)),
ok
end
},
{"metadata in the process dictionary works",
fun() ->
lager:md([{platypus, gravid}, {sloth, hirsute}, {duck, erroneous}]),
lager:info("I sing the animal kingdom electric!"),
{_Level, _Time, _Message, Metadata} = pop(),
?assertEqual(gravid, proplists:get_value(platypus, Metadata)),
?assertEqual(hirsute, proplists:get_value(sloth, Metadata)),
?assertEqual(erroneous, proplists:get_value(duck, Metadata)),
?assertEqual(undefined, proplists:get_value(eagle, Metadata)),
lager:md([{platypus, gravid}, {sloth, hirsute}, {eagle, superincumbent}]),
lager:info("I sing the animal kingdom dielectric!"),
{_Level2, _Time2, _Message2, Metadata2} = pop(),
?assertEqual(gravid, proplists:get_value(platypus, Metadata2)),
?assertEqual(hirsute, proplists:get_value(sloth, Metadata2)),
?assertEqual(undefined, proplists:get_value(duck, Metadata2)),
?assertEqual(superincumbent, proplists:get_value(eagle, Metadata2)),
ok
end
},
{"can't store invalid metadata",
fun() ->
?assertEqual(ok, lager:md([{platypus, gravid}, {sloth, hirsute}, {duck, erroneous}])),
?assertError(badarg, lager:md({flamboyant, flamingo})),
?assertError(badarg, lager:md("zookeeper zephyr")),
ok
end
}
@ -229,8 +555,6 @@ setup() ->
application:load(lager),
application:set_env(lager, handlers, [{?MODULE, info}]),
application:set_env(lager, error_logger_redirect, false),
application:start(compiler),
application:start(syntax_tools),
application:start(lager),
gen_event:call(lager_event, ?MODULE, flush).
@ -284,8 +608,6 @@ error_logger_redirect_crash_test_() ->
application:load(lager),
application:set_env(lager, error_logger_redirect, true),
application:set_env(lager, handlers, [{?MODULE, error}]),
application:start(compiler),
application:start(syntax_tools),
application:start(lager),
crash:start()
end,
@ -542,6 +864,17 @@ error_logger_redirect_test_() ->
end
},
{"supervisor reports with real error and pid",
fun() ->
sync_error_logger:error_report(supervisor_report, [{errorContext, france}, {offender, [{name, mini_steve}, {mfargs, {a, b, [c]}}, {pid, bleh}]}, {reason, {function_clause,[{crash,handle_info,[foo]}]}}, {supervisor, somepid}]),
_ = gen_event:which_handlers(error_logger),
{Level, _, Msg,Metadata} = pop(),
?assertEqual(lager_util:level_to_num(error),Level),
?assertEqual(self(),proplists:get_value(pid,Metadata)),
?assertEqual("Supervisor somepid had child mini_steve started with a:b(c) at bleh exit with reason no function clause matching crash:handle_info(foo) in context france", lists:flatten(Msg))
end
},
{"supervisor_bridge reports",
fun() ->
sync_error_logger:error_report(supervisor_report, [{errorContext, france}, {offender, [{mod, mini_steve}, {pid, bleh}]}, {reason, fired}, {supervisor, {local, steve}}]),
@ -566,6 +899,7 @@ error_logger_redirect_test_() ->
{"supervisor progress report",
fun() ->
lager:set_loglevel(?MODULE, debug),
?assertEqual({?DEBUG bor ?INFO bor ?NOTICE bor ?WARNING bor ?ERROR bor ?CRITICAL bor ?ALERT bor ?EMERGENCY, []}, lager_config:get(loglevel)),
sync_error_logger:info_report(progress, [{supervisor, {local, foo}}, {started, [{mfargs, {foo, bar, 1}}, {pid, baz}]}]),
_ = gen_event:which_handlers(error_logger),
{Level, _, Msg,Metadata} = pop(),
@ -574,6 +908,18 @@ error_logger_redirect_test_() ->
?assertEqual("Supervisor foo started foo:bar/1 at pid baz", lists:flatten(Msg))
end
},
{"supervisor progress report with pid",
fun() ->
lager:set_loglevel(?MODULE, debug),
?assertEqual({?DEBUG bor ?INFO bor ?NOTICE bor ?WARNING bor ?ERROR bor ?CRITICAL bor ?ALERT bor ?EMERGENCY, []}, lager_config:get(loglevel)),
sync_error_logger:info_report(progress, [{supervisor, somepid}, {started, [{mfargs, {foo, bar, 1}}, {pid, baz}]}]),
_ = gen_event:which_handlers(error_logger),
{Level, _, Msg,Metadata} = pop(),
?assertEqual(lager_util:level_to_num(debug),Level),
?assertEqual(self(),proplists:get_value(pid,Metadata)),
?assertEqual("Supervisor somepid started foo:bar/1 at pid baz", lists:flatten(Msg))
end
},
{"crash report for emfile",
fun() ->
sync_error_logger:error_report(crash_report, [[{pid, self()}, {registered_name, []}, {error_info, {error, emfile, [{stack, trace, 1}]}}], []]),
@ -701,20 +1047,91 @@ error_logger_redirect_test_() ->
test_body(Expected, lists:flatten(Msg))
end
},
{"webmachine error reports",
fun() ->
Path = "/cgi-bin/phpmyadmin",
Reason = {error,{error,{badmatch,{error,timeout}},
[{myapp,dostuff,2,[{file,"src/myapp.erl"},{line,123}]},
{webmachine_resource,resource_call,3,[{file,"src/webmachine_resource.erl"},{line,169}]}]}},
sync_error_logger:error_msg("webmachine error: path=~p~n~p~n", [Path, Reason]),
_ = gen_event:which_handlers(error_logger),
{Level, _, Msg,Metadata} = pop(),
?assertEqual(lager_util:level_to_num(error),Level),
?assertEqual(self(),proplists:get_value(pid,Metadata)),
?assertEqual("Webmachine error at path \"/cgi-bin/phpmyadmin\" : no match of right hand value {error,timeout} in myapp:dostuff/2 line 123", lists:flatten(Msg))
end
},
{"Cowboy error reports, 8 arg version",
fun() ->
Stack = [{my_handler,init, 3,[{file,"src/my_handler.erl"},{line,123}]},
{cowboy_handler,handler_init,4,[{file,"src/cowboy_handler.erl"},{line,169}]}],
sync_error_logger:error_msg(
"** Cowboy handler ~p terminating in ~p/~p~n"
" for the reason ~p:~p~n"
"** Options were ~p~n"
"** Request was ~p~n"
"** Stacktrace: ~p~n~n",
[my_handler, init, 3, error, {badmatch, {error, timeout}}, [],
"Request", Stack]),
_ = gen_event:which_handlers(error_logger),
{Level, _, Msg,Metadata} = pop(),
?assertEqual(lager_util:level_to_num(error),Level),
?assertEqual(self(),proplists:get_value(pid,Metadata)),
?assertEqual("Cowboy handler my_handler terminated in my_handler:init/3 with reason: no match of right hand value {error,timeout} in my_handler:init/3 line 123", lists:flatten(Msg))
end
},
{"Cowboy error reports, 10 arg version",
fun() ->
Stack = [{my_handler,somecallback, 3,[{file,"src/my_handler.erl"},{line,123}]},
{cowboy_handler,handler_init,4,[{file,"src/cowboy_handler.erl"},{line,169}]}],
sync_error_logger:error_msg(
"** Cowboy handler ~p terminating in ~p/~p~n"
" for the reason ~p:~p~n** Message was ~p~n"
"** Options were ~p~n** Handler state was ~p~n"
"** Request was ~p~n** Stacktrace: ~p~n~n",
[my_handler, somecallback, 3, error, {badmatch, {error, timeout}}, hello, [],
{}, "Request", Stack]),
_ = gen_event:which_handlers(error_logger),
{Level, _, Msg,Metadata} = pop(),
?assertEqual(lager_util:level_to_num(error),Level),
?assertEqual(self(),proplists:get_value(pid,Metadata)),
?assertEqual("Cowboy handler my_handler terminated in my_handler:somecallback/3 with reason: no match of right hand value {error,timeout} in my_handler:somecallback/3 line 123", lists:flatten(Msg))
end
},
{"Cowboy error reports, 5 arg version",
fun() ->
sync_error_logger:error_msg(
"** Cowboy handler ~p terminating; "
"function ~p/~p was not exported~n"
"** Request was ~p~n** State was ~p~n~n",
[my_handler, to_json, 2, "Request", {}]),
_ = gen_event:which_handlers(error_logger),
{Level, _, Msg,Metadata} = pop(),
?assertEqual(lager_util:level_to_num(error),Level),
?assertEqual(self(),proplists:get_value(pid,Metadata)),
?assertEqual("Cowboy handler my_handler terminated with reason: call to undefined function my_handler:to_json/2", lists:flatten(Msg))
end
},
{"messages should not be generated if they don't satisfy the threshold",
fun() ->
lager:set_loglevel(?MODULE, error),
?assertEqual({?ERROR bor ?CRITICAL bor ?ALERT bor ?EMERGENCY, []}, lager_config:get(loglevel)),
sync_error_logger:info_report([hello, world]),
_ = gen_event:which_handlers(error_logger),
?assertEqual(0, count()),
?assertEqual(0, count_ignored()),
lager:set_loglevel(?MODULE, info),
?assertEqual({?INFO bor ?NOTICE bor ?WARNING bor ?ERROR bor ?CRITICAL bor ?ALERT bor ?EMERGENCY, []}, lager_config:get(loglevel)),
sync_error_logger:info_report([hello, world]),
_ = gen_event:which_handlers(error_logger),
?assertEqual(1, count()),
?assertEqual(0, count_ignored()),
lager:set_loglevel(?MODULE, error),
lager_mochiglobal:put(loglevel, {?DEBUG, []}),
?assertEqual({?ERROR bor ?CRITICAL bor ?ALERT bor ?EMERGENCY, []}, lager_config:get(loglevel)),
lager_config:set(loglevel, {element(2, lager_util:config_to_mask(debug)), []}),
sync_error_logger:info_report([hello, world]),
_ = gen_event:which_handlers(error_logger),
?assertEqual(1, count()),
@ -729,6 +1146,58 @@ safe_format_test() ->
?assertEqual("FORMAT ERROR: \"~p ~p ~p\" [foo,bar]", lists:flatten(lager:safe_format("~p ~p ~p", [foo, bar], 1024))),
ok.
async_threshold_test_() ->
{foreach,
fun() ->
error_logger:tty(false),
application:load(lager),
application:set_env(lager, error_logger_redirect, false),
application:set_env(lager, async_threshold, 10),
application:set_env(lager, handlers, [{?MODULE, info}]),
application:start(lager)
end,
fun(_) ->
application:unset_env(lager, async_threshold),
application:stop(lager),
error_logger:tty(true)
end,
[
{"async threshold works",
fun() ->
%% we start out async
?assertEqual(true, lager_config:get(async)),
%% put a ton of things in the queue
Workers = [spawn_monitor(fun() -> [lager:info("hello world") || _ <- lists:seq(1, 1000)] end) || _ <- lists:seq(1, 15)],
%% serialize on mailbox
_ = gen_event:which_handlers(lager_event),
timer:sleep(500),
%% there should be a ton of outstanding messages now, so async is false
?assertEqual(false, lager_config:get(async)),
%% wait for all the workers to return, meaning that all the messages have been logged (since we're in sync mode)
collect_workers(Workers),
%% serialize on the mailbox again
_ = gen_event:which_handlers(lager_event),
%% just in case...
timer:sleep(100),
%% async is true again now that the mailbox has drained
?assertEqual(true, lager_config:get(async)),
ok
end
}
]
}.
collect_workers([]) ->
ok;
collect_workers(Workers) ->
receive
{'DOWN', Ref, _, _, _} ->
collect_workers(lists:keydelete(Ref, 2, Workers))
end.
-endif.
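The trace tests above drive the new goldrush-backed filters: {Key, '*'}, {Key, Value} and {Key, '<'|'='|'>', Value} elements compose, and every element of a filter list must match. A sketch of installing similar traces at runtime, assuming lager and the console backend are running (the metadata keys are illustrative):

```erlang
-module(trace_sketch).
-export([start_traces/0]).

start_traces() ->
    %% deliver debug-and-above messages whose metadata satisfies
    %% 5 < requestid < 7 AND foo == bar to the console backend
    {ok, _T} = lager:trace(lager_console_backend,
                           [{requestid, '>', 5},
                            {requestid, '<', 7},
                            {foo, bar}],
                           debug),
    %% trace_console/1 is the shorthand used in the tests above
    lager:trace_console([{beta, '>', 2}, {meta, "data"}]),
    ok.
```

The async_threshold setting exercised in the last test group is independent of tracing: once the lager_event mailbox grows past the threshold, the async flag in lager_config flips to false and callers block until the backlog drains.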

+ 11
- 6
test/trunc_io_eqc.erl View File

@ -36,11 +36,11 @@
%%====================================================================
eqc_test_() ->
{timeout, 30,
{timeout, 60,
{spawn,
[
{timeout, 15, ?_assertEqual(true, eqc:quickcheck(eqc:testing_time(14, ?QC_OUT(prop_format()))))},
{timeout, 15, ?_assertEqual(true, eqc:quickcheck(eqc:testing_time(14, ?QC_OUT(prop_equivalence()))))}
{timeout, 30, ?_assertEqual(true, eqc:quickcheck(eqc:testing_time(14, ?QC_OUT(prop_format()))))},
{timeout, 30, ?_assertEqual(true, eqc:quickcheck(eqc:testing_time(14, ?QC_OUT(prop_equivalence()))))}
]
}}.
@ -66,7 +66,7 @@ gen_fmt_args() ->
"~~",
{"~10000000.p", gen_any(5)},
{"~w", gen_any(5)},
{"~s", oneof([gen_print_str(), gen_atom(), gen_quoted_atom(), gen_print_bin()])},
{"~s", oneof([gen_print_str(), gen_atom(), gen_quoted_atom(), gen_print_bin(), gen_iolist(5)])},
{"~1000000.P", gen_any(5), 4},
{"~W", gen_any(5), 4},
{"~i", gen_any(5)},
@ -91,7 +91,7 @@ gen_fmt_args() ->
%% Generates a printable string
gen_print_str() ->
?LET(Xs, list(char()), [X || X <- Xs, io_lib:printable_list([X]), X /= $~]).
?LET(Xs, list(char()), [X || X <- Xs, io_lib:printable_list([X]), X /= $~, X < 255]).
gen_print_bin() ->
?LET(Xs, gen_print_str(), list_to_binary(Xs)).
@ -110,7 +110,12 @@ gen_any(MaxDepth) ->
gen_fun()] ++
[?LAZY(list(gen_any(MaxDepth - 1))) || MaxDepth /= 0] ++
[?LAZY(gen_tuple(gen_any(MaxDepth - 1))) || MaxDepth /= 0]).
gen_iolist(0) ->
[];
gen_iolist(Depth) ->
list(oneof([gen_char(), gen_print_str(), gen_print_bin(), gen_iolist(Depth-1)])).
gen_atom() ->
elements([abc, def, ghi]).
