
Improve the performance of the file backend

This is done via a combination of several things:
* Make the loglevel that triggers a sync configurable
* Make the delayed_write size and interval configurable
* Make the interval at which external rotation is checked
  configurable
* Store the timestamp at which a lager_msg was created inside the
  lager_msg itself

To support these changes, several other things had to be modified:
* lager_msg:timestamp now returns a timestamp
* lager_msg:datetime was added to return the {date, time} of a message,
  like lager_msg:timestamp used to
* The configuration syntax for file backends was changed to be of the
  form {lager_file_backend, proplist()} and the old syntax was
  deprecated

Additionally, the default check_interval was raised from 'always' to
1 second, and the default sync_interval was changed from 2 seconds to
1 second.
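
For reference, a new-style handler entry using the options introduced by this commit might look like the sketch below. It is illustrative only: the file name, size, date and count come from the example in src/lager.app.src, and the sync_on, sync_interval, sync_size and check_interval values shown match the new defaults defined in src/lager_file_backend.erl.

%% Sketch of a new-style lager_file_backend entry for the lager 'handlers'
%% application environment. Any option left out falls back to the DEFAULT_*
%% macros added in src/lager_file_backend.erl.
{lager, [
    {handlers, [
        {lager_file_backend, [
            {file, "log/error.log"},
            {level, error},
            {size, 10485760}, {date, "$D0"}, {count, 5},
            {sync_on, error},       %% level at or above which a datasync is forced
            {sync_interval, 1000},  %% delayed_write flush interval, in milliseconds
            {sync_size, 65536},     %% delayed_write buffer size, in bytes
            {check_interval, 1000}  %% how often to re-check for external rotation, in ms
        ]}
    ]}
]}

The old {File, Level, Size, Date, Count} tuple form is still accepted, but lager_app now logs a deprecation notice when it expands it.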
Branch: pull/120/head
Author: Andrew Thompson, 12 years ago
Commit: 79ea55a196
9 files changed, 456 insertions(+), 150 deletions(-)
  1. include/lager.hrl  (+0, -1)
  2. src/lager.app.src  (+3, -3)
  3. src/lager.erl  (+1, -2)
  4. src/lager_app.erl  (+55, -12)
  5. src/lager_default_formatter.erl  (+17, -11)
  6. src/lager_file_backend.erl  (+346, -107)
  7. src/lager_msg.erl  (+17, -4)
  8. src/lager_util.erl  (+16, -9)
  9. test/lager_test_backend.erl  (+1, -1)

include/lager.hrl  (+0, -1)

@ -59,7 +59,6 @@
-define(NOTIFY(Level, Pid, Format, Args),
gen_event:notify(lager_event, {log, lager_msg:new(io_lib:format(Format, Args),
lager_util:format_time(),
Level,
[{pid,Pid},{line,?LINE},{file,?FILE},{module,?MODULE}],
[])}

src/lager.app.src  (+3, -3)

@ -16,9 +16,9 @@
{handlers, [
{lager_console_backend, info},
{lager_file_backend, [
{"log/error.log", error, 10485760, "$D0", 5},
{"log/console.log", info, 10485760, "$D0", 5}
]}
{file, "log/error.log"}, {level, error}, {size, 10485760}, {date, "$D0"}, {count, 5}]},
{lager_file_backend, [
{file, "log/console.log"}, {level, info}, {size, 10485760}, {date, "$D0"}, {count, 5}]}
]},
%% Whether to write a crash log, and where. Undefined means no crash logger.
{crash_log, "log/crash.log"},

src/lager.erl  (+1, -2)

@ -75,14 +75,13 @@ do_log(Severity, Metadata, Format, Args, Size, SeverityAsInt, LevelThreshold, Tr
end,
case (LevelThreshold band SeverityAsInt) /= 0 orelse Destinations /= [] of
true ->
Timestamp = lager_util:format_time(),
Msg = case Args of
A when is_list(A) ->
safe_format_chop(Format,Args,Size);
_ ->
Format
end,
LagerMsg = lager_msg:new(Msg, Timestamp,
LagerMsg = lager_msg:new(Msg,
Severity, Metadata, Destinations),
case lager_config:get(async, false) of
true ->

src/lager_app.erl  (+55, -12)

@ -104,31 +104,35 @@ stop(Handlers) ->
expand_handlers([]) ->
[];
expand_handlers([{lager_file_backend, [{Key, _Value}|_]=Config}|T]) when is_atom(Key) ->
%% this is definitely a new-style config, no expansion needed
[maybe_make_handler_id(lager_file_backend, Config) | expand_handlers(T)];
expand_handlers([{lager_file_backend, Configs}|T]) ->
?INT_LOG(notice, "Deprecated lager_file_backend config detected, please consider updating it", []),
[ {lager_file_backend:config_to_id(Config), Config} || Config <- Configs] ++
expand_handlers(T);
expand_handlers([{Mod, Config}|T]) when is_atom(Mod) ->
[maybe_make_handler_id(Mod, Config) | expand_handlers(T)];
expand_handlers([H|T]) ->
[H | expand_handlers(T)].
maybe_make_handler_id(Mod, Config) ->
%% Allow the backend to generate a gen_event handler id, if it wants to.
%% We don't use erlang:function_exported here because that requires the module
%% already be loaded, which is unlikely at this phase of startup. Using code:load
%% caused undesirable side-effects when generating code-coverage reports.
Res = try Mod:config_to_id(Config) of
try Mod:config_to_id(Config) of
Id ->
{Id, Config}
catch
_:_ ->
error:undef ->
{Mod, Config}
end,
[Res | expand_handlers(T)];
expand_handlers([H|T]) ->
[H | expand_handlers(T)].
end.
-ifdef(TEST).
application_config_mangling_test_() ->
[{"Explode the file backend handlers",
[
{"Explode the file backend handlers",
?_assertMatch(
[{lager_console_backend, info},
{{lager_file_backend,"error.log"},{"error.log",error,10485760, "$D0",5}},
@ -139,7 +143,21 @@ application_config_mangling_test_() ->
{"error.log", error, 10485760, "$D0", 5},
{"console.log", info, 10485760, "$D0", 5}
]}]
))},
))
},
{"Explode the short form of backend file handlers",
?_assertMatch(
[{lager_console_backend, info},
{{lager_file_backend,"error.log"},{"error.log",error}},
{{lager_file_backend,"console.log"},{"console.log",info}}
],
expand_handlers([{lager_console_backend, info},
{lager_file_backend, [
{"error.log", error},
{"console.log", info}
]}]
))
},
{"Explode with formatter info",
?_assertMatch(
[{{lager_file_backend,"test.log"}, [{"test.log", debug, 10485760, "$D0", 5},{lager_default_formatter,["[",severity,"] ", message, "\n"]}]},
@ -150,5 +168,30 @@ application_config_mangling_test_() ->
]
}])
)
}].
},
{"Explode short form with short formatter info",
?_assertMatch(
[{{lager_file_backend,"test.log"}, [{"test.log", debug},{lager_default_formatter,["[",severity,"] ", message, "\n"]}]},
{{lager_file_backend,"test2.log"}, [{"test2.log",debug},{lager_default_formatter}]}],
expand_handlers([{lager_file_backend, [
[{"test.log", debug},{lager_default_formatter,["[",severity,"] ", message, "\n"]}],
[{"test2.log",debug},{lager_default_formatter}]
]
}])
)
},
{"New form needs no expansion",
?_assertMatch([
{{lager_file_backend,"test.log"}, [{file, "test.log"}]},
{{lager_file_backend,"test2.log"}, [{file, "test2.log"}, {level, info}, {sync_on, none}]},
{{lager_file_backend,"test3.log"}, [{formatter, lager_default_formatter}, {file, "test3.log"}]}
],
expand_handlers([
{lager_file_backend, [{file, "test.log"}]},
{lager_file_backend, [{file, "test2.log"}, {level, info}, {sync_on, none}]},
{lager_file_backend, [{formatter, lager_default_formatter},{file, "test3.log"}]}
])
)
}
].
-endif.

src/lager_default_formatter.erl  (+17, -11)

@ -73,10 +73,10 @@ format(Message,Config) ->
-spec output(term(),lager_msg:lager_msg()) -> iolist().
output(message,Msg) -> lager_msg:message(Msg);
output(date,Msg) ->
{D, _T} = lager_msg:timestamp(Msg),
{D, _T} = lager_msg:datetime(Msg),
D;
output(time,Msg) ->
{_D, T} = lager_msg:timestamp(Msg),
{_D, T} = lager_msg:datetime(Msg),
T;
output(severity,Msg) ->
atom_to_list(lager_msg:severity(Msg));
@ -115,11 +115,17 @@ get_metadata(Key, Metadata, Default) ->
end.
-ifdef(TEST).
date_time_now() ->
Now = os:timestamp(),
{Date, Time} = lager_util:format_time(lager_util:maybe_utc(lager_util:localtime_ms(Now))),
{Date, Time, Now}.
basic_test_() ->
{Date, Time, Now} = date_time_now(),
[{"Default formatting test",
?_assertEqual(iolist_to_binary([<<"Day Time [error] ">>, pid_to_list(self()), <<" Message\n">>]),
?_assertEqual(iolist_to_binary([Date, " ", Time, " [error] ", pid_to_list(self()), " Message\n"]),
iolist_to_binary(format(lager_msg:new("Message",
{"Day", "Time"},
Now,
error,
[{pid, self()}],
[]),
@ -128,16 +134,16 @@ basic_test_() ->
{"Basic Formatting",
?_assertEqual(<<"Simplist Format">>,
iolist_to_binary(format(lager_msg:new("Message",
{"Day", "Time"},
Now,
error,
[{pid, self()}],
[]),
["Simplist Format"])))
},
{"Default equivalent formatting test",
?_assertEqual(iolist_to_binary([<<"Day Time [error] ">>, pid_to_list(self()), <<" Message\n";>>;]),
?_assertEqual(iolist_to_binary([Date, " ", Time, " [error] ", pid_to_list(self()), " Message\n"]),
iolist_to_binary(format(lager_msg:new("Message",
{"Day", "Time"},
Now,
error,
[{pid, self()}],
[]),
@ -145,9 +151,9 @@ basic_test_() ->
)))
},
{"Non existant metadata can default to string",
?_assertEqual(iolist_to_binary([<<"Day Time [error] Fallback Message\n">>;]),
?_assertEqual(iolist_to_binary([Date, " ", Time, " [error] Fallback Message\n";]),
iolist_to_binary(format(lager_msg:new("Message",
{"Day", "Time"},
Now,
error,
[{pid, self()}],
[]),
@ -155,9 +161,9 @@ basic_test_() ->
)))
},
{"Non existant metadata can default to other metadata",
?_assertEqual(iolist_to_binary([<<"Day Time [error] Fallback Message\n">>;]),
?_assertEqual(iolist_to_binary([Date, " ", Time, " [error] Fallback Message\n";]),
iolist_to_binary(format(lager_msg:new("Message",
{"Day", "Time"},
Now,
error,
[{pid, "Fallback"}],
[]),

src/lager_file_backend.erl  (+346, -107)

@ -43,6 +43,15 @@
-export([config_to_id/1]).
-define(DEFAULT_LOG_LEVEL, info).
-define(DEFAULT_ROTATION_SIZE, 10485760). %% 10mb
-define(DEFAULT_ROTATION_DATE, "$D0"). %% midnight
-define(DEFAULT_ROTATION_COUNT, 5).
-define(DEFAULT_SYNC_LEVEL, error).
-define(DEFAULT_SYNC_INTERVAL, 1000).
-define(DEFAULT_SYNC_SIZE, 1024*64). %% 64kb
-define(DEFAULT_CHECK_INTERVAL, 1000).
-record(state, {
name :: string(),
level :: {'mask', integer()},
@ -53,35 +62,53 @@
date,
count = 10,
formatter,
formatter_config
formatter_config,
sync_on,
check_interval = ?DEFAULT_CHECK_INTERVAL,
sync_interval = ?DEFAULT_SYNC_INTERVAL,
sync_size = ?DEFAULT_SYNC_SIZE,
last_check = os:timestamp()
}).
%% @private
-spec init([{string(), lager:log_level()},...]) -> {ok, #state{}}.
init({FileName, LogLevel}) when is_list(FileName), is_atom(LogLevel) ->
%% backwards compatibility hack
init([{file, FileName}, {level, LogLevel}]);
init({FileName, LogLevel, Size, Date, Count}) when is_list(FileName), is_atom(LogLevel) ->
%% backwards compatibility hack
init([{file, FileName}, {level, LogLevel}, {size, Size}, {date, Date}, {count, Count}]);
init([{FileName, LogLevel, Size, Date, Count}, {Formatter,FormatterConfig}]) when is_list(FileName), is_atom(LogLevel), is_atom(Formatter) ->
%% backwards compatibility hack
init([{file, FileName}, {level, LogLevel}, {size, Size}, {date, Date}, {count, Count}, {formatter, Formatter}, {formatter_config, FormatterConfig}]);
init([LogFile,{Formatter}]) ->
%% backwards compatibility hack
init([LogFile,{Formatter,[]}]);
init([LogFile,{Formatter,FormatterConfig}]) ->
case validate_logfile(LogFile) of
{Name, Level, Size, Date, Count} ->
init([{FileName, LogLevel}, {Formatter,FormatterConfig}]) when is_list(FileName), is_atom(LogLevel), is_atom(Formatter) ->
%% backwards compatibility hack
init([{file, FileName}, {level, LogLevel}, {formatter, Formatter}, {formatter_config, FormatterConfig}]);
init(LogFileConfig) when is_list(LogFileConfig) ->
case validate_logfile_proplist(LogFileConfig) of
false ->
%% failed to validate config
ignore;
Config ->
%% probably a better way to do this, but whatever
[Name, Level, Date, Size, Count, SyncInterval, SyncSize, SyncOn, CheckInterval, Formatter, FormatterConfig] =
[proplists:get_value(Key, Config) || Key <- [file, level, date, size, count, sync_interval, sync_size, sync_on, check_interval, formatter, formatter_config]],
schedule_rotation(Name, Date),
State = case lager_util:open_logfile(Name, true) of
State0 = #state{name=Name, level=Level, size=Size, date=Date, count=Count, formatter=Formatter,
formatter_config=FormatterConfig, sync_on=SyncOn, sync_interval=SyncInterval, sync_size=SyncSize,
check_interval=CheckInterval},
State = case lager_util:open_logfile(Name, {SyncSize, SyncInterval}) of
{ok, {FD, Inode, _}} ->
#state{name=Name, level=Level,
fd=FD, inode=Inode, size=Size, date=Date, count=Count, formatter=Formatter, formatter_config=FormatterConfig};
State0#state{fd=FD, inode=Inode};
{error, Reason} ->
?INT_LOG(error, "Failed to open log file ~s with error ~s",
[Name, file:format_error(Reason)]),
#state{name=Name, level=Level,
flap=true, size=Size, date=Date, count=Count, formatter=Formatter, formatter_config=FormatterConfig}
?INT_LOG(error, "Failed to open log file ~s with error ~s", [Name, file:format_error(Reason)]),
State0#state{flap=true}
end,
{ok, State};
false ->
ignore
end;
init(LogFile) ->
init([LogFile,{lager_default_formatter,[]}]).
{ok, State}
end.
%% @private
handle_call({set_loglevel, Level}, #state{name=Ident} = State) ->
@ -102,7 +129,7 @@ handle_event({log, Message},
#state{name=Name, level=L,formatter=Formatter,formatter_config=FormatConfig} = State) ->
case lager_util:is_loggable(Message,L,{lager_file_backend, Name}) of
true ->
{ok,write(State, lager_msg:severity_as_int(Message), Formatter:format(Message,FormatConfig)) };
{ok,write(State, lager_msg:timestamp(Message), lager_msg:severity_as_int(Message), Formatter:format(Message,FormatConfig)) };
false ->
{ok, State}
end;
@ -129,93 +156,71 @@ code_change(_OldVsn, State, _Extra) ->
{ok, State}.
%% convert the config into a gen_event handler ID
config_to_id({Name,_Severity}) ->
config_to_id({Name,_Severity}) when is_list(Name) ->
{?MODULE, Name};
config_to_id({Name,_Severity,_Size,_Rotation,_Count}) ->
{?MODULE, Name};
config_to_id([{Name,_Severity,_Size,_Rotation,_Count}, _Format]) ->
{?MODULE, Name}.
{?MODULE, Name};
config_to_id([{Name,_Severity}, _Format]) when is_list(Name) ->
{?MODULE, Name};
config_to_id(Config) ->
case proplists:get_value(file, Config) of
undefined ->
erlang:error(no_file);
File ->
{?MODULE, File}
end.
write(#state{name=Name, fd=FD, inode=Inode, flap=Flap, size=RotSize,
count=Count} = State, Level, Msg) ->
case lager_util:ensure_logfile(Name, FD, Inode, true) of
{ok, {_, _, Size}} when RotSize /= 0, Size > RotSize ->
lager_util:rotate_logfile(Name, Count),
write(State, Level, Msg);
{ok, {NewFD, NewInode, _}} ->
%% delayed_write doesn't report errors
_ = file:write(NewFD, Msg),
case Level of
_ when Level =< ?ERROR ->
%% force a sync on any message at error severity or above
Flap2 = case file:datasync(NewFD) of
{error, Reason2} when Flap == false ->
?INT_LOG(error, "Failed to write log message to file ~s: ~s",
[Name, file:format_error(Reason2)]),
true;
ok ->
false;
count=Count} = State, Timestamp, Level, Msg) ->
LastCheck = timer:now_diff(Timestamp, State#state.last_check) div 1000,
case LastCheck >= State#state.check_interval orelse FD == undefined of
true ->
%% need to check for rotation
case lager_util:ensure_logfile(Name, FD, Inode, {State#state.sync_size, State#state.sync_interval}) of
{ok, {_, _, Size}} when RotSize /= 0, Size > RotSize ->
lager_util:rotate_logfile(Name, Count),
%% go around the loop again, we'll do another rotation check and hit the next clause here
write(State, Timestamp, Level, Msg);
{ok, {NewFD, NewInode, _}} ->
%% update our last check and try again
do_write(State#state{last_check=Timestamp, fd=NewFD, inode=NewInode}, Level, Msg);
{error, Reason} ->
case Flap of
true ->
State;
_ ->
Flap
end,
State#state{fd=NewFD, inode=NewInode, flap=Flap2};
_ ->
State#state{fd=NewFD, inode=NewInode}
?INT_LOG(error, "Failed to reopen log file ~s with error ~s", [Name, file:format_error(Reason)]),
State#state{flap=true}
end
end;
{error, Reason} ->
case Flap of
true ->
State;
_ ->
?INT_LOG(error, "Failed to reopen log file ~s with error ~s",
[Name, file:format_error(Reason)]),
State#state{flap=true}
end
false ->
do_write(State, Level, Msg)
end.
validate_logfile({Name, Level}) ->
case validate_loglevel(Level) of
false ->
?INT_LOG(error, "Invalid log level of ~p for ~s.",
[Level, Name]),
false;
Levels ->
{Name, Levels, 0, undefined, 0}
end;
validate_logfile({Name, Level, Size, Date, Count}) ->
ValidLevel = validate_loglevel(Level),
ValidSize = (is_integer(Size) andalso Size >= 0),
ValidCount = (is_integer(Count) andalso Count >= 0),
case {ValidLevel, ValidSize, ValidCount} of
{false, _, _} ->
?INT_LOG(error, "Invalid log level of ~p for ~s.",
[Level, Name]),
false;
{_, false, _} ->
?INT_LOG(error, "Invalid rotation size of ~p for ~s.",
[Size, Name]),
false;
{_, _, false} ->
?INT_LOG(error, "Invalid rotation count of ~p for ~s.",
[Count, Name]),
false;
{Levels, true, true} ->
case lager_util:parse_rotation_date_spec(Date) of
{ok, Spec} ->
{Name, Levels, Size, Spec, Count};
{error, _} when Date == "" ->
%% blank ones are fine.
{Name, Levels, Size, undefined, Count};
{error, _} ->
?INT_LOG(error, "Invalid rotation date of ~p for ~s.",
[Date, Name]),
false
end
end;
validate_logfile(H) ->
?INT_LOG(error, "Invalid log file config ~p.", [H]),
false.
do_write(#state{fd=FD, name=Name, flap=Flap} = State, Level, Msg) ->
%% delayed_write doesn't report errors
_ = file:write(FD, Msg),
{mask, SyncLevel} = State#state.sync_on,
case (Level band SyncLevel) /= 0 of
true ->
%% force a sync on any message that matches the 'sync_on' bitmask
Flap2 = case file:datasync(FD) of
{error, Reason2} when Flap == false ->
?INT_LOG(error, "Failed to write log message to file ~s: ~s",
[Name, file:format_error(Reason2)]),
true;
ok ->
false;
_ ->
Flap
end,
State#state{flap=Flap2};
_ ->
State
end.
validate_loglevel(Level) ->
try lager_util:config_to_mask(Level) of
@ -226,6 +231,113 @@ validate_loglevel(Level) ->
false
end.
validate_logfile_proplist(List) ->
try validate_logfile_proplist(List, []) of
Res ->
case proplists:get_value(file, Res) of
undefined ->
?INT_LOG(error, "Missing required file option", []),
false;
_File ->
%% merge with the default options
{ok, DefaultRotationDate} = lager_util:parse_rotation_date_spec(?DEFAULT_ROTATION_DATE),
lists:keymerge(1, lists:sort(Res), lists:sort([
{level, validate_loglevel(?DEFAULT_LOG_LEVEL)}, {date, DefaultRotationDate},
{size, ?DEFAULT_ROTATION_SIZE}, {count, ?DEFAULT_ROTATION_COUNT},
{sync_on, validate_loglevel(?DEFAULT_SYNC_LEVEL)}, {sync_interval, ?DEFAULT_SYNC_INTERVAL},
{sync_size, ?DEFAULT_SYNC_SIZE}, {check_interval, ?DEFAULT_CHECK_INTERVAL},
{formatter, lager_default_formatter}, {formatter_config, []}
]))
end
catch
{bad_config, Msg, Value} ->
?INT_LOG(error, "~s ~p for file ~p",
[Msg, Value, proplists:get_value(file, List)]),
false
end.
validate_logfile_proplist([], Acc) ->
Acc;
validate_logfile_proplist([{file, File}|Tail], Acc) ->
%% is there any reasonable validation we can do here?
validate_logfile_proplist(Tail, [{file, File}|Acc]);
validate_logfile_proplist([{level, Level}|Tail], Acc) ->
case validate_loglevel(Level) of
false ->
throw({bad_config, "Invalid loglevel", Level});
Res ->
validate_logfile_proplist(Tail, [{level, Res}|Acc])
end;
validate_logfile_proplist([{size, Size}|Tail], Acc) ->
case Size of
S when is_integer(S), S >= 0 ->
validate_logfile_proplist(Tail, [{size, Size}|Acc]);
_ ->
throw({bad_config, "Invalid rotation size", Size})
end;
validate_logfile_proplist([{count, Count}|Tail], Acc) ->
case Count of
C when is_integer(C), C >= 0 ->
validate_logfile_proplist(Tail, [{count, Count}|Acc]);
_ ->
throw({bad_config, "Invalid rotation count", Count})
end;
validate_logfile_proplist([{date, Date}|Tail], Acc) ->
case lager_util:parse_rotation_date_spec(Date) of
{ok, Spec} ->
validate_logfile_proplist(Tail, [{date, Spec}|Acc]);
{error, _} when Date == "" ->
%% legacy config allowed blanks
validate_logfile_proplist(Tail, Acc);
{error, _} ->
throw({bad_config, "Invalid rotation date", Date})
end;
validate_logfile_proplist([{sync_interval, SyncInt}|Tail], Acc) ->
case SyncInt of
Val when is_integer(Val), Val >= 0 ->
validate_logfile_proplist(Tail, [{sync_interval, Val}|Acc]);
_ ->
throw({bad_config, "Invalid sync interval", SyncInt})
end;
validate_logfile_proplist([{sync_size, SyncSize}|Tail], Acc) ->
case SyncSize of
Val when is_integer(Val), Val >= 0 ->
validate_logfile_proplist(Tail, [{sync_size, Val}|Acc]);
_ ->
throw({bad_config, "Invalid sync size", SyncSize})
end;
validate_logfile_proplist([{check_interval, CheckInt}|Tail], Acc) ->
case CheckInt of
Val when is_integer(Val), Val >= 0 ->
validate_logfile_proplist(Tail, [{check_interval, Val}|Acc]);
always ->
validate_logfile_proplist(Tail, [{check_interval, 0}|Acc]);
_ ->
throw({bad_config, "Invalid check interval", CheckInt})
end;
validate_logfile_proplist([{sync_on, Level}|Tail], Acc) ->
case validate_loglevel(Level) of
false ->
throw({bad_config, "Invalid sync on level", Level});
Res ->
validate_logfile_proplist(Tail, [{sync_on, Res}|Acc])
end;
validate_logfile_proplist([{formatter, Fmt}|Tail], Acc) ->
case is_atom(Fmt) of
true ->
validate_logfile_proplist(Tail, [{formatter, Fmt}|Acc]);
false ->
throw({bad_config, "Invalid formatter module", Fmt})
end;
validate_logfile_proplist([{formatter_config, FmtCfg}|Tail], Acc) ->
case is_list(FmtCfg) of
true ->
validate_logfile_proplist(Tail, [{formatter_config, FmtCfg}|Acc]);
false ->
throw({bad_config, "Invalid formatter config", FmtCfg})
end;
validate_logfile_proplist([Other|_Tail], _Acc) ->
throw({bad_config, "Invalid option", Other}).
schedule_rotation(_, undefined) ->
ok;
@ -244,16 +356,22 @@ get_loglevel_test() ->
?assertEqual(Level2, lager_util:config_to_mask(warning)).
rotation_test() ->
{ok, {FD, Inode, _}} = lager_util:open_logfile("test.log", true),
SyncLevel = validate_loglevel(?DEFAULT_SYNC_LEVEL),
SyncSize = ?DEFAULT_SYNC_SIZE,
SyncInterval = ?DEFAULT_SYNC_INTERVAL,
CheckInterval = 0, %% hard to test delayed mode
{ok, {FD, Inode, _}} = lager_util:open_logfile("test.log", {SyncSize, SyncInterval}),
State0 = #state{name="test.log", level=?DEBUG, fd=FD, inode=Inode, sync_on=SyncLevel,
sync_size=SyncSize, sync_interval=SyncInterval, check_interval=CheckInterval},
?assertMatch(#state{name="test.log", level=?DEBUG, fd=FD, inode=Inode},
write(#state{name="test.log", level=?DEBUG, fd=FD, inode=Inode}, 0, "hello world")),
write(State0, os:timestamp(), ?DEBUG, "hello world")),
file:delete("test.log"),
Result = write(#state{name="test.log", level=?DEBUG, fd=FD, inode=Inode}, 0, "hello world"),
Result = write(State0, os:timestamp(), ?DEBUG, "hello world"),
%% assert file has changed
?assert(#state{name="test.log", level=?DEBUG, fd=FD, inode=Inode} =/= Result),
?assertMatch(#state{name="test.log", level=?DEBUG}, Result),
file:rename("test.log", "test.log.1"),
Result2 = write(Result, 0, "hello world"),
Result2 = write(Result, os:timestamp(), ?DEBUG, "hello world"),
%% assert file has changed
?assert(Result =/= Result2),
?assertMatch(#state{name="test.log", level=?DEBUG}, Result2),
@ -271,13 +389,15 @@ filesystem_test_() ->
end,
fun(_) ->
file:delete("test.log"),
file:delete("test.log.0"),
file:delete("test.log.1"),
application:stop(lager),
error_logger:tty(true)
end,
[
{"under normal circumstances, file should be opened",
fun() ->
gen_event:add_handler(lager_event, lager_file_backend, {"test.log", info}),
gen_event:add_handler(lager_event, lager_file_backend, [{"test.log", info}, {lager_default_formatter}]),
lager:log(error, self(), "Test message"),
{ok, Bin} = file:read_file("test.log"),
Pid = pid_to_list(self()),
@ -288,7 +408,7 @@ filesystem_test_() ->
fun() ->
{ok, FInfo} = file:read_file_info("test.log"),
file:write_file_info("test.log", FInfo#file_info{mode = 0}),
gen_event:add_handler(lager_event, lager_file_backend, {"test.log", info}),
gen_event:add_handler(lager_event, lager_file_backend, {"test.log", info, 10*1024*1024, "$D0", 5}),
?assertEqual(1, lager_test_backend:count()),
{_Level, _Time,Message,_Metadata} = lager_test_backend:pop(),
?assertEqual("Failed to open log file test.log with error permission denied", lists:flatten(Message))
@ -296,7 +416,7 @@ filesystem_test_() ->
},
{"file that becomes unavailable at runtime should trigger an error message",
fun() ->
gen_event:add_handler(lager_event, lager_file_backend, {"test.log", info}),
gen_event:add_handler(lager_event, lager_file_backend, [{file, "test.log"}, {level, info}, {check_interval, 0}]),
?assertEqual(0, lager_test_backend:count()),
lager:log(error, self(), "Test message"),
?assertEqual(1, lager_test_backend:count()),
@ -317,7 +437,7 @@ filesystem_test_() ->
{ok, FInfo} = file:read_file_info("test.log"),
OldPerms = FInfo#file_info.mode,
file:write_file_info("test.log", FInfo#file_info{mode = 0}),
gen_event:add_handler(lager_event, lager_file_backend, {"test.log", info}),
gen_event:add_handler(lager_event, lager_file_backend, [{file, "test.log"},{check_interval, 0}]),
?assertEqual(1, lager_test_backend:count()),
{_Level, _Time, Message,_Metadata} = lager_test_backend:pop(),
?assertEqual("Failed to open log file test.log with error permission denied", lists:flatten(Message)),
@ -330,7 +450,7 @@ filesystem_test_() ->
},
{"external logfile rotation/deletion should be handled",
fun() ->
gen_event:add_handler(lager_event, lager_file_backend, {"test.log", info}),
gen_event:add_handler(lager_event, lager_file_backend, [{file, "test.log"}, {level, info}, {check_interval, 0}]),
?assertEqual(0, lager_test_backend:count()),
lager:log(error, self(), "Test message1"),
?assertEqual(1, lager_test_backend:count()),
@ -346,6 +466,68 @@ filesystem_test_() ->
?assertMatch([_, _, "[error]", Pid, "Test message3\n"], re:split(Bin2, " ", [{return, list}, {parts, 5}]))
end
},
{"internal size rotation should work",
fun() ->
gen_event:add_handler(lager_event, lager_file_backend, [{file, "test.log"}, {level, info}, {check_interval, 0}, {size, 10}]),
lager:log(error, self(), "Test message1"),
lager:log(error, self(), "Test message1"),
?assert(filelib:is_regular("test.log.0"))
end
},
{"internal time rotation should work",
fun() ->
gen_event:add_handler(lager_event, lager_file_backend, [{file, "test.log"}, {level, info}, {check_interval, 1000}]),
lager:log(error, self(), "Test message1"),
lager:log(error, self(), "Test message1"),
whereis(lager_event) ! {rotate, "test.log"},
lager:log(error, self(), "Test message1"),
?assert(filelib:is_regular("test.log.0"))
end
},
{"sync_on option should work",
fun() ->
gen_event:add_handler(lager_event, lager_file_backend, [{file, "test.log"}, {level, info}, {sync_on, "=info"}, {check_interval, 5000}, {sync_interval, 5000}]),
lager:log(error, self(), "Test message1"),
lager:log(error, self(), "Test message1"),
?assertEqual({ok, <<>>}, file:read_file("test.log")),
lager:log(info, self(), "Test message1"),
{ok, Bin} = file:read_file("test.log"),
?assert(<<>> /= Bin)
end
},
{"sync_on none option should work (also tests sync_interval)",
fun() ->
gen_event:add_handler(lager_event, lager_file_backend, [{file, "test.log"}, {level, info}, {sync_on, "none"}, {check_interval, 5000}, {sync_interval, 1000}]),
lager:log(error, self(), "Test message1"),
lager:log(error, self(), "Test message1"),
?assertEqual({ok, <<>>}, file:read_file("test.log")),
lager:log(info, self(), "Test message1"),
?assertEqual({ok, <<>>}, file:read_file("test.log")),
timer:sleep(1000),
{ok, Bin} = file:read_file("test.log"),
?assert(<<>> /= Bin)
end
},
{"sync_size option should work",
fun() ->
gen_event:add_handler(lager_event, lager_file_backend, [{file, "test.log"}, {level, info}, {sync_on, "none"}, {check_interval, 5001}, {sync_size, 640}, {sync_interval, 5001}]),
lager:log(error, self(), "Test messageis64bytes"),
lager:log(error, self(), "Test messageis64bytes"),
lager:log(error, self(), "Test messageis64bytes"),
lager:log(error, self(), "Test messageis64bytes"),
lager:log(error, self(), "Test messageis64bytes"),
?assertEqual({ok, <<>>}, file:read_file("test.log")),
lager:log(error, self(), "Test messageis64bytes"),
lager:log(error, self(), "Test messageis64bytes"),
lager:log(error, self(), "Test messageis64bytes"),
lager:log(error, self(), "Test messageis64bytes"),
?assertEqual({ok, <<>>}, file:read_file("test.log")),
%% now we've written enough bytes
lager:log(error, self(), "Test messageis64bytes"),
{ok, Bin} = file:read_file("test.log"),
?assert(<<>> /= Bin)
end
},
{"runtime level changes",
fun() ->
gen_event:add_handler(lager_event, {lager_file_backend, "test.log"}, {"test.log", info}),
@ -365,7 +547,7 @@ filesystem_test_() ->
},
{"invalid runtime level changes",
fun() ->
gen_event:add_handler(lager_event, lager_file_backend, {"test.log", info}),
gen_event:add_handler(lager_event, lager_file_backend, [{"test.log", info, 10*1024*1024, "$D0", 5}, {lager_default_formatter}]),
gen_event:add_handler(lager_event, lager_file_backend, {"test3.log", info}),
?assertEqual({error, bad_module}, lager:set_loglevel(lager_file_backend, "test.log", warning))
end
@ -389,7 +571,7 @@ filesystem_test_() ->
{"tracing should not duplicate messages",
fun() ->
gen_event:add_handler(lager_event, lager_file_backend,
{"test.log", critical}),
[{file, "test.log"}, {level, critical}, {check_interval, always}]),
lager:critical("Test message"),
{ok, Bin1} = file:read_file("test.log"),
?assertMatch([_, _, "[critical]", _, "Test message\n"], re:split(Bin1, " ", [{return, list}, {parts, 5}])),
@ -449,5 +631,62 @@ formatting_test_() ->
}
]}.
config_validation_test_() ->
[
{"missing file",
?_assertEqual(false,
validate_logfile_proplist([{level, info},{size, 10}]))
},
{"bad level",
?_assertEqual(false,
validate_logfile_proplist([{file, "test.log"}, {level, blah},{size, 10}]))
},
{"bad size",
?_assertEqual(false,
validate_logfile_proplist([{file, "test.log"}, {size, infinity}]))
},
{"bad count",
?_assertEqual(false,
validate_logfile_proplist([{file, "test.log"}, {count, infinity}]))
},
{"bad date",
?_assertEqual(false,
validate_logfile_proplist([{file, "test.log"}, {date, "midnight"}]))
},
{"blank date is ok",
?_assertMatch([_|_],
validate_logfile_proplist([{file, "test.log"}, {date, ""}]))
},
{"bad sync_interval",
?_assertEqual(false,
validate_logfile_proplist([{file, "test.log"}, {sync_interval, infinity}]))
},
{"bad sync_size",
?_assertEqual(false,
validate_logfile_proplist([{file, "test.log"}, {sync_size, infinity}]))
},
{"bad check_interval",
?_assertEqual(false,
validate_logfile_proplist([{file, "test.log"}, {check_interval, infinity}]))
},
{"bad sync_on level",
?_assertEqual(false,
validate_logfile_proplist([{file, "test.log"}, {sync_on, infinity}]))
},
{"bad formatter module",
?_assertEqual(false,
validate_logfile_proplist([{file, "test.log"}, {formatter, "io:format"}]))
},
{"bad formatter config",
?_assertEqual(false,
validate_logfile_proplist([{file, "test.log"}, {formatter_config, blah}]))
},
{"unknown option",
?_assertEqual(false,
validate_logfile_proplist([{file, "test.log"}, {rhubarb, spicy}]))
}
].
-endif.

src/lager_msg.erl  (+17, -4)

@ -1,8 +1,9 @@
-module(lager_msg).
-export([new/5]).
-export([new/4, new/5]).
-export([message/1]).
-export([timestamp/1]).
-export([datetime/1]).
-export([severity/1]).
-export([severity_as_int/1]).
-export([metadata/1]).
@ -12,18 +13,26 @@
destinations :: list(),
metadata :: [tuple()],
severity :: lager:log_level(),
timestamp :: {string(), string()},
datetime :: {string(), string()},
timestamp :: erlang:timestamp(),
message :: list()
}).
-opaque lager_msg() :: #lager_msg{}.
-export_type([lager_msg/0]).
-spec new(list(), {string(), string()}, atom(), [tuple()], list()) -> lager_msg().
%% create with provided timestamp, handy for testing mostly
-spec new(list(), erlang:timestamp(), lager:log_level(), [tuple()], list()) -> lager_msg().
new(Msg, Timestamp, Severity, Metadata, Destinations) ->
#lager_msg{message=Msg, timestamp=Timestamp, severity=Severity,
{Date, Time} = lager_util:format_time(lager_util:maybe_utc(lager_util:localtime_ms(Timestamp))),
#lager_msg{message=Msg, datetime={Date, Time}, timestamp=Timestamp, severity=Severity,
metadata=Metadata, destinations=Destinations}.
-spec new(list(), lager:log_level(), [tuple()], list()) -> lager_msg().
new(Msg, Severity, Metadata, Destinations) ->
Now = os:timestamp(),
new(Msg, Now, Severity, Metadata, Destinations).
-spec message(lager_msg()) -> list().
message(Msg) ->
Msg#lager_msg.message.
@ -32,6 +41,10 @@ message(Msg) ->
timestamp(Msg) ->
Msg#lager_msg.timestamp.
-spec datetime(lager_msg()) -> {string(), string()}.
datetime(Msg) ->
Msg#lager_msg.datetime.
-spec severity(lager_msg()) -> lager:log_level().
severity(Msg) ->
Msg#lager_msg.severity.
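
To illustrate the reworked lager_msg API above: new/4 stamps the message with os:timestamp() at creation time, timestamp/1 now returns that raw value, and datetime/1 returns the pre-formatted {Date, Time} pair that timestamp/1 used to return. A minimal sketch, assuming lager's modules are on the code path (the module name lager_msg_demo and the literal values are purely illustrative):

-module(lager_msg_demo).  %% hypothetical module, for illustration only
-export([demo/0]).

demo() ->
    %% new/4 stamps the message with os:timestamp() internally;
    %% new/5 still accepts an explicit timestamp, as the tests do.
    Msg = lager_msg:new("hello", error, [{pid, self()}], []),
    {_MegaSecs, _Secs, _MicroSecs} = lager_msg:timestamp(Msg), %% raw erlang timestamp
    {Date, Time} = lager_msg:datetime(Msg),                    %% formatted date/time strings
    io:format("~s ~s [~p] ~s~n",
              [Date, Time, lager_msg:severity(Msg), lager_msg:message(Msg)]).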

src/lager_util.erl  (+16, -9)

@ -20,7 +20,7 @@
-export([levels/0, level_to_num/1, num_to_level/1, config_to_mask/1, config_to_levels/1, mask_to_levels/1,
open_logfile/2, ensure_logfile/4, rotate_logfile/2, format_time/0, format_time/1,
localtime_ms/0, maybe_utc/1, parse_rotation_date_spec/1,
localtime_ms/0, localtime_ms/1, maybe_utc/1, parse_rotation_date_spec/1,
calculate_next_rotation/1, validate_trace/1, check_traces/4, is_loggable/3]).
-ifdef(TEST).
@ -124,8 +124,10 @@ open_logfile(Name, Buffer) ->
case filelib:ensure_dir(Name) of
ok ->
Options = [append, raw] ++
if Buffer == true -> [delayed_write];
true -> []
case Buffer of
{Size, Interval} when is_integer(Interval), Interval >= 0, is_integer(Size), Size >= 0 ->
[{delayed_write, Size, Interval}];
_ -> []
end,
case file:open(Name, Options) of
{ok, FD} ->
@ -175,10 +177,15 @@ ensure_logfile(Name, FD, Inode, Buffer) ->
%% returns localtime with milliseconds included
localtime_ms() ->
{_, _, Micro} = Now = os:timestamp(),
Now = os:timestamp(),
localtime_ms(Now).
localtime_ms(Now) ->
{_, _, Micro} = Now,
{Date, {Hours, Minutes, Seconds}} = calendar:now_to_local_time(Now),
{Date, {Hours, Minutes, Seconds, Micro div 1000 rem 1000}}.
maybe_utc({Date, {H, M, S, Ms}}) ->
case lager_stdlib:maybe_utc({Date, {H, M, S}}) of
{utc, {Date1, {H1, M1, S1}}} ->
@ -558,11 +565,11 @@ check_trace_test() ->
is_loggable_test_() ->
[
{"Loggable by severity only", ?_assert(is_loggable(lager_msg:new("",{"",""}, alert, [], []),2,me))},
{"Not loggable by severity only", ?_assertNot(is_loggable(lager_msg:new("",{"",""}, critical, [], []),1,me))},
{"Loggable by severity with destination", ?_assert(is_loggable(lager_msg:new("",{"",""}, alert, [], [you]),2,me))},
{"Not loggable by severity with destination", ?_assertNot(is_loggable(lager_msg:new("",{"",""}, critical, [], [you]),1,me))},
{"Loggable by destination overriding severity", ?_assert(is_loggable(lager_msg:new("",{"",""}, critical, [], [me]),1,me))}
{"Loggable by severity only", ?_assert(is_loggable(lager_msg:new("", alert, [], []),2,me))},
{"Not loggable by severity only", ?_assertNot(is_loggable(lager_msg:new("", critical, [], []),1,me))},
{"Loggable by severity with destination", ?_assert(is_loggable(lager_msg:new("", alert, [], [you]),2,me))},
{"Not loggable by severity with destination", ?_assertNot(is_loggable(lager_msg:new("", critical, [], [you]),1,me))},
{"Loggable by destination overriding severity", ?_assert(is_loggable(lager_msg:new("", critical, [], [me]),1,me))}
].
format_time_test_() ->
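
The Buffer argument to lager_util:open_logfile/2 changes meaning with this commit: a {Size, Interval} tuple now enables {delayed_write, Size, Interval} on the file descriptor, while any other value (including the old true/false flags) opens the file unbuffered. A minimal sketch, assuming lager's modules are on the code path (the module and file names are hypothetical):

-module(lager_util_buffer_demo).  %% hypothetical module, for illustration only
-export([open_buffered/0]).

open_buffered() ->
    %% 65536/1000 mirror the file backend's new sync_size/sync_interval defaults.
    {ok, {FD, _Inode, _FileSize}} = lager_util:open_logfile("demo.log", {65536, 1000}),
    ok = file:write(FD, <<"buffered line\n">>), %% held in the delayed_write buffer
    ok = file:datasync(FD),                     %% what sync_on forces for matching levels
    FD.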

test/lager_test_backend.erl  (+1, -1)

@ -69,7 +69,7 @@ handle_event({log, Msg},
true ->
{ok, State#state{buffer=Buffer ++
[{lager_msg:severity_as_int(Msg),
lager_msg:timestamp(Msg),
lager_msg:datetime(Msg),
lager_msg:message(Msg), lager_msg:metadata(Msg)}]}};
_ ->
{ok, State#state{ignored=Ignored ++ [ignored]}}
