[#136] – Rename 'log_queries' option to 'verbose' and log all operations.
cabol committed Mar 30, 2017
1 parent af9c439 commit e293316
Showing 2 changed files with 22 additions and 13 deletions.
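Before the diffs, a minimal sketch (not part of this commit) of flipping the renamed flag through the standard OTP application environment; init/1 in the adapter below reads the key with a default of false:

%% Hypothetical snippet: enable verbose operation logging. The key must
%% be set before the mnesia store is started, since init/1 reads it via
%% application:get_env(sumo_db, verbose, false) only once.
ok = application:set_env(sumo_db, verbose, true).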
src/adapters/sumo_store_mnesia.erl (33 changes: 21 additions & 12 deletions)
@@ -44,7 +44,7 @@
   | snmp | storage_properties.
 
 -type state() :: #{
-  log_queries => boolean(),
+  verbose => boolean(),
   default_options => [{option(), term()}]
 }.

@@ -55,8 +55,8 @@
 -spec init(term()) -> {ok, state()}.
 init(Options) ->
   DefaultOptions = parse(Options),
-  LogQueries = application:get_env(sumo_db, log_queries, false),
-  {ok, #{default_options => DefaultOptions, log_queries => LogQueries}}.
+  Verbose = application:get_env(sumo_db, verbose, false),
+  {ok, #{default_options => DefaultOptions, verbose => Verbose}}.
 
 -spec persist(Doc, State) -> Response when
   Doc :: sumo_internal:doc(),
@@ -83,6 +83,7 @@ persist(Doc, State) ->
       {error, Reason, State};
     {atomic, ok} ->
       NewDoc = sumo_internal:set_field(IdField, NewId, Doc),
+      _ = maybe_log(persist, [DocName, NewDoc], State),
       {ok, NewDoc, State}
   end.

@@ -96,6 +97,7 @@
     [Result] = mnesia:dirty_read(DocName, Id),
     Schema = sumo_internal:get_schema(DocName),
     Fields = schema_field_names(Schema),
+    _ = maybe_log(fetch, [DocName, Id], State),
     {ok, wakeup(result_to_doc(Result, Fields)), State}
   catch
     _:_ -> {error, notfound, State}
@@ -117,6 +119,7 @@
     {aborted, Reason} ->
       {error, Reason, State};
     {atomic, Result} ->
+      _ = maybe_log(delete_by, [DocName, Conditions], State),
       {ok, Result, State}
   end.

@@ -128,6 +131,7 @@
   Count = mnesia:table_info(DocName, size),
   case mnesia:clear_table(DocName) of
     {atomic, ok} ->
+      _ = maybe_log(delete_all, [DocName], State),
       {ok, Count, State};
     {aborted, Reason} ->
       {error, Reason, State}
@@ -200,7 +204,7 @@ find_by(DocName, Conditions, [], Limit, Offset, State) ->
     Schema = sumo_internal:get_schema(DocName),
     Fields = schema_field_names(Schema),
     Docs = [wakeup(result_to_doc(Result, Fields)) || Result <- Results],
-    _ = maybe_log_query(DocName, Conditions, Limit, Offset, MatchSpec, State),
+    _ = maybe_log(find_by, [DocName, Conditions, Limit, Offset, MatchSpec], State),
     {ok, Docs, State}
   end;
 find_by(_DocName, _Conditions, _Sort, _Limit, _Offset, State) ->
@@ -386,14 +390,6 @@ result_to_doc(Result, Fields) ->
 transform_conditions(DocName, Conditions) ->
   sumo_utils:transform_conditions(fun validate_date/1, DocName, Conditions, [date]).
 
-%% @private
-maybe_log_query(DocName, Conditions, Limit, Offset, MatchSpec, #{log_queries := true}) ->
-  Msg = "find_by(~p, ~p, [], ~p, ~p)~nMatchSpec: ~p",
-  Args = [DocName, Conditions, Limit, Offset, MatchSpec],
-  lager:debug(Msg, Args);
-maybe_log_query(_, _, _, _, _, _) ->
-  ok.
-
 %% @private
 validate_date({FieldType, _, FieldValue}) ->
   case {FieldType, sumo_utils:is_datetime(FieldValue)} of
@@ -427,3 +423,16 @@ wakeup_fun(date, _, {Date, _} = _FieldValue, _) ->
   Date;
 wakeup_fun(_, _, FieldValue, _) ->
   FieldValue.
+
+%% @private
+maybe_log(Fun, Args, #{verbose := true}) ->
+  lager:debug(log_format(Fun), Args);
+maybe_log(_, _, _) ->
+  ok.
+
+%% @private
+log_format(persist) -> "persist(~p, ~p)";
+log_format(fetch) -> "fetch(~p, ~p)";
+log_format(delete_by) -> "delete_by(~p, ~p)";
+log_format(delete_all) -> "delete_all(~p)";
+log_format(find_by) -> "find_by(~p, ~p, [], ~p, ~p)~nMatchSpec: ~p".
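
The format strings above determine the rendered message. For instance (a sketch, not from the commit; person and 1 are made-up arguments), maybe_log/3 hands log_format(fetch) to lager:debug/2, which formats it the same way io_lib:format/2 would:

%% Hypothetical shell check of the fetch format string:
Line = lists:flatten(io_lib:format("fetch(~p, ~p)", [person, 1])).
%% Line =:= "fetch(person, 1)"

With verbose set to false (or absent from the state), the catch-all clause makes maybe_log/3 a no-op that returns ok.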
test/test.config (2 changes: 1 addition & 1 deletion)
@@ -1,7 +1,7 @@
 [
   {sumo_db, [
     {wpool_opts, [{overrun_warning, 100}]},
-    {log_queries, true},
+    {verbose, true},
     {query_timeout, 30000},
     {storage_backends, [
       {sumo_test_backend_mnesia, sumo_backend_mnesia, []}
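Downstream applications upgrading past this commit need the same rename in their own configuration. A hypothetical sys.config fragment (only the renamed key comes from the commit; the surrounding layout is an assumption):

%% sys.config sketch, not from the repository. The old {log_queries, true}
%% entry is no longer read after this commit; use {verbose, true} instead.
[
  {sumo_db, [
    {verbose, true}
  ]}
].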
