diff --git a/Makefile b/Makefile
index d7b97a697..5fe275e9e 100644
--- a/Makefile
+++ b/Makefile
@@ -15,11 +15,10 @@ run_inv_web_service:
 	#
 	# KAFKA_TOPIC="platform.system-profile" KAFKA_GROUP="inventory" KAFKA_BOOTSTRAP_SERVERS="localhost:29092"
 	#
-	INVENTORY_LOGGING_CONFIG_FILE=logconfig.ini INVENTORY_LOG_LEVEL=DEBUG gunicorn -b :8080 run
+	INVENTORY_LOG_LEVEL=DEBUG gunicorn -b :8080 run

 run_inv_mq_service:
-	PAYLOAD_TRACKER_SERVICE_NAME=inventory-mq-service INVENTORY_LOGGING_CONFIG_FILE=logconfig.ini \
-	INVENTORY_LOG_LEVEL=DEBUG python inv_mq_service.py
+	PAYLOAD_TRACKER_SERVICE_NAME=inventory-mq-service INVENTORY_LOG_LEVEL=DEBUG python inv_mq_service.py

 run_inv_mq_service_test_producer:
 	python utils/kafka_producer.py
diff --git a/README.md b/README.md
index 8b92a6a02..a1d28579c 100644
--- a/README.md
+++ b/README.md
@@ -99,7 +99,7 @@ runs.
 A command to run the server in a cluster.

 ```
-gunicorn -c gunicorn.conf.py --log-config=$INVENTORY_LOGGING_CONFIG_FILE run
+gunicorn -c gunicorn.conf.py run
 ```

 Running the server locally for development. In this case it’s not necessary to
diff --git a/app/__init__.py b/app/__init__.py
index 10eb0eeb7..f1e037d72 100644
--- a/app/__init__.py
+++ b/app/__init__.py
@@ -39,7 +39,7 @@ def create_app(runtime_environment):

     # This feels like a hack but it is needed. The logging configuration
     # needs to be setup before the flask app is initialized.
-    configure_logging(runtime_environment)
+    configure_logging()

     app_config = Config(runtime_environment)
     app_config.log_configuration()
diff --git a/app/logging.py b/app/logging.py
index 5defbde5b..3ca6fa90b 100644
--- a/app/logging.py
+++ b/app/logging.py
@@ -1,79 +1,61 @@
 import logging.config
 import os
+from logging import NullHandler
 from threading import local

 import logstash_formatter
 import watchtower
 from boto3.session import Session
 from gunicorn import glogging
+from yaml import safe_load

 OPENSHIFT_ENVIRONMENT_NAME_FILE = "/var/run/secrets/kubernetes.io/serviceaccount/namespace"
 DEFAULT_AWS_LOGGING_NAMESPACE = "inventory-dev"
-LOGGER_PREFIX = "inventory."
+DEFAULT_LOGGING_CONFIG_FILE = "logconfig.yaml"
+LOGGER_NAME = "inventory"

 threadctx = local()


-def configure_logging(runtime_environment):
-    env_var_name = "INVENTORY_LOGGING_CONFIG_FILE"
-    log_config_file = os.getenv(env_var_name)
-    if log_config_file is not None:
-        # The logging module throws an odd error (KeyError) if the
-        # config file is not found. Hopefully, this makes it more clear.
-        try:
-            fh = open(log_config_file)
-            fh.close()
-        except FileNotFoundError:
-            print(
-                f"Error reading the logging configuration file. Verify the {env_var_name} environment variable is "
-                "set correctly. Aborting..."
-            )
-            raise
-
-        logging.config.fileConfig(fname=log_config_file)
+def configure_logging():
+    log_config_file = os.getenv("INVENTORY_LOGGING_CONFIG_FILE", DEFAULT_LOGGING_CONFIG_FILE)
+    with open(log_config_file) as log_config_file:
+        logconfig_dict = safe_load(log_config_file)

-    if runtime_environment.logging_enabled:
-        _configure_watchtower_logging_handler()
-    _configure_contextual_logging_filter()
+    logging.config.dictConfig(logconfig_dict)
+    logger = logging.getLogger(LOGGER_NAME)
+    log_level = os.getenv("INVENTORY_LOG_LEVEL", "INFO").upper()
+    logger.setLevel(log_level)


-def _configure_watchtower_logging_handler():
+def cloudwatch_handler():
     aws_access_key_id = os.getenv("AWS_ACCESS_KEY_ID", None)
     aws_secret_access_key = os.getenv("AWS_SECRET_ACCESS_KEY", None)
     aws_region_name = os.getenv("AWS_REGION_NAME", None)
-    log_group = os.getenv("AWS_LOG_GROUP", "platform")
-    stream_name = os.getenv("AWS_LOG_STREAM", _get_hostname())  # default to hostname
-    create_log_group = str(os.getenv("AWS_CREATE_LOG_GROUP")).lower() == "true"

-    if all([aws_access_key_id, aws_secret_access_key, aws_region_name, stream_name]):
-        print(f"Configuring watchtower logging (log_group={log_group}, stream_name={stream_name})")
+    if all((aws_access_key_id, aws_secret_access_key, aws_region_name)):
+        aws_log_group = os.getenv("AWS_LOG_GROUP", "platform")
+        aws_log_stream = os.getenv("AWS_LOG_STREAM", _get_hostname())
+        create_log_group = str(os.getenv("AWS_CREATE_LOG_GROUP")).lower() == "true"
+        print(f"Configuring watchtower logging (log_group={aws_log_group}, stream_name={aws_log_stream})")
         boto3_session = Session(
             aws_access_key_id=aws_access_key_id,
             aws_secret_access_key=aws_secret_access_key,
             region_name=aws_region_name,
         )
-
-        root = logging.getLogger()
-        handler = watchtower.CloudWatchLogHandler(
+        return watchtower.CloudWatchLogHandler(
             boto3_session=boto3_session,
-            log_group=log_group,
-            stream_name=stream_name,
+            log_group=aws_log_group,
+            stream_name=aws_log_stream,
             create_log_group=create_log_group,
         )
-        handler.setFormatter(logstash_formatter.LogstashFormatterV1())
-        root.addHandler(handler)
     else:
         print("Unable to configure watchtower logging. Please verify watchtower logging configuration!")
+        return NullHandler()


 def _get_hostname():
-    return os.uname()[1]
-
-
-def _configure_contextual_logging_filter():
-    # Only enable the contextual filter if not in "testing" mode
-    root = logging.getLogger()
-    root.addFilter(ContextualFilter())
+    return os.uname().nodename


 class ContextualFilter(logging.Filter):
@@ -119,8 +101,4 @@ def setup(self, cfg):


 def get_logger(name):
-    log_level = os.getenv("INVENTORY_LOG_LEVEL", "INFO").upper()
-    logger = logging.getLogger(LOGGER_PREFIX + name)
-    logger.addFilter(ContextualFilter())
-    logger.setLevel(log_level)
-    return logger
+    return logging.getLogger(f"{LOGGER_NAME}.{name}")
diff --git a/host_reaper.py b/host_reaper.py
index f14842ca6..765f42d3d 100644
--- a/host_reaper.py
+++ b/host_reaper.py
@@ -88,7 +88,7 @@ def main(logger):


 if __name__ == "__main__":
-    configure_logging(RUNTIME_ENVIRONMENT)
+    configure_logging()

     logger = get_logger(LOGGER_NAME)
     sys.excepthook = partial(_excepthook, logger)
diff --git a/logconfig.ini b/logconfig.ini
deleted file mode 100644
index c274271ee..000000000
--- a/logconfig.ini
+++ /dev/null
@@ -1,85 +0,0 @@
-[loggers]
-keys=root, gunicorn.error, gunicorn.access, sqlalchemy.engine, xjoin, app, api, tasks, mq_service, lib, utils
-
-[handlers]
-keys=logstash
-
-[formatters]
-keys=logstash, human_readable
-
-[logger_root]
-level=INFO
-handlers=logstash
-
-[logger_gunicorn.error]
-level=ERROR
-handlers=logstash
-propagate=1
-qualname=gunicorn.error
-
-[logger_gunicorn.access]
-level=ERROR
-handlers=logstash
-propagate=1
-qualname=gunicorn.access
-
-[logger_sqlalchemy.engine]
-level=WARNING
-handlers=logstash
-propagate=1
-qualname=sqlalchemy.engine
-
-[logger_xjoin]
-level=WARNING
-handlers=logstash
-propagate=0
-qualname=inventory.xjoin
-
-[logger_app]
-level=DEBUG
-handlers=logstash
-propagate=0
-qualname=inventory.app
-
-[logger_api]
-level=DEBUG
-handlers=logstash
-propagate=0
-qualname=inventory.api
-
-[logger_tasks]
-level=DEBUG
-handlers=logstash
-propagate=0
-qualname=inventory.tasks
-
-[logger_mq_service]
-level=DEBUG
-handlers=logstash
-propagate=0
-qualname=inventory.mq_service
-
-[logger_lib]
-level=DEBUG
-handlers=logstash
-propagate=0
-qualname=inventory.lib
-
-[logger_utils]
-level=INFO
-handlers=logstash
-propagate=0
-qualname=inventory.utils
-
-[handler_logstash]
-class=StreamHandler
-level=NOTSET
-#formatter=logstash
-formatter=human_readable
-args=(sys.stdout, )
-
-[formatter_logstash]
-class=logstash_formatter.LogstashFormatterV1
-
-[formatter_human_readable]
-format=[%(asctime)s] [%(process)d] [%(thread)d] [%(name)s] [%(levelname)s] %(message)s
diff --git a/logconfig.yaml b/logconfig.yaml
new file mode 100644
index 000000000..b73eb6551
--- /dev/null
+++ b/logconfig.yaml
@@ -0,0 +1,54 @@
+---
+version: 1
+root:
+  level: INFO
+  handlers:
+  - logstash
+loggers:
+  gunicorn.error:
+    level: ERROR
+    handlers:
+    - logstash
+    propagate: true
+  gunicorn.access:
+    level: ERROR
+    handlers:
+    - logstash
+    propagate: true
+  sqlalchemy.engine:
+    level: WARNING
+    handlers:
+    - logstash
+    propagate: true
+  inventory:
+    level: INFO
+    handlers:
+    - logstash
+    - cloudwatch
+    propagate: false
+  inventory.xjoin:
+    level: WARNING
+    handlers:
+    - logstash
+    propagate: false
+handlers:
+  logstash:
+    class: logging.StreamHandler
+#    formatter: logstash
+    formatter: human_readable
+    stream: ext://sys.stdout
+    filters:
+    - contextual
+  cloudwatch:
+    "()": app.logging.cloudwatch_handler
+    formatter: logstash
+    filters:
+    - contextual
+formatters:
+  human_readable:
+    format: "[%(asctime)s] [%(process)d] [%(thread)d] [%(name)s] [%(levelname)s] %(message)s"
+  logstash:
+    class: logstash_formatter.LogstashFormatterV1
+filters:
+  contextual:
+    "()": app.logging.ContextualFilter
diff --git a/run_gunicorn.py b/run_gunicorn.py
index d0ba06b10..0b7ddefe9 100755
--- a/run_gunicorn.py
+++ b/run_gunicorn.py
@@ -30,7 +30,7 @@ def run_server():
     variables.
     """
     bind = f"0.0.0.0:{LISTEN_PORT}"
-    run(("gunicorn", "--log-config=logconfig.ini", "--log-level=debug", f"--bind={bind}", "run"))
+    run(("gunicorn", "--log-level=debug", f"--bind={bind}", "run"))


 if __name__ == "__main__":