Examples¶
Status Handler¶
Execute with python doc/code/demo_status_handler.py
code/demo_status_handler.py¶
"""Outputs status logs::
2019-03-05 23:53:31 log messages: 1 ERROR, 4 WARNING 5 INFO
2019-03-05 23:53:36 log messages: 4 ERROR, 4 WARNING 6 INFO
2019-03-05 23:53:41 log messages: 6 ERROR, 1 WARNING 6 INFO
2019-03-05 23:53:46 log messages: 2 ERROR, 3 WARNING 6 INFO
2019-03-05 23:53:51 log messages: 3 ERROR, 2 WARNING 6 INFO
2019-03-05 23:53:56 log messages: 0 ERROR, 3 WARNING 5 INFO
"""
import logging
import logging.config
import os
import random
import time
_log = logging.getLogger(__name__)
def _random_log_entry():
    """Emit a single test log record at a randomly chosen severity."""
    emitters = {
        'info': (_log.info, 'testing info logging'),
        'warning': (_log.warning, 'testing warning logging'),
        'error': (_log.error, 'testing error logging'),
    }
    method, message = emitters[random.choice(['info', 'warning', 'error'])]
    method(message)
def generate_logs():
    """Write random log entries for roughly 30 seconds."""
    deadline = time.time() + 30
    while time.time() < deadline:
        _random_log_entry()
        time.sleep(random.random())
def main():
    """Configure logging from the adjacent logging.conf and emit demo logs."""
    # Resolve the config relative to this file so the demo works from any cwd.
    here = os.path.dirname(os.path.abspath(__file__))
    log_config_path = os.path.join(here, 'logging.conf')
    logging.config.fileConfig(log_config_path, disable_existing_loggers=False)
    generate_logs()
# Run the demo when executed as a script.
if __name__ == '__main__':
    main()
code/logging.conf¶
# minimalistic sample configuration demonstrating starlog.StatusHandler
#
# The 'status' handler aggregates all log messages occurring at the 'root'
# logger. Every 5 seconds a status log line is sent by 'starlog.status' to
# its handler 'status_stdout'
[loggers]
keys = root, starlog.status
[handlers]
keys = status, status_stdout
[formatters]
keys = status
[logger_root]
level = NOTSET
handlers = status
[logger_starlog.status]
handlers = status_stdout
qualname = starlog.status
[handler_status]
class = starlog.StatusHandler
args = ()
[handler_status_stdout]
class = StreamHandler
args = (sys.stdout, )
formatter = status
[formatter_status]
format = %(asctime)s log messages: %(ERROR)d ERROR, %(WARNING)d WARNING %(INFO)d INFO
datefmt = %Y-%m-%d %H:%M:%S
ZMQ Handler¶
Execute with python doc/code/demo_multi_processing.py --zmq
code/demo_multi_processing.py¶
from __future__ import print_function
import logging
import logging.config
import os
import random
import multiprocessing
import time
from argparse import ArgumentParser
from starlog import inc
# Defaults for process count and run duration; overwritten in main()
# from the --processes / --duration command-line options.
N = 5
T = 5
def _random_log_entry():
    """Log one test record from a randomly picked logger at a random level."""
    logger = logging.getLogger(
        random.choice(['example', 'example.app', None]))
    picked = random.choice(['info', 'warning', 'error'])
    if picked == 'error':
        logger.error('testing error logging')
    elif picked == 'warning':
        logger.warning('testing warning logging')
    elif picked == 'info':
        # demonstrates starlog's fluent counter API via the `extra` dict
        logger.info('testing info logging',
                    extra=inc('requests').inc('foo').update({'OTHER': True}))
def log_process():
    """Emit random log records for T seconds, then report completion."""
    try:
        deadline = time.time() + T
        while time.time() < deadline:
            _random_log_entry()
            time.sleep(random.random())
        print("%s done" % (os.getpid(), ))
    except KeyboardInterrupt:
        # workers are interrupted by Ctrl-C along with the parent; exit quietly
        pass
def main_loop_with_os_fork():
    """Fork N child processes that each run log_process, then wait for them."""
    print("starting main loop")
    children = []
    for _ in range(N):
        child_pid = os.fork()
        if child_pid == 0:
            # child process: do the logging work, then unwind back to main()
            log_process()
            return
        # parent process: remember the child's pid so we can reap it
        children.append(child_pid)
    _waitpids(children)
def _waitpids(pids):
for pid in pids:
os.waitpid(pid, 0)
def main_loop_with_multiprocessing_process():
    """Start N multiprocessing workers running log_process and join them."""
    print("starting main loop")
    workers = []
    for _ in range(N):
        worker = multiprocessing.Process(target=log_process)
        worker.start()
        workers.append(worker)
    _join_processes(workers)
def _join_processes(px):
# if MultiprocessHandler parameter manager_queue is set to False,
# then this is the pattern to join exited processes. The reason is,
# multiprocessing.Queue sometimes blocks the process at p.join forever
# although it left the run method.
px[0].join(T + 1)
for p in px:
p.join(timeout=1)
for p in px:
if p.is_alive():
print("terminating %s %s" % (p, p.pid))
p.terminate()
p.join()
print("all processes exited")
def get_cli_args():
    """Build and parse the demo's command-line options."""
    parser = ArgumentParser()
    # boolean switches selecting the starlog handler/logger combination
    parser.add_argument('--status', action='store_true', default=False,
                        help='use starlog.StatusLogger')
    parser.add_argument('--zmq', action='store_true', default=False,
                        help='use starlog.ZmqHandler. If not set, then '
                             'starlog.MultiprocessHandler is used. ')
    # numeric knobs for the workload
    parser.add_argument('--duration', type=int, default=5,
                        help='How long the test should run in seconds')
    parser.add_argument('--processes', type=int, default=5,
                        help='The number of processes to start')
    parser.add_argument('--fork', action='store_true', default=False,
                        help='Use os.fork() to create a sub process. Default: use '
                             'multiprocessing.Process')
    return parser.parse_args()
def main():
    """Parse CLI flags, configure logging accordingly, and run the demo."""
    global N, T
    args = get_cli_args()
    N = args.processes
    T = args.duration
    print("configuring logging")
    # resolve config files relative to this script so it runs from any cwd
    here = os.path.dirname(os.path.abspath(__file__))
    configs = {
        # (status, zmq) => log config file
        (False, False): 'logging_multiprocess.conf',
        (True, False): 'logging_multiprocess_status.conf',
        (False, True): 'logging_zmq.conf',
        (True, True): 'logging_zmq_status.conf'}
    basename = configs[(args.status, args.zmq)]
    # Fix: removed a dead assignment that hard-coded log_config_path to
    # 'logging_zmq.conf' before it was immediately overwritten here.
    log_config_path = os.path.join(here, basename)
    logging.config.fileConfig(log_config_path, disable_existing_loggers=False)
    print("using %s" % basename)
    if args.fork:
        if not args.zmq:
            print('\nWARNING: the combination of os.fork and ' +
                  'starlog.MultiprocessHandler is not reliable and thus not ' +
                  'recommend\n')
        main_loop_with_os_fork()
    else:
        main_loop_with_multiprocessing_process()
# Run the demo when executed as a script.
if __name__ == '__main__':
    main()
code/logging_zmq.conf¶
# minimalistic sample configuration demonstrating starlog.ZmqHandler
#
# All log records from any process are forwarded to the 'starlog.logsink'
# logger, where they are delegated to the 'stdout' handler.
[loggers]
keys = root, starlog.logsink, starlog.logsink.example
[handlers]
keys = multiprocess, stdout
[formatters]
keys = generic
[logger_root]
level = NOTSET
handlers = multiprocess
[logger_starlog.logsink]
level = NOTSET
handlers = stdout
propagate = 0
qualname = starlog.logsink
# do filtering / log handling in a central place for 'example' loggers
[logger_starlog.logsink.example]
level = ERROR
handlers =
propagate = 1
qualname = starlog.logsink.example
[handler_multiprocess]
class = starlog.ZmqHandler
# args = ('tcp://127.0.0.1:5557', )
args = ('tcp://127.0.0.1', )
# args = ()
formatter = generic
[handler_stdout]
class = StreamHandler
args = (sys.stdout, )
formatter = generic
[formatter_generic]
format = %(asctime)s [%(name)s-%(process)d] %(levelname)s: %(message)s
datefmt = %Y-%m-%d %H:%M:%S
Lookback Handler¶
Execute with python doc/code/demo_lookback_handler.py
code/demo_lookback_handler.py¶
"""Outputs logs::
2019-03-24 17:50:17 [ INFO] message 3
2019-03-24 17:50:18 [ INFO] message 4
2019-03-24 17:50:18 [ INFO] message 5
2019-03-24 17:50:18 [ ERROR] message 6
2019-03-24 17:50:24 [ INFO] message 16
2019-03-24 17:50:24 [ INFO] message 17
2019-03-24 17:50:25 [ INFO] message 18
2019-03-24 17:50:25 [ ERROR] message 19
2019-03-24 17:50:26 [ INFO] message 20
2019-03-24 17:50:26 [WARNING] message 21
2019-03-24 17:50:27 [WARNING] message 22
2019-03-24 17:50:27 [ ERROR] message 23
"""
import logging
import logging.config
import os
import random
import time
_log = logging.getLogger(__name__)
def _random_log_entry(number):
    """Log 'message <number>' at a random level: ~5% ERROR, ~15% WARNING, ~80% INFO."""
    draw = random.random()
    if draw < 0.05:
        chosen = logging.ERROR
    elif draw < 0.2:
        chosen = logging.WARNING
    else:
        chosen = logging.INFO
    # Logger.log with an explicit level replaces the original per-level calls
    _log.log(chosen, 'message %d', number)
def generate_logs():
    """Emit sequentially numbered random log records for roughly 30 seconds."""
    deadline = time.time() + 30
    counter = 0
    while time.time() < deadline:
        _random_log_entry(counter)
        time.sleep(random.random())
        counter += 1
def main():
    """Configure logging from logging_lookback.conf and emit demo logs."""
    # Resolve the config relative to this file so the demo works from any cwd.
    here = os.path.dirname(os.path.abspath(__file__))
    log_config_path = os.path.join(here, 'logging_lookback.conf')
    logging.config.fileConfig(log_config_path, disable_existing_loggers=False)
    generate_logs()
# Run the demo when executed as a script.
if __name__ == '__main__':
    main()
code/logging_lookback.conf¶
# minimalistic sample configuration demonstrating starlog.LookbackHandler
[loggers]
keys = root
[handlers]
keys = lookback_stdout, stdout
[formatters]
keys = generic
[logger_root]
level = NOTSET
handlers = lookback_stdout
[handler_lookback_stdout]
class = starlog.LookbackHandler
# capacity=100, max_age=5
args = (100, 5)
target = stdout
formatter = generic
[handler_stdout]
class = StreamHandler
args = (sys.stdout, )
formatter = generic
[formatter_generic]
format = %(asctime)s [%(levelname)7s] %(message)s
datefmt = %Y-%m-%d %H:%M:%S
gunicorn demo¶
code/gunicorn/app.py¶
import logging
from flask import Flask
app = Flask(__name__)
_log = logging.getLogger('app')
@app.route("/")
def hello():
    """Handle GET / by logging the request and returning a greeting."""
    _log.info('incoming request')
    return "Hello World!"
# Under gunicorn this module is imported (not run as __main__), so this
# logs once per worker at import time.
if __name__ != '__main__':
    _log.info('my app is starting')
code/gunicorn/log.conf¶
# Log record flow:
# All log records are delegated by the "zmq" handler to the "starlog.logsink"
# logger. The "starlog.logsink" logger delegates all log records to the handlers
# "status" and "log_file". The "status" handler aggregates log records. A status
# line is generated in regular intervals by the logger "starlog.status" where
# it's written to stdout and to a separate status file.
#
# logger "root"
# -> handler "zmq"
# -> logger "starlog.logsink"
# -> handler "log_file"
# -> handler "status"
# -> logger "starlog.status" (every 30 seconds)
# -> handler "status_stdout"
# -> handler "status_log_file"
[loggers]
keys=root, gunicorn.error, gunicorn.access, starlog.status, starlog.logsink
[handlers]
keys=log_file, status, status_log_file, status_stdout, zmq
[formatters]
keys=generic, status
[logger_root]
level=INFO
handlers=zmq
[logger_gunicorn.error]
level=INFO
handlers=
propagate=1
qualname=gunicorn.error
[logger_gunicorn.access]
level=INFO
handlers=
propagate=1
qualname=gunicorn.access
[logger_starlog.status]
level=NOTSET
handlers=status_log_file, status_stdout
propagate=0
qualname=starlog.status
[logger_starlog.logsink]
level=NOTSET
handlers=log_file, status
propagate=0
qualname=starlog.logsink
[handler_zmq]
class = starlog.ZmqHandler
args=('ipc://log.sock',)
formatter=generic
[handler_log_file]
level=DEBUG
class=logging.FileHandler
formatter=generic
args=('app.log', )
[handler_status_log_file]
level=NOTSET
class=logging.FileHandler
formatter=status
args=('status.log', )
[handler_status_stdout]
level=NOTSET
class=logging.StreamHandler
formatter=status
args=(sys.stdout,)
[handler_status]
class = starlog.StatusHandler
args = ('30s',)
formatter = status
[formatter_generic]
format=%(asctime)s [%(process)d:%(name)s:%(lineno)s] [%(levelname)s] %(message)s
datefmt=%Y-%m-%d %H:%M:%S
class=logging.Formatter
[formatter_status]
datefmt=%Y-%m-%d %H:%M:%S
format = %(asctime)s log messages: %(CRITICAL)d CRITICAL, %(ERROR)d ERROR, %(WARNING)d WARNING %(INFO)d INFO, %(DEBUG)d DEBUG.
Output¶
$ gunicorn app:app --log-config log.conf
2019-03-17 15:47:09,927 log messages: 0 CRITICAL, 0 ERROR, 0 WARNING 5 INFO, 0 DEBUG.
2019-03-17 15:47:39,963 log messages: 0 CRITICAL, 0 ERROR, 0 WARNING 0 INFO, 0 DEBUG.
2019-03-17 15:47:55,983 log messages: 0 CRITICAL, 0 ERROR, 0 WARNING 3 INFO, 0 DEBUG.
$ tail -F app.log status.log
==> app.log <==
2019-03-17 15:46:39 [22283:gunicorn.error:271] [INFO] Starting gunicorn 19.9.0
2019-03-17 15:46:39 [22283:gunicorn.error:271] [INFO] Listening at: http://127.0.0.1:8000 (22283)
2019-03-17 15:46:39 [22283:gunicorn.error:271] [INFO] Using worker: sync
2019-03-17 15:46:39 [22291:gunicorn.error:271] [INFO] Booting worker with pid: 22291
2019-03-17 15:46:39 [22291:app:17] [INFO] my app is starting
==> status.log <==
2019-03-17 15:47:09 log messages: 0 CRITICAL, 0 ERROR, 0 WARNING 5 INFO, 0 DEBUG.
2019-03-17 15:47:39 log messages: 0 CRITICAL, 0 ERROR, 0 WARNING 0 INFO, 0 DEBUG.
==> app.log <==
2019-03-17 15:47:54 [22283:gunicorn.error:271] [INFO] Handling signal: int
2019-03-17 15:47:54 [22291:gunicorn.error:271] [INFO] Worker exiting (pid: 22291)
2019-03-17 15:47:54 [22283:gunicorn.error:271] [INFO] Shutting down: Master
==> status.log <==
2019-03-17 15:47:55 log messages: 0 CRITICAL, 0 ERROR, 0 WARNING 3 INFO, 0 DEBUG.