From 015829f52f802a5dd70344a2a280b4d72cd65aa4 Mon Sep 17 00:00:00 2001 From: kyle-verhoog Date: Thu, 12 Nov 2020 01:16:47 -0500 Subject: [PATCH] various doc updates - Add PyPI install instructions - Add an initialization section with example - Add usage sections with examples for each module - Clarify the documentation and usage for ThreadStats - Consistent formatting in examples - Update the Get in Touch section --- README.md | 24 ++++---- datadog/dogstatsd/base.py | 52 ++++++++-------- datadog/threadstats/base.py | 32 +++++----- doc/source/index.rst | 114 +++++++++++++++++++++++++----------- 4 files changed, 134 insertions(+), 88 deletions(-) diff --git a/README.md b/README.md index 45a2fb09e..a184e1dae 100644 --- a/README.md +++ b/README.md @@ -32,15 +32,15 @@ Find below a working example for submitting an event to your Event Stream: from datadog import initialize, api options = { - 'api_key': '', - 'app_key': '' + "api_key": "", + "app_key": "", } initialize(**options) title = "Something big happened!" -text = 'And let me tell you all about it here!' -tags = ['version:1', 'application:web'] +text = "And let me tell you all about it here!" +tags = ["version:1", "application:web"] api.Event.create(title=title, text=text, tags=tags) ``` @@ -63,8 +63,8 @@ from datadog import initialize, api initialize() title = "Something big happened!" -text = 'And let me tell you all about it here!' -tags = ['version:1', 'application:web'] +text = "And let me tell you all about it here!" 
+tags = ["version:1", "application:web"] api.Event.create(title=title, text=text, tags=tags) ``` @@ -83,8 +83,8 @@ Once the Datadog Python Library is installed, instantiate the StatsD client usin from datadog import initialize, statsd options = { - 'statsd_host':'127.0.0.1', - 'statsd_port':8125 + "statsd_host": "127.0.0.1", + "statsd_port": 8125, } initialize(**options) @@ -100,7 +100,7 @@ Once the Datadog Python Library is installed, instantiate the StatsD client usin from datadog import initialize, statsd options = { - 'statsd_socket_path' : PATH_TO_SOCKET + "statsd_socket_path": PATH_TO_SOCKET, } initialize(**options) @@ -161,9 +161,9 @@ size should be used, you can set it with the parameter `max_buffer_len`. Example from datadog import initialize options = { - 'api_key': '', - 'app_key': '', - 'max_buffer_len': 4096 + "api_key": "", + "app_key": "", + "max_buffer_len": 4096, } initialize(**options) diff --git a/datadog/dogstatsd/base.py b/datadog/dogstatsd/base.py index f81736791..d9802dc0d 100644 --- a/datadog/dogstatsd/base.py +++ b/datadog/dogstatsd/base.py @@ -232,10 +232,10 @@ def resolve_host(host, use_default_route): """ Resolve the DogStatsd host. - Args: - host (string): host - use_default_route (bool): use the system default route as host - (overrides the `host` parameter) + :param host: host + :type host: string + :param use_default_route: use the system default route as host (overrides the `host` parameter) + :type use_default_route: bool """ if not use_default_route: return host @@ -271,8 +271,8 @@ def open_buffer(self, max_buffer_size=None): You can also use this as a context manager. 
>>> with DogStatsd() as batch: - >>> batch.gauge('users.online', 123) - >>> batch.gauge('active.connections', 1001) + >>> batch.gauge("users.online", 123) + >>> batch.gauge("active.connections", 1001) """ if max_buffer_size is not None: log.warning("The parameter max_buffer_size is now deprecated and is not used anymore") @@ -301,8 +301,8 @@ def gauge( Record the value of a gauge, optionally setting a list of tags and a sample rate. - >>> statsd.gauge('users.online', 123) - >>> statsd.gauge('active.connections', 1001, tags=["protocol:http"]) + >>> statsd.gauge("users.online", 123) + >>> statsd.gauge("active.connections", 1001, tags=["protocol:http"]) """ return self._report(metric, 'g', value, tags, sample_rate) @@ -317,8 +317,8 @@ def increment( Increment a counter, optionally setting a value, tags and a sample rate. - >>> statsd.increment('page.views') - >>> statsd.increment('files.transferred', 124) + >>> statsd.increment("page.views") + >>> statsd.increment("files.transferred", 124) """ self._report(metric, 'c', value, tags, sample_rate) @@ -327,8 +327,8 @@ def decrement(self, metric, value=1, tags=None, sample_rate=None): Decrement a counter, optionally setting a value, tags and a sample rate. - >>> statsd.decrement('files.remaining') - >>> statsd.decrement('active.connections', 2) + >>> statsd.decrement("files.remaining") + >>> statsd.decrement("active.connections", 2) """ metric_value = -value if value else value self._report(metric, 'c', metric_value, tags, sample_rate) @@ -337,8 +337,8 @@ def histogram(self, metric, value, tags=None, sample_rate=None): """ Sample a histogram value, optionally setting tags and a sample rate. 
- >>> statsd.histogram('uploaded.file.size', 1445) - >>> statsd.histogram('album.photo.count', 26, tags=["gender:female"]) + >>> statsd.histogram("uploaded.file.size", 1445) + >>> statsd.histogram("album.photo.count", 26, tags=["gender:female"]) """ self._report(metric, 'h', value, tags, sample_rate) @@ -346,8 +346,8 @@ def distribution(self, metric, value, tags=None, sample_rate=None): """ Send a global distribution value, optionally setting tags and a sample rate. - >>> statsd.distribution('uploaded.file.size', 1445) - >>> statsd.distribution('album.photo.count', 26, tags=["gender:female"]) + >>> statsd.distribution("uploaded.file.size", 1445) + >>> statsd.distribution("album.photo.count", 26, tags=["gender:female"]) """ self._report(metric, 'd', value, tags, sample_rate) @@ -368,13 +368,13 @@ def timed(self, metric=None, tags=None, sample_rate=None, use_ms=None): manager. :: - @statsd.timed('user.query.time', sample_rate=0.5) + @statsd.timed("user.query.time", sample_rate=0.5) def get_user(user_id): # Do what you need to ... pass # Is equivalent to ... - with statsd.timed('user.query.time', sample_rate=0.5): + with statsd.timed("user.query.time", sample_rate=0.5): # Do what you need to ... pass @@ -383,7 +383,7 @@ def get_user(user_id): try: get_user(user_id) finally: - statsd.timing('user.query.time', time.time() - start) + statsd.timing("user.query.time", time.time() - start) """ return TimedContextManagerDecorator(self, metric, tags, sample_rate, use_ms) @@ -396,13 +396,13 @@ def distributed(self, metric=None, tags=None, sample_rate=None, use_ms=None): The metric is required as a context manager. :: - @statsd.distributed('user.query.time', sample_rate=0.5) + @statsd.distributed("user.query.time", sample_rate=0.5) def get_user(user_id): # Do what you need to ... pass # Is equivalent to ... - with statsd.distributed('user.query.time', sample_rate=0.5): + with statsd.distributed("user.query.time", sample_rate=0.5): # Do what you need to ... 
pass @@ -411,7 +411,7 @@ def get_user(user_id): try: get_user(user_id) finally: - statsd.distribution('user.query.time', time.time() - start) + statsd.distribution("user.query.time", time.time() - start) """ return DistributedContextManagerDecorator(self, metric, tags, sample_rate, use_ms) @@ -419,7 +419,7 @@ def set(self, metric, value, tags=None, sample_rate=None): """ Sample a set value. - >>> statsd.set('visitors.uniques', 999) + >>> statsd.set("visitors.uniques", 999) """ self._report(metric, 's', value, tags, sample_rate) @@ -569,8 +569,8 @@ def event(self, title, text, alert_type=None, aggregation_key=None, Send an event. Attributes are the same as the Event API. http://docs.datadoghq.com/api/ - >>> statsd.event('Man down!', 'This server needs assistance.') - >>> statsd.event('The web server restarted', 'The web server is up again', alert_type='success') # NOQA + >>> statsd.event("Man down!", "This server needs assistance.") + >>> statsd.event("The web server restarted", "The web server is up again", alert_type="success") # NOQA """ title = self._escape_event_content(title) text = self._escape_event_content(text) @@ -607,7 +607,7 @@ def service_check(self, check_name, status, tags=None, timestamp=None, """ Send a service check run. - >>> statsd.service_check('my_service.check_name', DogStatsd.WARNING) + >>> statsd.service_check("my_service.check_name", DogStatsd.WARNING) """ message = self._escape_service_check_message(message) if message is not None else '' diff --git a/datadog/threadstats/base.py b/datadog/threadstats/base.py index 87421a061..0d1ce835a 100644 --- a/datadog/threadstats/base.py +++ b/datadog/threadstats/base.py @@ -88,10 +88,10 @@ def start(self, flush_interval=10, roll_up_interval=10, device=None, using datadog module ``initialize`` method. 
>>> from datadog import initialize, ThreadStats - >>> initialize(api_key='my_api_key') + >>> initialize(api_key="my_api_key") >>> stats = ThreadStats() >>> stats.start() - >>> stats.increment('home.page.hits') + >>> stats.increment("home.page.hits") :param flush_interval: The number of seconds to wait between flushes. :type flush_interval: int @@ -145,9 +145,9 @@ def event(self, title, text, alert_type=None, aggregation_key=None, """ Send an event. Attributes are the same as the Event API. (http://docs.datadoghq.com/api/) - >>> stats.event('Man down!', 'This server needs assistance.') - >>> stats.event('The web server restarted', \ - 'The web server is up again', alert_type='success') + >>> stats.event("Man down!", "This server needs assistance.") + >>> stats.event("The web server restarted", \ + "The web server is up again", alert_type="success") """ if not self._disabled: # Append all client level tags to every event @@ -171,8 +171,8 @@ def gauge(self, metric_name, value, timestamp=None, tags=None, sample_rate=1, ho such as total hard disk space, process uptime, total number of active users, or number of rows in a database table. - >>> stats.gauge('process.uptime', time.time() - process_start_time) - >>> stats.gauge('cache.bytes.free', cache.get_free_bytes(), tags=['version:1.0']) + >>> stats.gauge("process.uptime", time.time() - process_start_time) + >>> stats.gauge("cache.bytes.free", cache.get_free_bytes(), tags=["version:1.0"]) """ if not self._disabled: self._metric_aggregator.add_point(metric_name, tags, timestamp or time(), value, Gauge, @@ -184,7 +184,7 @@ def set(self, metric_name, value, timestamp=None, tags=None, sample_rate=1, host flushed as a gauge to Datadog. Optionally, specify a set of tags to associate with the metric. 
- >>> stats.set('example_metric.set', "value_1", tags=['environement:dev']) + >>> stats.set("example_metric.set", "value_1", tags=["environment:dev"]) """ if not self._disabled: self._metric_aggregator.add_point(metric_name, tags, timestamp or time(), value, Set, @@ -208,8 +208,8 @@ def decrement(self, metric_name, value=1, timestamp=None, tags=None, sample_rate Decrement a counter, optionally setting a value, tags and a sample rate. - >>> stats.decrement('files.remaining') - >>> stats.decrement('active.connections', 2) + >>> stats.decrement("files.remaining") + >>> stats.decrement("active.connections", 2) """ if not self._disabled: self._metric_aggregator.add_point(metric_name, tags, timestamp or time(), -value, @@ -222,7 +222,7 @@ def histogram(self, metric_name, value, timestamp=None, tags=None, sample_rate=1 average, count and the 75/85/95/99 percentiles. Optionally, specify a list of ``tags`` to associate with the metric. - >>> stats.histogram('uploaded_file.size', uploaded_file.size()) + >>> stats.histogram("uploaded_file.size", uploaded_file.size()) """ if not self._disabled: self._metric_aggregator.add_point(metric_name, tags, timestamp or time(), value, @@ -235,7 +235,7 @@ def distribution(self, metric_name, value, timestamp=None, tags=None, sample_rat median, average, count and the 50/75/90/95/99 percentiles. Optionally, specify a list of ``tags`` to associate with the metric. - >>> stats.distribution('uploaded_file.size', uploaded_file.size()) + >>> stats.distribution("uploaded_file.size", uploaded_file.size()) """ if not self._disabled: self._metric_aggregator.add_point(metric_name, tags, timestamp or time(), value, @@ -259,7 +259,7 @@ def timer(self, metric_name, sample_rate=1, tags=None, host=None): :: def get_user(user_id): - with stats.timer('user.query.time'): + with stats.timer("user.query.time"): # Do what you need to ... pass @@ -270,7 +270,7 @@ def get_user(user_id): # Do what you need to ... 
pass finally: - stats.histogram('user.query.time', time.time() - start) + stats.histogram("user.query.time", time.time() - start) """ start = time() try: @@ -286,7 +286,7 @@ def timed(self, metric_name, sample_rate=1, tags=None, host=None): Optionally specify a list of tags to associate with the metric. :: - @stats.timed('user.query.time') + @stats.timed("user.query.time") def get_user(user_id): # Do what you need to ... pass @@ -296,7 +296,7 @@ def get_user(user_id): try: get_user(user_id) finally: - stats.histogram('user.query.time', time.time() - start) + stats.histogram("user.query.time", time.time() - start) """ def wrapper(func): diff --git a/doc/source/index.rst b/doc/source/index.rst index 327411f9b..265a80a05 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -1,29 +1,68 @@ -############################################### -:mod:`datadog` --- The Datadog's Python library -############################################### +######################################### +:mod:`datadog` --- Datadog Python library +######################################### .. module:: datadog -The :mod:`datadog` module provides :mod:`datadog.api` - a simple wrapper around Datadog's HTTP API - :mod:`datadog.threadstats` - a tool for collecting metrics in high performance applications - and :mod:`datadog.dogstatsd` a DogStatsd Python client. +The :mod:`datadog` module provides + - :mod:`datadog.api`: a client for Datadog's HTTP API. + - :mod:`datadog.dogstatsd`: a DogStatsd client. + - :mod:`datadog.threadstats`: a DogStatsd client that submits metrics in a + worker thread. + Installation ============ -To install from source, `download `_ a distribution and run: +Install from PyPI:: + + pip install datadog + - >>> sudo python setup.py install +Initialization +============== -If you use `virtualenv `_ you do not need to use sudo. +:mod:`datadog` must be initialized with :meth:`datadog.initialize`. An API key +and an app key are required. 
These can be passed explicitly to +:meth:`datadog.initialize` or defined as environment variables +``DATADOG_API_KEY`` and ``DATADOG_APP_KEY`` respectively. -Datadog.api module -================== -Datadog.api is a Python client library for Datadog's `HTTP API `_. +Here's an example where the statsd host and port are configured as well:: -Datadog.api client requires to run :mod:`datadog` `initialize` method first. + from datadog import initialize + + initialize( + api_key="", + app_key="", + statsd_host="127.0.0.1", + statsd_port=8125, + ) .. autofunction:: datadog.initialize + +datadog.api +=========== +:mod:`datadog.api` is a Python client library for Datadog's `HTTP API +`_. + + +Usage +~~~~~ + +Be sure to initialize the client using :meth:`datadog.initialize` and then use +:mod:`datadog.api`:: + + from datadog import api + + api.Event.create( + title="Something big happened!", + text="And let me tell you all about it here!", + tags=["version:1", "application:web"], + ) + + .. autoclass:: datadog.api.Comment :members: :inherited-members: @@ -90,22 +129,36 @@ Datadog.api client requires to run :mod:`datadog` `initialize` method first. :inherited-members: -Datadog.threadstats module -========================== -Datadog.threadstats is a tool for collecting application metrics without hindering performance. -It collects metrics in the application thread with very little overhead and allows flushing -metrics in process, in a thread or in a greenlet, depending on your application's needs. +datadog.threadstats +=================== +:mod:`datadog.threadstats` is a DogStatsd client that aggregates metrics when +possible and submits them asynchronously in order to minimize the performance +impact on the application. Submitting metrics can be done with a worker thread +or in a greenlet. -To run properly Datadog.threadstats requires to run :mod:`datadog` `initialize` method first. -.. 
autofunction:: datadog.initialize +Usage +~~~~~ + +Be sure to initialize the library with :meth:`datadog.initialize`. Then create +an instance of :class:`datadog.threadstats.ThreadStats`:: + + from datadog.threadstats import ThreadStats + + statsd = ThreadStats() + statsd.start() # Creates a worker thread used to submit metrics. + + # Use statsd just like any other DogStatsd client. + statsd.increment("home.page.hits") + .. autoclass:: datadog.threadstats.base.ThreadStats :members: :inherited-members: -Datadog.dogstatsd module -========================== + +datadog.dogstatsd +================= .. autoclass:: datadog.dogstatsd.base.DogStatsd :members: @@ -114,24 +167,17 @@ Datadog.dogstatsd module .. data:: statsd - A global :class:`~datadog.dogstatsd.base.DogStatsd` instance that is easily shared - across an application's modules. Initialize this once in your application's - set-up code and then other modules can import and use it without further - configuration. + A global :class:`~datadog.dogstatsd.base.DogStatsd` instance that can be + used across an application:: >>> from datadog import initialize, statsd - >>> initialize(statsd_host='localhost', statsd_port=8125) - >>> statsd.increment('home.page.hits') - - - -Source -====== + >>> initialize(statsd_host="localhost", statsd_port=8125) + >>> statsd.increment("home.page.hits") -The Datadog's Python library source is freely available on Github. Check it out `here -`_. Get in Touch ============ -If you'd like to suggest a feature or report a bug, please add an issue `here `_. If you want to talk about Datadog in general, reach out at `datadoghq.com `_. +If you'd like to suggest a feature or report a bug, please submit an issue +`here `_. If you have questions +about Datadog in general, reach out to support@datadoghq.com.