diff --git a/datadog/api/__init__.py b/datadog/api/__init__.py index 31ca0bc5b..6c28dd46b 100644 --- a/datadog/api/__init__.py +++ b/datadog/api/__init__.py @@ -35,3 +35,4 @@ from datadog.api.service_checks import ServiceCheck from datadog.api.tags import Tag from datadog.api.users import User +from datadog.api.service_level_objectives import ServiceLevelObjective \ No newline at end of file diff --git a/datadog/api/api_client.py b/datadog/api/api_client.py index 9f4f3aa71..c27b1d21d 100644 --- a/datadog/api/api_client.py +++ b/datadog/api/api_client.py @@ -46,7 +46,7 @@ def _get_http_client(cls): @classmethod def submit(cls, method, path, api_version=None, body=None, attach_host_name=False, - response_formatter=None, error_formatter=None, **params): + response_formatter=None, error_formatter=None, suppress_response_errors_on_codes=None, **params): """ Make an HTTP API request @@ -70,6 +70,10 @@ def submit(cls, method, path, api_version=None, body=None, attach_host_name=Fals :param attach_host_name: link the new resource object to the host name :type attach_host_name: bool + :param suppress_response_errors_on_codes: suppress ApiError on `errors` key in the response for the given HTTP + status codes + :type suppress_response_errors_on_codes: None|list(int) + :param params: dictionary to be sent in the query string of the request :type params: dictionary @@ -151,7 +155,10 @@ def submit(cls, method, path, api_version=None, body=None, attach_host_name=Fals raise ValueError('Invalid JSON response: {0}'.format(content)) if response_obj and 'errors' in response_obj: - raise ApiError(response_obj) + # suppress ApiError when specified and just return the response + if not (suppress_response_errors_on_codes and + result.status_code in suppress_response_errors_on_codes): + raise ApiError(response_obj) else: response_obj = None @@ -177,7 +184,7 @@ def submit(cls, method, path, api_version=None, body=None, attach_host_name=Fals raise except ApiError as e: if _mute: - for error 
in e.args[0]['errors']: + for error in (e.args[0].get('errors') or []): log.error(error) if error_formatter is None: return e.args[0] diff --git a/datadog/api/service_level_objectives.py b/datadog/api/service_level_objectives.py new file mode 100644 index 000000000..d2b8bc6bc --- /dev/null +++ b/datadog/api/service_level_objectives.py @@ -0,0 +1,139 @@ +from datadog.api.resources import ( + GetableAPIResource, + CreateableAPIResource, + UpdatableAPIResource, + ListableAPIResource, + DeletableAPIResource, + ActionAPIResource, +) + + +class ServiceLevelObjective( + GetableAPIResource, + CreateableAPIResource, + UpdatableAPIResource, + ListableAPIResource, + DeletableAPIResource, + ActionAPIResource, +): + """ + A wrapper around Service Level Objective HTTP API. + """ + + _resource_name = "slo" + + @classmethod + def create( + cls, attach_host_name=False, method="POST", id=None, params=None, **body + ): + """ + Create a SLO + + :returns: created SLO details + """ + return super(ServiceLevelObjective, cls).create( + attach_host_name=False, method="POST", id=None, params=params, **body + ) + + @classmethod + def get(cls, id, **params): + """ + Get a specific SLO details. + + :param id: SLO id to get details for + :type id: str + + :returns: SLO details + """ + return super(ServiceLevelObjective, cls).get(id, **params) + + @classmethod + def get_all(cls, query=None, ids=None, offset=0, limit=100, **params): + """ + Get all SLO details. + + :param query: optional search query - syntax in UI && online documentation + :type query: str + + :param ids: optional list of SLO ids to get many specific SLOs at once. 
+ :type ids: list(str) + + :param offset: offset of results to use (default 0) + :type offset: int + + :param limit: limit of results to return (default: 1000) + :type limit: int + + :returns: SLOs matching the query + """ + search_terms = {} + if query: + search_terms["query"] = query + if ids: + search_terms["ids"] = ids + search_terms["offset"] = offset + search_terms["limit"] = limit + + return super(ServiceLevelObjective, cls).get_all(**search_terms) + + @classmethod + def update(cls, id, params=None, **body): + """ + Update a specific SLO details. + + :param id: SLO id to update details for + :type id: str + + :returns: SLO details + """ + return super(ServiceLevelObjective, cls).update(id, params, **body) + + @classmethod + def delete(cls, id, **params): + """ + Delete a specific SLO. + + :param id: SLO id to delete + :type id: str + + :returns: SLO ids removed + """ + return super(ServiceLevelObjective, cls).delete(id, **params) + + @classmethod + def bulk_delete(cls, ops, **params): + """ + Bulk Delete Timeframes from multiple SLOs. + + :param ops: a dictionary mapping of SLO ID to timeframes to remove. 
+ :type ops: dict(str, list(str)) + + :returns: Dictionary representing the API's JSON response + `errors` - errors with operation + `data` - updates and deletions + """ + return super(ServiceLevelObjective, cls)._trigger_class_action( + "POST", + "bulk_delete", + body=ops, + params=params, + suppress_response_errors_on_codes=[200], + ) + + @classmethod + def delete_many(cls, ids, **params): + """ + Delete Multiple SLOs + + :param ids: a list of SLO IDs to remove + :type ids: list(str) + + :returns: Dictionary representing the API's JSON response see `data` list(slo ids) && `errors` + """ + return super(ServiceLevelObjective, cls)._trigger_class_action( + "DELETE", + "", + params=params, + body={"ids": ids}, + suppress_response_errors_on_codes=[200], + ) diff --git a/datadog/dogshell/service_level_objective.py b/datadog/dogshell/service_level_objective.py new file mode 100644 index 000000000..bc8e71066 --- /dev/null +++ b/datadog/dogshell/service_level_objective.py @@ -0,0 +1,407 @@ +# stdlib +import argparse +import json + +# 3p +from datadog.util.cli import set_of_ints, comma_set, comma_list_or_empty +from datadog.util.format import pretty_json + +# datadog +from datadog import api +from datadog.dogshell.common import report_errors, report_warnings + + +class MonitorClient(object): + @classmethod + def setup_parser(cls, subparsers): + parser = subparsers.add_parser( + "service_level_objective", + help="Create, edit, and delete service level objectives", + ) + + verb_parsers = parser.add_subparsers(title="Verbs", dest="verb") + verb_parsers.required = True + + create_parser = verb_parsers.add_parser("create", help="Create a SLO") + create_parser.add_argument( + "--type", + required=True, + help="type of the SLO, e.g.", + choices=["metric", "monitor"], + ) + create_parser.add_argument("--name", help="name of the SLO", default=None) + create_parser.add_argument( + "--description", help="description of the SLO", default=None + ) + create_parser.add_argument( + 
"--tags", + help="comma-separated list of tags", + default=None, + type=comma_list_or_empty, + ) + create_parser.add_argument( + "--thresholds", + help="comma separated list of :[:[:[:]]", + required=True, + ) + create_parser.add_argument( + "--numerator", + help="numerator metric query (sum of good events)", + default=None, + ) + create_parser.add_argument( + "--denominator", + help="denominator metric query (sum of total events)", + default=None, + ) + create_parser.add_argument( + "--monitor_ids", + help="explicit monitor_ids to use (CSV)", + default=None, + type=set_of_ints, + ) + create_parser.add_argument( + "--monitor_search", help="monitor search terms to use", default=None + ) + create_parser.add_argument( + "--groups", + help="for a single monitor you can specify the specific groups as a pipe (|) delimited string", + default=None, + type=comma_list_or_empty, + ) + create_parser.set_defaults(func=cls._create) + + file_create_parser = verb_parsers.add_parser( + "fcreate", help="Create a SLO from file" + ) + file_create_parser.add_argument( + "file", help="json file holding all details", type=argparse.FileType("r") + ) + file_create_parser.set_defaults(func=cls._file_create) + + update_parser = verb_parsers.add_parser("update", help="Update existing SLO") + update_parser.add_argument( + "slo_id", help="SLO to replace with the new definition" + ) + update_parser.add_argument( + "--type", + required=True, + help="type of the SLO (must specify it's original type)", + choices=["metric", "monitor"], + ) + update_parser.add_argument("--name", help="name of the SLO", default=None) + update_parser.add_argument( + "--description", help="description of the SLO", default=None + ) + create_parser.add_argument( + "--thresholds", + help="comma separated list of :[:[:[:]]", + required=True, + ) + update_parser.add_argument( + "--tags", + help="comma-separated list of tags", + default=None, + type=comma_list_or_empty, + ) + update_parser.add_argument( + "--numerator", + 
help="numerator metric query (sum of good events)", + default=None, + ) + update_parser.add_argument( + "--denominator", + help="denominator metric query (sum of total events)", + default=None, + ) + update_parser.add_argument( + "--monitor_ids", + help="explicit monitor_ids to use (CSV)", + default=None, + type=set_of_ints, + ) + update_parser.add_argument( + "--monitor_search", help="monitor search terms to use", default=None + ) + update_parser.add_argument( + "--groups", + help="for a single monitor you can specify the specific groups as a pipe (|) delimited string", + default=None, + ) + update_parser.set_defaults(func=cls._update) + + file_update_parser = verb_parsers.add_parser( + "fupdate", help="Update existing SLO from file" + ) + file_update_parser.add_argument( + "file", help="json file holding all details", type=argparse.FileType("r") + ) + file_update_parser.set_defaults(func=cls._file_update) + + show_parser = verb_parsers.add_parser("show", help="Show a SLO definition") + show_parser.add_argument("slo_id", help="SLO to show") + show_parser.set_defaults(func=cls._show) + + show_all_parser = verb_parsers.add_parser( + "show_all", help="Show a list of all SLOs" + ) + show_all_parser.add_argument( + "--query", help="string to filter SLOs by query (see UI or documentation)" + ) + show_all_parser.add_argument( + "--slo_ids", + help="comma separated list indicating what SLO IDs to get at once", + type=comma_set, + ) + show_all_parser.add_argument( + "--offset", help="offset of query pagination", default=0 + ) + show_all_parser.add_argument( + "--limit", help="limit of query pagination", default=100 + ) + show_all_parser.set_defaults(func=cls._show_all) + + delete_parser = verb_parsers.add_parser("delete", help="Delete a SLO") + delete_parser.add_argument("slo_id", help="SLO to delete") + delete_parser.set_defaults(func=cls._delete) + + delete_many_parser = verb_parsers.add_parser("delete_many", help="Delete a SLO") + delete_many_parser.add_argument( + 
"slo_ids", help="comma separated list of SLO IDs to delete", type=comma_set + ) + delete_many_parser.set_defaults(func=cls._delete_many) + + delete_timeframe_parser = verb_parsers.add_parser( + "delete_many", help="Delete a SLO timeframe" + ) + delete_timeframe_parser.add_argument("slo_id", help="SLO ID to update") + delete_timeframe_parser.add_argument( + "timeframes", + help="CSV of timeframes to delete, e.g. 7d,30d,90d", + required=True, + type=comma_set, + ) + delete_timeframe_parser.set_defaults(func=cls._delete_timeframe) + + @classmethod + def _create(cls, args): + api._timeout = args.timeout + format = args.format + + params = {"type": args.type, "name": args.name} + + if args.tags: + tags = sorted(set([t.strip() for t in args.tags.split(",") if t.strip()])) + params["tags"] = tags + + thresholds = [] + for threshold_str in args.thresholds.split(","): + parts = threshold_str.split(":") + timeframe = parts[0] + target = float(parts[1]) + + threshold = {"timeframe": timeframe, "target": target} + + if len(parts) > 2: + threshold["warning"] = float(parts[2]) + + if len(parts) > 3 and parts[3]: + threshold["target_display"] = parts[3] + + if len(parts) > 4 and parts[4]: + threshold["warning_display"] = parts[4] + + thresholds.append(threshold) + params["thresholds"] = thresholds + + if args.description: + params["description"] = args.description + + if args.type == "metric": + params["query"] = { + "numerator": args.numerator, + "denominator": args.denominator, + } + elif args.monitor_search: + params["monitor_search"] = args.monitor_search + else: + params["monitor_ids"] = args.monitor_ids + if args.groups and len(args.monitor_ids) == 1: + groups = args.groups.split("|") + params["groups"] = groups + + if args.tags: + params["tags"] = args.tags + + res = api.ServiceLevelObjective.create(return_raw=True, **params) + report_warnings(res) + report_errors(res) + if format == "pretty": + print(pretty_json(res)) + else: + print(json.dumps(res)) + + @classmethod + 
def _file_create(cls, args): + api._timeout = args.timeout + format = args.format + slo = json.load(args.file) + res = api.ServiceLevelObjective.create(return_raw=True, **slo) + report_warnings(res) + report_errors(res) + if format == "pretty": + print(pretty_json(res)) + else: + print(json.dumps(res)) + + @classmethod + def _update(cls, args): + api._timeout = args.timeout + format = args.format + + params = {"type": args.type} + + if args.thresholds: + thresholds = [] + for threshold_str in args.thresholds.split(","): + parts = threshold_str.split(":") + timeframe = parts[0] + target = float(parts[1]) + + threshold = {"timeframe": timeframe, "target": target} + + if len(parts) > 2: + threshold["warning"] = float(parts[2]) + + if len(parts) > 3 and parts[3]: + threshold["target_display"] = parts[3] + + if len(parts) > 4 and parts[4]: + threshold["warning_display"] = parts[4] + + thresholds.append(threshold) + params["thresholds"] = thresholds + + if args.description: + params["description"] = args.description + + if args.type == "metric": + if args.numerator and args.denominator: + params["query"] = { + "numerator": args.numerator, + "denominator": args.denominator, + } + elif args.monitor_search: + params["monitor_search"] = args.monitor_search + else: + params["monitor_ids"] = args.monitor_ids + if args.groups and len(args.monitor_ids) == 1: + groups = args.groups.split("|") + params["groups"] = groups + + if args.tags: + tags = sorted(set([t.strip() for t in args.tags if t.strip()])) + params["tags"] = tags + res = api.ServiceLevelObjective.update(args.slo_id, return_raw=True, **params) + report_warnings(res) + report_errors(res) + if format == "pretty": + print(pretty_json(res)) + else: + print(json.dumps(res)) + + @classmethod + def _file_update(cls, args): + api._timeout = args.timeout + format = args.format + slo = json.load(args.file) + + res = api.ServiceLevelObjective.update(slo["id"], return_raw=True, **slo) + report_warnings(res) + report_errors(res) + if
format == "pretty": + print(pretty_json(res)) + else: + print(json.dumps(res)) + + @classmethod + def _show(cls, args): + api._timeout = args.timeout + format = args.format + res = api.ServiceLevelObjective.get(args.slo_id, return_raw=True) + report_warnings(res) + report_errors(res) + + if args.string_ids: + res["id"] = str(res["id"]) + + if format == "pretty": + print(pretty_json(res)) + else: + print(json.dumps(res)) + + @classmethod + def _show_all(cls, args): + api._timeout = args.timeout + format = args.format + + params = {"offset": args.offset, "limit": args.limit} + if args.query: + params["query"] = args.query + else: + params["ids"] = args.slo_ids + + res = api.ServiceLevelObjective.get_all(return_raw=True, **params) + report_warnings(res) + report_errors(res) + + if format == "pretty": + print(pretty_json(res)) + else: + print(json.dumps(res)) + + @classmethod + def _delete(cls, args): + api._timeout = args.timeout + res = api.ServiceLevelObjective.delete(args.slo_id, return_raw=True) + if res is not None: + report_warnings(res) + report_errors(res) + + if format == "pretty": + print(pretty_json(res)) + else: + print(json.dumps(res)) + + @classmethod + def _delete_many(cls, args): + api._timeout = args.timeout + res = api.ServiceLevelObjective.delete_many(args.slo_ids) + if res is not None: + report_warnings(res) + report_errors(res) + + if format == "pretty": + print(pretty_json(res)) + else: + print(json.dumps(res)) + + @classmethod + def _delete_timeframe(cls, args): + api._timeout = args.timeout + + ops = {args.slo_id: args.timeframes} + + res = api.ServiceLevelObjective.bulk_delete(ops) + if res is not None: + report_warnings(res) + report_errors(res) + + if format == "pretty": + print(pretty_json(res)) + else: + print(json.dumps(res)) + + @classmethod + def _escape(cls, s): + return s.replace("\r", "\\r").replace("\n", "\\n").replace("\t", "\\t") diff --git a/datadog/util/cli.py b/datadog/util/cli.py new file mode 100644 index 000000000..b9e8ffa00 
--- /dev/null +++ b/datadog/util/cli.py @@ -0,0 +1,52 @@ +from argparse import ArgumentTypeError +import json + + +def comma_list(list_str, item_func=None): + if not list_str: + raise ArgumentTypeError("Invalid comma list") + item_func = item_func or (lambda i: i) + return [item_func(i.strip()) for i in list_str.split(",") if i.strip()] + + +def comma_set(list_str, item_func=None): + return set(comma_list(list_str, item_func=item_func)) + + +def comma_list_or_empty(list_str): + if not list_str: + return [] + else: + return comma_list(list_str) + + +def list_of_ints(int_csv): + if not int_csv: + raise ArgumentTypeError("Invalid list of ints") + try: + # Try as a [1, 2, 3] list + j = json.loads(int_csv) + if isinstance(j, (list, set)): + j = [int(i) for i in j] + return j + except Exception: + pass + + try: + return [int(i.strip()) for i in int_csv.strip().split(",")] + except Exception: + raise ArgumentTypeError("Invalid list of ints: {0}".format(int_csv)) + + +def list_of_ints_and_strs(csv): + def int_or_str(item): + try: + return int(item) + except ValueError: + return item + + return comma_list(csv, int_or_str) + + +def set_of_ints(int_csv): + return set(list_of_ints(int_csv)) diff --git a/tests/integration/api/test_api.py b/tests/integration/api/test_api.py index 213e78c42..212842294 100644 --- a/tests/integration/api/test_api.py +++ b/tests/integration/api/test_api.py @@ -397,6 +397,28 @@ def test_monitor_crud(self): assert dog.Monitor.delete(monitor["id"]) == {"deleted_monitor_id": monitor["id"]} + def test_service_level_objective_crud(self): + numerator = "sum:my.custom.metric{type:good}.as_count()" + denominator = "sum:my.custom.metric{*}.as_count()" + query = {"numerator": numerator, "denominator": denominator} + thresholds = [{"timeframe": "7d", "target": 90}] + name = "test SLO {}".format(time.time()) + slo = dog.ServiceLevelObjective.create(type="metric", query=query, thresholds=thresholds, name=name, + tags=["type:test"])["data"][0] + assert slo["name"] 
== name + + numerator2 = "sum:my.custom.metric{type:good,!type:ignored}.as_count()" + denominator2 = "sum:my.custom.metric{!type:ignored}.as_count()" + query = {"numerator": numerator2, "denominator": denominator2} + slo = dog.ServiceLevelObjective.update(id=slo["id"], type="metric", query=query, thresholds=thresholds, + name=name, tags=["type:test"])["data"][0] + assert slo["name"] == name + slos = [s for s in dog.ServiceLevelObjective.get_all()["data"] if s["id"] == slo["id"]] + assert len(slos) == 1 + + assert dog.ServiceLevelObjective.get(slo["id"])["data"]["id"] == slo["id"] + dog.ServiceLevelObjective.delete(slo["id"]) + @pytest.mark.admin_needed def test_monitor_muting(self): query1 = "avg(last_1h):sum:system.net.bytes_rcvd{host:host0} > 100" diff --git a/tests/unit/api/helper.py b/tests/unit/api/helper.py index 1ffd95760..1af83ed94 100644 --- a/tests/unit/api/helper.py +++ b/tests/unit/api/helper.py @@ -1,4 +1,5 @@ # stdlib +from io import BytesIO import unittest import json @@ -22,7 +23,7 @@ DeletableAPISubResource, ActionAPIResource ) -from datadog.util.compat import iteritems +from datadog.util.compat import iteritems, is_p3k from tests.util.contextmanagers import EnvVars @@ -134,6 +135,19 @@ def setUp(self): def tearDown(self): RequestClient._session = None + def load_request_response(self, status_code=200, response_body='{}', raise_for_status=False): + """ + Load the repsonse body from the given payload + """ + mock_response = MockResponse(raise_for_status=raise_for_status) + if is_p3k(): + mock_response.raw = BytesIO(bytes(response_body, 'utf-8')) + else: + mock_response.raw = BytesIO(response_body) + mock_response.status_code = status_code + + self.request_mock.request = Mock(return_value=mock_response) + def arm_requests_to_raise(self): """ Arm the mocked request to raise for status. 
diff --git a/tests/unit/api/test_api.py b/tests/unit/api/test_api.py index 636dcad70..540a333dc 100644 --- a/tests/unit/api/test_api.py +++ b/tests/unit/api/test_api.py @@ -78,6 +78,19 @@ def test_get_hostname(self, mock_config_path): initialize() self.assertEqual(api._host_name, HOST_NAME, api._host_name) + def test_errors_suppressed(self): + """ + API `errors` field ApiError supppressed when specified + """ + # Test API, application keys, API host, and some HTTP client options + initialize(api_key=API_KEY, app_key=APP_KEY, api_host=API_HOST) + + # Make a simple API call + self.load_request_response(response_body='{"data": {}, "errors": ["foo error"]}') + resp = MyCreatable.create(params={"suppress_response_errors_on_codes": [200]}) + self.assertNotIsInstance(resp, ApiError) + self.assertDictEqual({"data": {}, "errors": ["foo error"]}, resp) + def test_request_parameters(self): """ API parameters are set with `initialize` method.