|
1 | 1 | from datadog.threadstats import ThreadStats |
2 | | -from threading import Lock |
| 2 | +from threading import Lock, Thread |
3 | 3 | from datadog import api |
4 | 4 | import os |
5 | 5 |
|
@@ -34,6 +34,11 @@ def _enter(cls): |
34 | 34 | cls._was_initialized = True |
35 | 35 | api._api_key = os.environ.get('DATADOG_API_KEY') |
36 | 36 | api._api_host = os.environ.get('DATADOG_HOST', 'https://api.datadoghq.com') |
| 37 | + |
| 38 | + # Async initialization of the TLS connection with our endpoints |
| 39 | + # This avoids adding execution time at the end of the lambda run |
| 40 | + t = Thread(target=init_connection) |
| 41 | + t.start() |
37 | 42 | cls._counter = cls._counter + 1 |
38 | 43 |
|
39 | 44 | @classmethod |
@@ -70,3 +75,16 @@ def __call__(self, *args, **kw): |
70 | 75 | def lambda_metric(*args, **kw): |
71 | 76 | """ Alias to expose only distributions for lambda functions""" |
72 | 77 | _lambda_stats.distribution(*args, **kw) |
| 78 | + |
| 79 | + |
| 80 | +def init_connection(): |
| 81 | + """ No-op GET to initialize the requests connection with DD's endpoints |
| 82 | +
|
| 83 | + The goal here is to make the final flush faster: |
| 84 | + we keep the Requests session alive, which means we can re-use the connection. |
| 85 | + The consequence is that the TLS handshake, which can take hundreds of ms, |
| 86 | + is now made at the beginning of a lambda instead of at the end. |
| 87 | +
|
| 88 | + By making the initial request async, we spare a lot of execution time in the lambdas. |
| 89 | + """ |
| 90 | + api.api_client.APIClient.submit('GET', 'validate') |
0 commit comments