Skip to content

Commit d480aa6

Browse files
committed
Add support for OTEL_BSP_* environment variables
Fixes #1105
1 parent e7dc0e8 commit d480aa6

File tree

1 file changed

+32
-5
lines changed
  • opentelemetry-sdk/src/opentelemetry/sdk/trace/export

1 file changed

+32
-5
lines changed

opentelemetry-sdk/src/opentelemetry/sdk/trace/export/__init__.py

Lines changed: 32 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -20,8 +20,8 @@
2020
import typing
2121
from enum import Enum
2222

23+
from opentelemetry.configuration import Configuration
2324
from opentelemetry.context import attach, detach, set_value
24-
from opentelemetry.sdk.trace import sampling
2525
from opentelemetry.util import time_ns
2626

2727
from .. import Span, SpanProcessor
@@ -113,10 +113,32 @@ class BatchExportSpanProcessor(SpanProcessor):
113113
def __init__(
114114
self,
115115
span_exporter: SpanExporter,
116-
max_queue_size: int = 2048,
117-
schedule_delay_millis: float = 5000,
118-
max_export_batch_size: int = 512,
116+
max_queue_size: int = None,
117+
schedule_delay_millis: float = None,
118+
max_export_batch_size: int = None,
119+
export_timeout_millis: float = None,
119120
):
121+
122+
if max_queue_size is None:
123+
max_queue_size = Configuration().get(
124+
"OTEL_BSP_MAX_QUEUE_SIZE", 2048
125+
)
126+
127+
if schedule_delay_millis is None:
128+
schedule_delay_millis = Configuration().get(
129+
"OTEL_BSP_SCHEDULE_DELAY_MILLIS", 5000
130+
)
131+
132+
if max_export_batch_size is None:
133+
max_export_batch_size = Configuration().get(
134+
"OTEL_BSP_MAX_EXPORT_BATCH_SIZE", 512
135+
)
136+
137+
if export_timeout_millis is None:
138+
export_timeout_millis = Configuration().get(
139+
"OTEL_BSP_EXPORT_TIMEOUT_MILLIS", 30000
140+
)
141+
120142
if max_queue_size <= 0:
121143
raise ValueError("max_queue_size must be a positive integer.")
122144

@@ -143,6 +165,7 @@ def __init__(
143165
self.schedule_delay_millis = schedule_delay_millis
144166
self.max_export_batch_size = max_export_batch_size
145167
self.max_queue_size = max_queue_size
168+
self.export_timeout_millis = export_timeout_millis
146169
self.done = False
147170
# flag that indicates that spans are being dropped
148171
self._spans_dropped = False
@@ -306,7 +329,11 @@ def _drain_queue(self):
306329
while self.queue:
307330
self._export_batch()
308331

309-
def force_flush(self, timeout_millis: int = 30000) -> bool:
332+
def force_flush(self, timeout_millis: int = None) -> bool:
333+
334+
if timeout_millis is None:
335+
timeout_millis = self.export_timeout_millis
336+
310337
if self.done:
311338
logger.warning("Already shutdown, ignoring call to force_flush().")
312339
return True

0 commit comments

Comments (0)