diff --git a/opencensus/metrics/export/metric.py b/opencensus/metrics/export/metric.py index beb805c54..c41e2dd4a 100644 --- a/opencensus/metrics/export/metric.py +++ b/opencensus/metrics/export/metric.py @@ -13,25 +13,6 @@ # limitations under the License. from opencensus.metrics.export import metric_descriptor -from opencensus.metrics.export import value - - -DESCRIPTOR_VALUE = { - metric_descriptor.MetricDescriptorType.GAUGE_INT64: - value.ValueLong, - metric_descriptor.MetricDescriptorType.CUMULATIVE_INT64: - value.ValueLong, - metric_descriptor.MetricDescriptorType.GAUGE_DOUBLE: - value.ValueDouble, - metric_descriptor.MetricDescriptorType.CUMULATIVE_DOUBLE: - value.ValueDouble, - metric_descriptor.MetricDescriptorType.GAUGE_DISTRIBUTION: - value.ValueDistribution, - metric_descriptor.MetricDescriptorType.CUMULATIVE_DISTRIBUTION: - value.ValueDistribution, - metric_descriptor.MetricDescriptorType.SUMMARY: - value.ValueSummary, -} class Metric(object): @@ -71,9 +52,8 @@ def descriptor(self): def _check_type(self): """Check that point value types match the descriptor type.""" - check_type = DESCRIPTOR_VALUE.get(self.descriptor.type) - if check_type is None: - raise ValueError("Unknown metric descriptor type") + check_type = metric_descriptor.MetricDescriptorType.to_type_class( + self.descriptor.type) for ts in self.time_series: if not ts.check_points_type(check_type): raise ValueError("Invalid point value type") diff --git a/opencensus/metrics/export/metric_descriptor.py b/opencensus/metrics/export/metric_descriptor.py index c0c06bc27..99e3b6944 100644 --- a/opencensus/metrics/export/metric_descriptor.py +++ b/opencensus/metrics/export/metric_descriptor.py @@ -14,8 +14,7 @@ import six -from opencensus.metrics.export.value import ValueDistribution -from opencensus.metrics.export.value import ValueSummary +from opencensus.metrics.export import value class _MetricDescriptorTypeMeta(type): @@ -81,19 +80,20 @@ class MetricDescriptorType(object): # is not recommended, 
since it cannot be aggregated. SUMMARY = 7 + _type_map = { + GAUGE_INT64: value.ValueLong, + GAUGE_DOUBLE: value.ValueDouble, + GAUGE_DISTRIBUTION: value.ValueDistribution, + CUMULATIVE_INT64: value.ValueLong, + CUMULATIVE_DOUBLE: value.ValueDouble, + CUMULATIVE_DISTRIBUTION: value.ValueDistribution, + SUMMARY: value.ValueSummary + } + @classmethod def to_type_class(cls, metric_descriptor_type): - type_map = { - cls.GAUGE_INT64: int, - cls.GAUGE_DOUBLE: float, - cls.GAUGE_DISTRIBUTION: ValueDistribution, - cls.CUMULATIVE_INT64: int, - cls.CUMULATIVE_DOUBLE: float, - cls.CUMULATIVE_DISTRIBUTION: ValueDistribution, - cls.SUMMARY: ValueSummary - } try: - return type_map[metric_descriptor_type] + return cls._type_map[metric_descriptor_type] except KeyError: raise ValueError("Unknown MetricDescriptorType value") @@ -119,8 +119,8 @@ class MetricDescriptor(object): format described by http://unitsofmeasure.org/ucum.html. :type type_: int - :param unit: The unit in which the metric value is reported. The - MetricDescriptorType class enumerates valid options. + :param type_: The type of metric. MetricDescriptorType enumerates the valid + options. :type label_keys: list(:class: '~opencensus.metrics.label_key.LabelKey') :param label_keys: The label keys associated with the metric descriptor. diff --git a/opencensus/metrics/export/time_series.py b/opencensus/metrics/export/time_series.py index b4dc6fbee..6d1718d47 100644 --- a/opencensus/metrics/export/time_series.py +++ b/opencensus/metrics/export/time_series.py @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from opencensus.metrics.export import metric_descriptor - class TimeSeries(object): """Time series data for a given metric and time interval. @@ -60,15 +58,19 @@ def label_values(self): def points(self): return self._points - def check_points_type(self, type_): - """Check that each point's value is an instance `type_`. 
+ def check_points_type(self, type_class): + """Check that each point's value is an instance of `type_class`. + + `type_class` should typically be a Value type, i.e. one that extends + :class: `opencensus.metrics.export.value.Value`. + + :type type_class: type + :param type_class: Type to check against. - :type type_: type - :param type_: Type to check against. + :rtype: bool + :return: Whether all points are instances of `type_class`. """ - type_class = ( - metric_descriptor.MetricDescriptorType.to_type_class(type_)) for point in self.points: - if not isinstance(point.value.value, type_class): + if not isinstance(point.value, type_class): return False return True diff --git a/opencensus/metrics/export/value.py b/opencensus/metrics/export/value.py index 065d30180..82c3fba53 100644 --- a/opencensus/metrics/export/value.py +++ b/opencensus/metrics/export/value.py @@ -149,7 +149,7 @@ class Bucket(object): distribution does not have a histogram. """ - def __init__(self, count, exemplar): + def __init__(self, count, exemplar=None): self._count = count self._exemplar = exemplar diff --git a/opencensus/stats/aggregation_data.py b/opencensus/stats/aggregation_data.py index 338b04bdb..c0d2cf7a2 100644 --- a/opencensus/stats/aggregation_data.py +++ b/opencensus/stats/aggregation_data.py @@ -12,8 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. +import copy import logging +from opencensus.metrics.export import point +from opencensus.metrics.export import value from opencensus.stats import bucket_boundaries @@ -36,6 +39,18 @@ def aggregation_data(self): """The current aggregation data""" return self._aggregation_data + def to_point(self, timestamp): + """Get a Point conversion of this aggregation. + + :type timestamp: :class: `datetime.datetime` + :param timestamp: The time to report the point as having been recorded. 
+ + :rtype: :class: `opencensus.metrics.export.point.Point` + :return: a Point with this aggregation's value and appropriate + value type. + """ + raise NotImplementedError # pragma: NO COVER + class SumAggregationDataFloat(BaseAggregationData): """Sum Aggregation Data is the aggregated data for the Sum aggregation @@ -60,6 +75,18 @@ def sum_data(self): """The current sum data""" return self._sum_data + def to_point(self, timestamp): + """Get a Point conversion of this aggregation. + + :type timestamp: :class: `datetime.datetime` + :param timestamp: The time to report the point as having been recorded. + + :rtype: :class: `opencensus.metrics.export.point.Point` + :return: a :class: `opencensus.metrics.export.value.ValueDouble`-valued + Point with value equal to `sum_data`. + """ + return point.Point(value.ValueDouble(self.sum_data), timestamp) + class CountAggregationData(BaseAggregationData): """Count Aggregation Data is the count value of aggregated data @@ -83,6 +110,18 @@ def count_data(self): """The current count data""" return self._count_data + def to_point(self, timestamp): + """Get a Point conversion of this aggregation. + + :type timestamp: :class: `datetime.datetime` + :param timestamp: The time to report the point as having been recorded. + + :rtype: :class: `opencensus.metrics.export.point.Point` + :return: a :class: `opencensus.metrics.export.value.ValueLong`-valued + Point with value equal to `count_data`. 
+ """ + return point.Point(value.ValueLong(self.count_data), timestamp) + class DistributionAggregationData(BaseAggregationData): """Distribution Aggregation Data refers to the distribution stats of @@ -123,34 +162,37 @@ def __init__(self, counts_per_bucket=None, bounds=None, exemplars=None): + if bounds is None and exemplars is not None: + raise ValueError + if exemplars is not None and len(exemplars) != len(bounds) + 1: + raise ValueError + super(DistributionAggregationData, self).__init__(mean_data) self._mean_data = mean_data self._count_data = count_data self._min = min_ self._max = max_ self._sum_of_sqd_deviations = sum_of_sqd_deviations + if bounds is None: bounds = [] + self._exemplars = None else: assert bounds == list(sorted(set(bounds))) assert all(bb > 0 for bb in bounds) + if exemplars is None: + self._exemplars = {ii: None for ii in range(len(bounds) + 1)} + else: + self._exemplars = {ii: ex for ii, ex in enumerate(exemplars)} + self._bounds = (bucket_boundaries.BucketBoundaries(boundaries=bounds) + .boundaries) if counts_per_bucket is None: counts_per_bucket = [0 for ii in range(len(bounds) + 1)] else: assert all(cc >= 0 for cc in counts_per_bucket) assert len(counts_per_bucket) == len(bounds) + 1 - self._counts_per_bucket = counts_per_bucket - self._bounds = bucket_boundaries.BucketBoundaries( - boundaries=bounds).boundaries - bucket = 0 - for _ in self.bounds: - bucket = bucket + 1 - - # If there is no histogram, do not record an exemplar - self._exemplars = \ - {bucket: exemplars} if len(self._bounds) > 0 else None @property def mean_data(self): @@ -240,6 +282,43 @@ def increment_bucket_count(self, value): self._counts_per_bucket[last_bucket_index] += 1 return last_bucket_index + def to_point(self, timestamp): + """Get a Point conversion of this aggregation. 
+ + This method creates a :class: `opencensus.metrics.export.point.Point` + with a :class: `opencensus.metrics.export.value.ValueDistribution` + value, and creates buckets and exemplars for that distribution from the + appropriate classes in the `metrics` package. + + :type timestamp: :class: `datetime.datetime` + :param timestamp: The time to report the point as having been recorded. + + :rtype: :class: `opencensus.metrics.export.point.Point` + :return: a :class: `opencensus.metrics.export.value.ValueDistribution` + -valued Point. + """ + buckets = [None] * len(self.counts_per_bucket) + for ii, count in enumerate(self.counts_per_bucket): + stat_ex = self.exemplars.get(ii, None) + if stat_ex is not None: + metric_ex = value.Exemplar(stat_ex.value, stat_ex.timestamp, + copy.copy(stat_ex.attachments)) + buckets[ii] = value.Bucket(count, metric_ex) + else: + buckets[ii] = value.Bucket(count) + + bucket_options = value.BucketOptions(value.Explicit(self.bounds)) + return point.Point( + value.ValueDistribution( + count=self.count_data, + sum_=self.sum, + sum_of_squared_deviation=self.sum_of_sqd_deviations, + bucket_options=bucket_options, + buckets=buckets + ), + timestamp + ) + class LastValueAggregationData(BaseAggregationData): """ @@ -265,6 +344,18 @@ def value(self): """The current value recorded""" return self._value + def to_point(self, timestamp): + """Get a Point conversion of this aggregation. + + :type timestamp: :class: `datetime.datetime` + :param timestamp: The time to report the point as having been recorded. + + :rtype: :class: `opencensus.metrics.export.point.Point` + :return: a :class: `opencensus.metrics.export.value.ValueDouble`-valued + Point. 
+ """ + return point.Point(value.ValueDouble(self.value), timestamp) + class Exemplar(object): """ Exemplar represents an example point that may be used to annotate diff --git a/tests/unit/metrics/export/test_metric_descriptor.py b/tests/unit/metrics/export/test_metric_descriptor.py index 2e9b90950..30eef9e2b 100644 --- a/tests/unit/metrics/export/test_metric_descriptor.py +++ b/tests/unit/metrics/export/test_metric_descriptor.py @@ -16,48 +16,53 @@ import unittest -from opencensus.metrics.export.metric_descriptor import MetricDescriptor -from opencensus.metrics.export.metric_descriptor import MetricDescriptorType -from opencensus.metrics.label_key import LabelKey +from opencensus.metrics import label_key +from opencensus.metrics.export import metric_descriptor +from opencensus.metrics.export import value NAME = 'metric' DESCRIPTION = 'Metric description' UNIT = '0.738.[ft_i].[lbf_av]/s' -LABEL_KEY1 = LabelKey('key1', 'key description one') -LABEL_KEY2 = LabelKey('值', '测试用键') +LABEL_KEY1 = label_key.LabelKey('key1', 'key description one') +LABEL_KEY2 = label_key.LabelKey('值', '测试用键') LABEL_KEYS = (LABEL_KEY1, LABEL_KEY2) class TestMetricDescriptor(unittest.TestCase): def test_init(self): - metric_descriptor = MetricDescriptor(NAME, DESCRIPTION, UNIT, - MetricDescriptorType.GAUGE_DOUBLE, - (LABEL_KEY1, LABEL_KEY2)) + md = metric_descriptor.MetricDescriptor( + NAME, DESCRIPTION, UNIT, + metric_descriptor.MetricDescriptorType.GAUGE_DOUBLE, + (LABEL_KEY1, LABEL_KEY2)) - self.assertEqual(metric_descriptor.name, NAME) - self.assertEqual(metric_descriptor.description, DESCRIPTION) - self.assertEqual(metric_descriptor.unit, UNIT) - self.assertEqual(metric_descriptor.type, - MetricDescriptorType.GAUGE_DOUBLE) - self.assertEqual(metric_descriptor.label_keys, LABEL_KEYS) + self.assertEqual(md.name, NAME) + self.assertEqual(md.description, DESCRIPTION) + self.assertEqual(md.unit, UNIT) + self.assertEqual(md.type, + metric_descriptor.MetricDescriptorType.GAUGE_DOUBLE) + 
self.assertEqual(md.label_keys, LABEL_KEYS) def test_bogus_type(self): with self.assertRaises(ValueError): - MetricDescriptor(NAME, DESCRIPTION, UNIT, 0, (LABEL_KEY1, )) + metric_descriptor.MetricDescriptor(NAME, DESCRIPTION, UNIT, 0, + (LABEL_KEY1, )) def test_null_label_keys(self): with self.assertRaises(ValueError): - MetricDescriptor(NAME, DESCRIPTION, UNIT, - MetricDescriptorType.GAUGE_DOUBLE, None) + metric_descriptor.MetricDescriptor( + NAME, DESCRIPTION, UNIT, + metric_descriptor.MetricDescriptorType.GAUGE_DOUBLE, None) def test_null_label_key_values(self): with self.assertRaises(ValueError): - MetricDescriptor(NAME, DESCRIPTION, UNIT, - MetricDescriptorType.GAUGE_DOUBLE, (None, )) + metric_descriptor.MetricDescriptor( + NAME, DESCRIPTION, UNIT, + metric_descriptor.MetricDescriptorType.GAUGE_DOUBLE, (None, )) def test_to_type_class(self): self.assertEqual( - MetricDescriptorType.to_type_class( - MetricDescriptorType.GAUGE_INT64), int) + metric_descriptor.MetricDescriptorType.to_type_class( + metric_descriptor.MetricDescriptorType.GAUGE_INT64), + value.ValueLong) with self.assertRaises(ValueError): - MetricDescriptorType.to_type_class(10) + metric_descriptor.MetricDescriptorType.to_type_class(10) diff --git a/tests/unit/metrics/export/test_time_series.py b/tests/unit/metrics/export/test_time_series.py index 76b475d35..0c54debad 100644 --- a/tests/unit/metrics/export/test_time_series.py +++ b/tests/unit/metrics/export/test_time_series.py @@ -17,7 +17,6 @@ import unittest from opencensus.metrics import label_value -from opencensus.metrics.export import metric_descriptor from opencensus.metrics.export import point from opencensus.metrics.export import time_series from opencensus.metrics.export import value @@ -59,18 +58,12 @@ def test_init_invalid(self): def test_check_points_type(self): ts = time_series.TimeSeries(LABEL_VALUES, POINTS, START_TIMESTAMP) - self.assertTrue( - ts.check_points_type( - metric_descriptor.MetricDescriptorType.GAUGE_INT64)) + 
self.assertTrue(ts.check_points_type(value.ValueLong)) bad_points = POINTS + (point.Point( value.Value.double_value(6.0), "2018-10-10T04:33:44.012345Z"), ) bad_time_series = time_series.TimeSeries(LABEL_VALUES, bad_points, START_TIMESTAMP) - self.assertFalse( - bad_time_series.check_points_type( - metric_descriptor.MetricDescriptorType.GAUGE_INT64)) - self.assertFalse( - bad_time_series.check_points_type( - metric_descriptor.MetricDescriptorType.GAUGE_DOUBLE)) + self.assertFalse(bad_time_series.check_points_type(value.ValueLong)) + self.assertFalse(bad_time_series.check_points_type(value.ValueDouble)) diff --git a/tests/unit/stats/test_aggregation_data.py b/tests/unit/stats/test_aggregation_data.py index 4c86e5240..477a62d06 100644 --- a/tests/unit/stats/test_aggregation_data.py +++ b/tests/unit/stats/test_aggregation_data.py @@ -12,11 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. +from datetime import datetime import time import unittest import mock +from opencensus.metrics.export import point +from opencensus.metrics.export import value from opencensus.stats import aggregation_data as aggregation_data_module @@ -46,6 +49,16 @@ def test_add_sample(self): self.assertEqual(4, sum_aggregation_data.sum_data) + def test_to_point(self): + sum_data = 12.345 + timestamp = datetime(1970, 1, 1) + agg = aggregation_data_module.SumAggregationDataFloat(sum_data) + converted_point = agg.to_point(timestamp) + self.assertTrue(isinstance(converted_point, point.Point)) + self.assertTrue(isinstance(converted_point.value, value.ValueDouble)) + self.assertEqual(converted_point.value.value, sum_data) + self.assertEqual(converted_point.timestamp, timestamp) + class TestCountAggregationData(unittest.TestCase): def test_constructor(self): @@ -63,6 +76,16 @@ def test_add_sample(self): self.assertEqual(1, count_aggregation_data.count_data) + def test_to_point(self): + count_data = 123 + timestamp = datetime(1970, 1, 1) + agg = 
aggregation_data_module.CountAggregationData(count_data) + converted_point = agg.to_point(timestamp) + self.assertTrue(isinstance(converted_point, point.Point)) + self.assertTrue(isinstance(converted_point.value, value.ValueLong)) + self.assertEqual(converted_point.value.value, count_data) + self.assertEqual(converted_point.timestamp, timestamp) + class TestLastValueAggregationData(unittest.TestCase): def test_constructor(self): @@ -80,6 +103,25 @@ def test_overwrite_sample(self): last_value_aggregation_data.add_sample(1, None, None) self.assertEqual(1, last_value_aggregation_data.value) + def test_to_point(self): + val = 1.2 + timestamp = datetime(1970, 1, 1) + agg = aggregation_data_module.LastValueAggregationData(val) + converted_point = agg.to_point(timestamp) + self.assertTrue(isinstance(converted_point, point.Point)) + self.assertTrue(isinstance(converted_point.value, value.ValueDouble)) + self.assertEqual(converted_point.value.value, val) + self.assertEqual(converted_point.timestamp, timestamp) + + +def exemplars_equal(stats_ex, metrics_ex): + """Compare a stats exemplar to a metrics exemplar.""" + assert isinstance(stats_ex, aggregation_data_module.Exemplar) + assert isinstance(metrics_ex, value.Exemplar) + return (stats_ex.value == metrics_ex.value and + stats_ex.timestamp == metrics_ex.timestamp and + stats_ex.attachments == metrics_ex.attachments) + class TestDistributionAggregationData(unittest.TestCase): def test_constructor(self): @@ -179,21 +221,46 @@ def test_init_bad_bounds(self): counts_per_bucket=[0, 0, 0, 0], bounds=[-1, 1, 2]) + def test_init_bad_exemplars(self): + # Check that we don't allow exemplars without bounds + with self.assertRaises(ValueError): + aggregation_data_module.DistributionAggregationData( + mean_data=mock.Mock(), + count_data=mock.Mock(), + min_=mock.Mock(), + max_=mock.Mock(), + sum_of_sqd_deviations=mock.Mock(), + counts_per_bucket=mock.Mock(), + bounds=None, + exemplars=[mock.Mock()]) + + # Check that the exemplar count 
matches the bucket count + with self.assertRaises(ValueError): + aggregation_data_module.DistributionAggregationData( + mean_data=mock.Mock(), + count_data=mock.Mock(), + min_=mock.Mock(), + max_=mock.Mock(), + sum_of_sqd_deviations=mock.Mock(), + counts_per_bucket=mock.Mock(), + bounds=[0, 1], + exemplars=[mock.Mock(), mock.Mock()]) + def test_constructor_with_exemplar(self): timestamp = time.time() attachments = {"One": "one", "Two": "two"} - exemplar_1 = aggregation_data_module.Exemplar(4, timestamp, - attachments) - exemplar_2 = aggregation_data_module.Exemplar(5, timestamp, - attachments) - mean_data = 1 - count_data = 0 - _min = 0 - _max = 1 + exemplars = [ + aggregation_data_module.Exemplar(.07, timestamp, attachments), + aggregation_data_module.Exemplar(.7, timestamp, attachments), + aggregation_data_module.Exemplar(7, timestamp, attachments) + ] + mean_data = 2.59 + count_data = 3 + _min = .07 + _max = 7 sum_of_sqd_deviations = mock.Mock() counts_per_bucket = [1, 1, 1] bounds = [1.0 / 2.0, 1] - exemplars = [exemplar_1, exemplar_2] dist_agg_data = aggregation_data_module.DistributionAggregationData( mean_data=mean_data, @@ -205,18 +272,17 @@ def test_constructor_with_exemplar(self): counts_per_bucket=counts_per_bucket, bounds=bounds) - self.assertEqual(1, dist_agg_data.mean_data) - self.assertEqual(0, dist_agg_data.count_data) - self.assertEqual(0, dist_agg_data.min) - self.assertEqual(1, dist_agg_data.max) - self.assertEqual(sum_of_sqd_deviations, - dist_agg_data.sum_of_sqd_deviations) - self.assertEqual([1, 1, 1], dist_agg_data.counts_per_bucket) - self.assertEqual([exemplar_1, exemplar_2], dist_agg_data.exemplars[2]) - self.assertEqual([1.0 / 2.0, 1], dist_agg_data.bounds) - - self.assertIsNotNone(dist_agg_data.sum) - self.assertEqual(0, dist_agg_data.variance) + self.assertEqual(dist_agg_data.mean_data, mean_data) + self.assertEqual(dist_agg_data.count_data, count_data) + self.assertEqual(dist_agg_data.min, _min) + self.assertEqual(dist_agg_data.max, 
_max) + self.assertEqual(dist_agg_data.sum_of_sqd_deviations, + sum_of_sqd_deviations) + self.assertEqual(dist_agg_data.counts_per_bucket, counts_per_bucket) + self.assertEqual(dist_agg_data.bounds, bounds) + self.assertEqual(dist_agg_data.sum, mean_data * count_data) + for ii, ex in enumerate(exemplars): + self.assertEqual(dist_agg_data.exemplars[ii], ex) def test_exemplar(self): timestamp = time.time() @@ -369,16 +435,19 @@ def test_add_sample_attachment(self): sum_of_sqd_deviations=sum_of_sqd_deviations, counts_per_bucket=counts_per_bucket, bounds=bounds, - exemplars=exemplar_1) + exemplars=[None, None, None, exemplar_1]) - self.assertEqual({3: exemplar_1}, dist_agg_data.exemplars) + self.assertEqual(dist_agg_data.exemplars[3], exemplar_1) dist_agg_data.add_sample(value, timestamp, attachments) self.assertEqual(0, dist_agg_data.min) self.assertEqual(3, dist_agg_data.max) self.assertEqual(2, dist_agg_data.count_data) self.assertEqual(2.0, dist_agg_data.mean_data) - self.assertEqual(3, dist_agg_data.exemplars[3].value) + # Check that adding a sample overwrites the bucket's exemplar + self.assertNotEqual(dist_agg_data.exemplars[3], exemplar_1) + self.assertEqual(dist_agg_data.exemplars[3].value, 3) + self.assertEqual(dist_agg_data.exemplars[3].attachments, attachments) count_data = 4 dist_agg_data = aggregation_data_module.DistributionAggregationData( @@ -448,3 +517,41 @@ def test_increment_bucket_count(self): dist_agg_data.increment_bucket_count(value=value) self.assertEqual([1, 2, 2], dist_agg_data.counts_per_bucket) + + def test_to_point(self): + timestamp = datetime(1970, 1, 1) + ex_9 = aggregation_data_module.Exemplar( + 9, timestamp, {'trace_id': 'dead', 'span_id': 'beef'} + ) + ex_99 = aggregation_data_module.Exemplar( + 99, timestamp, {'trace_id': 'dead', 'span_id': 'bef0'} + ) + dist_agg_data = aggregation_data_module.DistributionAggregationData( + mean_data=50, + count_data=99, + min_=1, + max_=99, + sum_of_sqd_deviations=80850.0, + counts_per_bucket=[0, 
9, 90, 0], + bounds=[1, 10, 100], + exemplars=[None, ex_9, ex_99, None], + ) + converted_point = dist_agg_data.to_point(timestamp) + self.assertTrue(isinstance(converted_point.value, + value.ValueDistribution)) + self.assertEqual(converted_point.value.count, 99) + self.assertEqual(converted_point.value.sum, 4950) + self.assertEqual(converted_point.value.sum_of_squared_deviation, + 80850.0) + self.assertEqual([bb.count for bb in converted_point.value.buckets], + [0, 9, 90, 0]) + self.assertEqual(converted_point.value.bucket_options.type_.bounds, + [1, 10, 100]) + self.assertTrue( + exemplars_equal( + ex_9, + converted_point.value.buckets[1].exemplar)) + self.assertTrue( + exemplars_equal( + ex_99, + converted_point.value.buckets[2].exemplar))