package alerts

import (
	"context"
	"fmt"
	"log/slog"
	"time"

	"github.com/blang/semver/v4"
	"github.com/go-kit/log"
	alertmgr_cfg "github.com/grafana/alloy/internal/mimir/alertmanager"
	"github.com/grafana/dskit/instrument"
	"github.com/prometheus-operator/prometheus-operator/pkg/alertmanager"
	validation_v1alpha1 "github.com/prometheus-operator/prometheus-operator/pkg/alertmanager/validation/v1alpha1"
	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
	promv1alpha1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1"
	"github.com/prometheus-operator/prometheus-operator/pkg/assets"
	promListers_v1alpha "github.com/prometheus-operator/prometheus-operator/pkg/client/listers/monitoring/v1alpha1"
	"github.com/prometheus/client_golang/prometheus"
	"k8s.io/apimachinery/pkg/labels"
	go_k8s "k8s.io/client-go/kubernetes"
	coreListers "k8s.io/client-go/listers/core/v1"
	"k8s.io/client-go/util/workqueue"

	"github.com/grafana/alloy/internal/component/common/kubernetes"
	"github.com/grafana/alloy/internal/component/mimir/util"
	"github.com/grafana/alloy/internal/mimir/client"
	"github.com/grafana/alloy/internal/runtime/logging"
	"github.com/grafana/alloy/internal/runtime/logging/level"
)
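
// eventProcessor reconciles AlertmanagerConfig resources from Kubernetes with
// the Alertmanager configuration stored in Mimir, consuming change events from
// a rate-limited work queue.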
type eventProcessor struct {
	queue             workqueue.TypedRateLimitingInterface[kubernetes.Event]
	stopChan          chan struct{}
	health            util.HealthReporter
	mimirClient       client.AlertmanagerInterface
	namespaceLister   coreListers.NamespaceLister
	cfgLister         promListers_v1alpha.AlertmanagerConfigLister
	namespaceSelector labels.Selector
	cfgSelector       labels.Selector
	kclient           go_k8s.Interface
	baseCfg           alertmgr_cfg.Config
	templateFiles     map[string]string
	metrics           *metrics
	logger            log.Logger
}
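
// metrics holds the self-monitoring instrumentation for the event processor.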
type metrics struct {
	configUpdatesTotal prometheus.Counter
	eventsTotal        *prometheus.CounterVec
	eventsFailed       *prometheus.CounterVec
	eventsRetried      *prometheus.CounterVec
	mimirClientTiming  *prometheus.HistogramVec
}
// TODO: Write unit tests which check metrics
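// newMetrics builds the processor's Prometheus collectors; they still need to
// be registered via (*metrics).register before they are exported.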
func newMetrics() *metrics {
	return &metrics{
		configUpdatesTotal: prometheus.NewCounter(prometheus.CounterOpts{
			Subsystem: "mimir_alerts",
			Name:      "config_updates_total",
			Help:      "Total number of times the configuration has been updated.",
		}),
		eventsTotal: prometheus.NewCounterVec(prometheus.CounterOpts{
			Subsystem: "mimir_alerts",
			Name:      "events_total",
			Help:      "Total number of events processed, partitioned by event type.",
		}, []string{"type"}),
		eventsFailed: prometheus.NewCounterVec(prometheus.CounterOpts{
			Subsystem: "mimir_alerts",
			Name:      "events_failed_total",
			Help:      "Total number of events that failed to be processed, even after retries, partitioned by event type.",
		}, []string{"type"}),
		eventsRetried: prometheus.NewCounterVec(prometheus.CounterOpts{
			Subsystem: "mimir_alerts",
			Name:      "events_retried_total",
			Help:      "Total number of retries across all events, partitioned by event type.",
		}, []string{"type"}),
		mimirClientTiming: prometheus.NewHistogramVec(prometheus.HistogramOpts{
			Subsystem: "mimir_alerts",
			Name:      "mimir_client_request_duration_seconds",
			Help:      "Duration of requests to the Mimir API.",
			Buckets:   instrument.DefBuckets,
		}, instrument.HistogramCollectorBuckets),
	}
}
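
// register registers all collectors with the given registerer, returning the
// first registration error encountered.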
func (m *metrics) register(r prometheus.Registerer) error {
	for _, c := range []prometheus.Collector{
		m.configUpdatesTotal,
		m.eventsTotal,
		m.eventsFailed,
		m.eventsRetried,
		m.mimirClientTiming,
	} {
		if err := r.Register(c); err != nil {
			return err
		}
	}

	return nil
}

// run processes events added to the queue until the queue is shut down.
func (e *eventProcessor) run(ctx context.Context) {
	for {
		evt, shutdown := e.queue.Get()
		if shutdown {
			level.Info(e.logger).Log("msg", "shutting down event loop")
			return
		}

		e.metrics.eventsTotal.WithLabelValues(string(evt.Typ)).Inc()
		err := e.processEvent(ctx, evt)
		if err != nil {
			retries := e.queue.NumRequeues(evt)
			if retries < 5 && client.IsRecoverable(err) {
				e.metrics.eventsRetried.WithLabelValues(string(evt.Typ)).Inc()
				e.queue.AddRateLimited(evt)
				level.Error(e.logger).Log(
					"msg", "failed to process event, will retry",
					"retries", fmt.Sprintf("%d/5", retries),
					"err", err,
				)
				continue
			} else {
				e.metrics.eventsFailed.WithLabelValues(string(evt.Typ)).Inc()
				level.Error(e.logger).Log(
					"msg", "failed to process event, unrecoverable error or max retries exceeded",
					"retries", fmt.Sprintf("%d/5", retries),
					"err", err,
				)
				e.health.ReportUnhealthy(err)
			}
		} else {
			e.health.ReportHealthy()
		}
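
		// Forget tells the rate limiter to stop tracking this item, so a
		// future event for the same key starts with a fresh retry budget.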
		e.queue.Forget(evt)
	}
}

// stop stops adding new Kubernetes events to the queue and blocks until all
// existing events have been processed by the run loop.
func (e *eventProcessor) stop() {
	// TODO: This stops the informers, but the informers are not created by eventProcessor.
	//  Should we create and stop these components in the same struct, to make ownership clearer?
	close(e.stopChan)

	// Because this method blocks until the queue is empty, it's important that we
	// don't stop the run loop; it must keep processing the items already in the queue.
	e.queue.ShutDownWithDrain()
}
func (e *eventProcessor) processEvent(ctx context.Context, event kubernetes.Event) error {
	defer e.queue.Done(event)
	return e.reconcileState(ctx)
}
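
// enqueueSyncMimir adds a synthetic event to the queue to trigger a full
// reconciliation against Mimir.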
func (e *eventProcessor) enqueueSyncMimir() {
	e.queue.Add(kubernetes.Event{
		Typ: util.EventTypeSyncMimir,
	})
}
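
// provisionAlertmanagerConfiguration merges the given AlertmanagerConfig
// resources into the base Alertmanager configuration using the
// prometheus-operator config builder, and returns the combined configuration.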
func (e *eventProcessor) provisionAlertmanagerConfiguration(ctx context.Context,
	amConfigs map[string]*promv1alpha1.AlertmanagerConfig, store *assets.StoreBuilder) (*alertmgr_cfg.Config, error) {
	var (
		// TODO: Make this configurable?
		version, _ = semver.New("0.29.0")
		// TODO: Add an option to get an Alertmanager CRD through k8s informers.
		cfgBuilder = alertmanager.NewConfigBuilder(slog.New(logging.NewSlogGoKitHandler(e.logger)), *version, store, &monitoringv1.Alertmanager{})
	)

	convertedCfg, err := e.baseCfg.String()
	if err != nil {
		return nil, err
	}

	err = cfgBuilder.InitializeFromRawConfiguration([]byte(convertedCfg))
	if err != nil {
		return nil, fmt.Errorf("failed to initialize from global AlertmanagerConfig: %w", err)
	}

	if err := cfgBuilder.AddAlertmanagerConfigs(ctx, amConfigs); err != nil {
		return nil, fmt.Errorf("failed to generate Alertmanager configuration: %w", err)
	}

	generatedConfig, err := cfgBuilder.MarshalJSON()
	if err != nil {
		return nil, fmt.Errorf("failed to marshal configuration: %w", err)
	}

	res, err := alertmgr_cfg.Unmarshal(generatedConfig)
	if err != nil {
		return nil, fmt.Errorf("failed to unmarshal generated final configuration: %w", err)
	}

	return res, nil
}
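
// reconcileState computes the desired Alertmanager configuration from
// Kubernetes state and pushes it to Mimir.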
func (e *eventProcessor) reconcileState(ctx context.Context) error {
	ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()

	cfg, err := e.desiredStateFromKubernetes(ctx)
	if err != nil {
		return err
	}

	// TODO: Get Mimir's current Alertmanager config and diff it with the one Alloy has.
	//  If it's the same, do nothing. If it's different, update Mimir.
	err = e.mimirClient.CreateAlertmanagerConfigs(ctx, cfg, e.templateFiles)
	if err != nil {
		return err
	}

	return nil
}

// desiredStateFromKubernetes loads AlertmanagerConfig resources from Kubernetes
// and merges them, together with the global Alertmanager configuration, into a
// single configuration.
func (e *eventProcessor) desiredStateFromKubernetes(ctx context.Context) (*alertmgr_cfg.Config, error) {
	cfgs, err := e.getKubernetesState()
	if err != nil {
		return nil, err
	}

	amConfigs := make(map[string]*promv1alpha1.AlertmanagerConfig)
	for namespace, configs := range cfgs {
		for _, config := range configs {
			// Validate the AlertmanagerConfig CRDs
			err := validation_v1alpha1.ValidateAlertmanagerConfig(config)
			if err != nil {
				level.Error(e.logger).Log(
					"msg", "got an invalid AlertmanagerConfig CRD from Kubernetes",
					"namespace", namespace,
					"name", config.Name,
					"err", err,
				)
				continue
			}

			id := namespace + `/` + config.Name
			amConfigs[id] = config
		}
	}

	cfg, err := e.provisionAlertmanagerConfiguration(ctx, amConfigs, nil)
	if err != nil {
		return nil, fmt.Errorf("failed to provision Alertmanager configuration: %w", err)
	}

	return cfg, nil
}

// getKubernetesState returns AlertmanagerConfig resources indexed by
// Kubernetes namespace.
func (e *eventProcessor) getKubernetesState() (map[string][]*promv1alpha1.AlertmanagerConfig, error) {
	namespaces, err := e.namespaceLister.List(e.namespaceSelector)
	if err != nil {
		return nil, fmt.Errorf("failed to list namespaces: %w", err)
	}

	out := make(map[string][]*promv1alpha1.AlertmanagerConfig)
	for _, namespace := range namespaces {
		alertmanagerConfigs, err := e.cfgLister.AlertmanagerConfigs(namespace.Name).List(e.cfgSelector)
		if err != nil {
			return nil, fmt.Errorf("failed to list AlertmanagerConfig CRDs: %w", err)
		}

		out[namespace.Name] = append(out[namespace.Name], alertmanagerConfigs...)
	}

	return out, nil
}
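
// A minimal usage sketch (hypothetical wiring; the eventProcessor is
// constructed elsewhere in this package, so the field population below is
// illustrative only):
//
//	ep := &eventProcessor{ /* queue, listers, mimirClient, ... set by the component */ }
//	ep.enqueueSyncMimir() // seed an initial full reconciliation
//	go ep.run(ctx)        // process events until the queue is shut down
//	// ...
//	ep.stop()             // stop informers and drain remaining events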