This repository was archived by the owner on Jun 6, 2024. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 550
Expand file tree
/
Copy pathindex.js
More file actions
139 lines (127 loc) · 4.52 KB
/
index.js
File metadata and controls
139 lines (127 loc) · 4.52 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
require('module-alias/register');
require('dotenv').config();
const AsyncLock = require('async-lock');
const _ = require('lodash');
const DatabaseModel = require('openpaidbsdk');
const { default: PQueue } = require('p-queue');
const interval = require('interval-promise');
require('@dbc/common/init');
const logger = require('@dbc/common/logger');
const { getEventInformer } = require('@dbc/common/k8s');
const { alwaysRetryDecorator } = require('@dbc/common/util');
const disk = require('diskusage');
const config = require('@dbc/watcher/cluster-event/config');
// Concurrency model for event synchronization:
//  - AsyncLock serializes incidents that share the same event uid; e.g. if one
//    event has ADDED, MODIFED, and MODIFED incidents, the lock guarantees they
//    are delivered to the write-merger in arrival order. ('MODIFED' is the
//    exact type string emitted below — kept as-is; downstream presumably
//    matches that spelling, TODO confirm.)
//  - PQueue bounds how many *different* events are processed concurrently;
//    e.g. with 30000 pending events only `maxRpcConcurrency` run at a time.
// maxPending is effectively unbounded so queued incidents are never dropped.
const lock = new AsyncLock({ maxPending: Number.MAX_SAFE_INTEGER });
const queue = new PQueue({ concurrency: config.maxRpcConcurrency });
// Shared DB handle with a capped connection pool.
const databaseModel = new DatabaseModel(
  config.dbConnectionStr,
  config.maxDatabaseConnection,
);
/**
 * Persist one Kubernetes event for a framework pod into the database.
 *
 * @param {string} eventType - Informer incident type ('ADDED'/'MODIFED'/'DELETED').
 *   Note: currently not stored; `type` below is the K8s event's own type field.
 * @param {object} apiObject - Kubernetes Event object. Its involvedObject.name
 *   is assumed to be `<frameworkName>-<taskroleName>-<taskIndex>` (the caller's
 *   regex in `eventHandler` enforces this shape).
 * @returns {Promise<void>} resolves when the row is upserted.
 */
async function synchronizeEvent(eventType, apiObject) {
  const uid = apiObject.metadata.uid;
  const names = apiObject.involvedObject.name.split('-');
  const obj = {
    uid: uid,
    frameworkName: names[0],
    podUid: apiObject.involvedObject.uid,
    taskroleName: names[1],
    taskName: apiObject.involvedObject.name,
    // Explicit radix avoids legacy octal/hex parsing surprises.
    taskIndex: parseInt(names[2], 10),
    type: apiObject.type,
    reason: apiObject.reason,
    message: apiObject.message,
    firstTimestamp: apiObject.firstTimestamp,
    lastTimestamp: apiObject.lastTimestamp,
    count: apiObject.count,
    sourceComponent: _.get(apiObject, 'source.component', null),
    sourceHost: _.get(apiObject, 'source.host', null),
    // Keep the raw event for debugging / reprocessing.
    event: JSON.stringify(apiObject),
  };
  // Fix: the upsert was previously fire-and-forget, so a DB failure could
  // never propagate to `alwaysRetryDecorator` (no retry) and would surface
  // as an unhandled rejection. Await it so errors reach the retry wrapper.
  await databaseModel.FrameworkEvent.upsert(obj, { where: { uid: uid } });
}
/**
 * Handle one informer incident: filter to Pod events that belong to a PAI
 * framework, then enqueue a database sync.
 *
 * Ordering: a per-uid AsyncLock serializes incidents of the same event, while
 * PQueue caps cross-event concurrency; the sync itself is wrapped in
 * `alwaysRetryDecorator` so transient DB errors are retried.
 *
 * @param {string} eventType - 'ADDED' | 'MODIFED' | 'DELETED'.
 * @param {object} apiObject - Kubernetes Event object.
 */
const eventHandler = (eventType, apiObject) => {
  const receivedTs = new Date().getTime();
  const involvedObjKind = apiObject.involvedObject.kind;
  const involvedObjName = apiObject.involvedObject.name;
  const uid = apiObject.metadata.uid;
  // Framework pod names look like `<32-char-id>-<taskrole>-<index>`.
  if (
    involvedObjKind === 'Pod' &&
    /^[a-z0-9]{32}-[A-Za-z0-9._~]+-[0-9]+$/.test(involvedObjName)
  ) {
    logger.info(
      `Cluster event type=${eventType} receivedTs=${receivedTs} uid=${uid} involvedObjKind=${involvedObjKind} involvedObjName=${involvedObjName} received.`,
    );
    lock
      .acquire(uid, () => {
        return queue.add(
          alwaysRetryDecorator(
            () => synchronizeEvent(eventType, apiObject),
            `Sync to database type=${eventType} receivedTs=${receivedTs} uid=${uid} involvedObjKind=${involvedObjKind} involvedObjName=${involvedObjName}`,
          ),
        );
      })
      // Fix: `lock.acquire` returns a promise that was previously left
      // floating; a rejection (e.g. the lock's pending limit being hit)
      // would surface as an unhandled rejection. Log it instead.
      .catch(err => {
        logger.error(
          `Failed to process cluster event uid=${uid}, details: ${err}`,
        );
      });
  } else {
    logger.info(
      `Cluster Event type=${eventType} receivedTs=${receivedTs} uid=${uid} involvedObjKind=${involvedObjKind} involvedObjName=${involvedObjName} received but ignored.`,
    );
  }
};
/**
 * Verify the disk backing `config.diskPath` is below the configured usage
 * threshold. If the threshold is exceeded, or the check itself fails, log an
 * error and exit the process (the logger callback lets the log flush first).
 *
 * @returns {Promise<void>}
 */
async function assertDiskUsageHealthy() {
  try {
    const { available, total } = await disk.check(config.diskPath);
    const usedPercent = ((total - available) / total) * 100;
    logger.info(`Current internal storage usage is ${usedPercent}% .`);
    if (usedPercent > config.maxDiskUsagePercent) {
      // Exit only after the error line has been written out.
      logger.error(
        `Internal storage usage exceeds ${config.maxDiskUsagePercent}%, exit.`,
        () => process.exit(1),
      );
    }
  } catch (error) {
    // Treat an unreadable disk the same as an unhealthy one: fatal.
    logger.error(`Check disk usage fails, details: ${error}`, () =>
      process.exit(1),
    );
  }
}
/**
 * Wire the Kubernetes event informer's lifecycle callbacks to `eventHandler`
 * and start watching.
 *
 * Note: 'MODIFED' is the historical spelling emitted by this watcher — kept
 * byte-identical; downstream consumers presumably match it (verify before
 * ever correcting it).
 */
function startInformer() {
  const informer = getEventInformer();
  const incidentTypes = {
    add: 'ADDED',
    update: 'MODIFED',
    delete: 'DELETED',
  };
  for (const [informerEvent, eventType] of Object.entries(incidentTypes)) {
    informer.on(informerEvent, apiObject => {
      eventHandler(eventType, apiObject);
    });
  }
  informer.on('error', err => {
    // Any informer error is fatal: exit and let Kubernetes restart the pod.
    logger.error(err, () => process.exit(1));
  });
  informer.start();
}
/**
 * Schedule the periodic disk-usage health check. Individual check failures
 * must not stop the interval loop, hence `stopOnError: false`.
 */
function startDiskCheck() {
  const checkIntervalMs = config.diskCheckIntervalSecond * 1000;
  interval(assertDiskUsageHealthy, checkIntervalMs, { stopOnError: false });
}
/**
 * Entry point: fail fast if internal storage is already unhealthy, then start
 * the event informer and the periodic disk check.
 */
async function main() {
  await assertDiskUsageHealthy();
  startInformer();
  startDiskCheck();
}

// Fix: `main()` was previously a floating promise, so a startup failure would
// only surface as an unhandled-rejection warning and could leave the process
// half-started. Log the error and exit so Kubernetes restarts the pod.
main().catch(err => {
  logger.error(err, () => process.exit(1));
});