Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions .pre-commit-config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -240,7 +240,7 @@ repos:
name: Terraform · Linter
always_run: true
args: [--hook-config=--tf-path=tofu]
exclude: (modules/integrations/splunk_cloud_data_manager|modules/infra/forge_subscription|modules/integrations/splunk_secrets/)
exclude: (modules/integrations/splunk_cloud_data_manager|modules/infra/forge_subscription|modules/integrations/splunk_secrets/|modules/integrations/splunk_cloud_s3_runner_logs/)
- id: terraform_validate
name: Terraform · Validate
always_run: true
Expand All @@ -250,7 +250,7 @@ repos:
- --tf-init-args=--upgrade=true
- --hook-config=--tf-path=tofu
- --hook-config=--parallelism-limit=1
exclude: (modules/integrations/splunk_cloud_data_manager)
exclude: (modules/integrations/splunk_cloud_data_manager|modules/integrations/splunk_cloud_s3_runner_logs)

# ---------------------
# Security Hooks
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -37,12 +37,18 @@ variable "webhook_relay_destination_config" {
})
}

variable "log_retention_days" {
variable "logging_retention_in_days" {
description = "Number of days to retain logs."
type = number
default = 3
}

variable "log_level" {
type = string
description = "Log level for application logging (e.g., INFO, DEBUG, WARN, ERROR)"
default = "INFO"
}

variable "enable_webex_webhook_relay" {
type = bool
description = "Enable Webex webhook relay."
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -20,8 +20,9 @@ module "webex_webhook_relay" {
aws = aws
}

log_retention_days = var.log_retention_days
aws_region = var.aws_region
tags = var.tags
default_tags = var.default_tags
logging_retention_in_days = var.logging_retention_in_days
log_level = var.log_level
aws_region = var.aws_region
tags = var.tags
default_tags = var.default_tags
}
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
resource "aws_cloudwatch_log_group" "webex" {
name = "/aws/lambda/webex-webhook-relay-destination-receiver"
retention_in_days = var.log_retention_days
retention_in_days = var.logging_retention_in_days
tags_all = local.all_security_tags
tags = local.all_security_tags
}
Expand All @@ -26,6 +26,7 @@ module "webex" {

environment_variables = {
WEBEX_BOT_TOKEN_SECRET_NAME = "/cicd/common/webex_webhook_relay_bot_token"
LOG_LEVEL = var.log_level
}

attach_policy_json = true
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -7,8 +7,9 @@

import boto3

log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
LOG = logging.getLogger()
level_str = os.environ.get('LOG_LEVEL', 'INFO').upper()
LOG.setLevel(getattr(logging, level_str, logging.INFO))

DEFAULT_FAILURES = {'cancelled', 'failure',
'timed_out', 'timeout', 'startup_failure'}
Expand Down Expand Up @@ -162,7 +163,7 @@ def load_webex_secret() -> tuple[str, str]:
if not token.lower().startswith('bearer '):
token = f"Bearer {token}"

log.info('Successfully loaded Webex secret.')
LOG.info('Successfully loaded Webex secret.')
return token, room


Expand All @@ -173,7 +174,7 @@ def send_webex_card(card: Dict[str, Any]) -> None:
'text': 'GitHub Actions workflow alert', # required by Webex
'attachments': card.get('attachments', [])
}
log.info('Prepared payload for Webex message with %d attachments',
LOG.info('Prepared payload for Webex message with %d attachments',
len(payload.get('attachments', [])))

req = urllib.request.Request(
Expand All @@ -188,7 +189,7 @@ def send_webex_card(card: Dict[str, Any]) -> None:
if not (200 <= resp.status < 300):
body = resp.read().decode()
raise RuntimeError(f"Webex send failed: {resp.status} {body}")
log.info('Webex Adaptive Card sent successfully')
LOG.info('Webex Adaptive Card sent successfully')
except urllib.error.HTTPError as e:
error_body = e.read().decode()
raise RuntimeError(
Expand All @@ -202,24 +203,24 @@ def lambda_handler(event, _context):
detail = event.get('detail', {})
run = detail.get('workflow_run')
if not isinstance(run, dict):
log.info('lambda_skip reason=no_workflow_run')
LOG.info('lambda_skip reason=no_workflow_run')
return {'statusCode': 200, 'body': 'No workflow_run'}

branch = run.get('head_branch')
conclusion = (run.get('conclusion') or '').lower()
repo = (detail.get('repository') or {}).get('full_name') or (
run.get('head_repository') or {}).get('full_name') or 'unknown repo'
job_url = run.get('html_url')
log.info('run_info repo=%s branch=%s conclusion=%s job_url=%s',
LOG.info('run_info repo=%s branch=%s conclusion=%s job_url=%s',
repo, branch, conclusion, job_url)

if branch != 'main':
log.info('lambda_skip reason=branch_not_main repo=%s branch=%s conclusion=%s job_url=%s',
LOG.info('lambda_skip reason=branch_not_main repo=%s branch=%s conclusion=%s job_url=%s',
repo, branch, conclusion, job_url)
return {'statusCode': 200, 'body': f"Skipped (branch={branch})"}

if conclusion not in DEFAULT_FAILURES:
log.info('lambda_skip reason=non_failure repo=%s branch=%s conclusion=%s job_url=%s',
LOG.info('lambda_skip reason=non_failure repo=%s branch=%s conclusion=%s job_url=%s',
repo, branch, conclusion, job_url)
return {'statusCode': 200, 'body': f"Skipped ({conclusion})"}

Expand All @@ -230,5 +231,5 @@ def lambda_handler(event, _context):
return {'statusCode': 200, 'body': 'Alert sent'}

except Exception as exc:
log.exception('lambda_error error=%s', exc)
LOG.exception('lambda_error error=%s', exc)
return {'statusCode': 500, 'body': f"Error: {exc}"}
Original file line number Diff line number Diff line change
Expand Up @@ -13,8 +13,14 @@ variable "default_tags" {
description = "A map of tags to apply to resources."
}

variable "log_retention_days" {
variable "logging_retention_in_days" {
type = number
description = "Number of days to retain logs in CloudWatch."
default = 3
}

variable "log_level" {
type = string
description = "Log level for application logging (e.g., INFO, DEBUG, WARN, ERROR)"
default = "INFO"
}
2 changes: 1 addition & 1 deletion modules/integrations/github_webhook_relay_source/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -110,7 +110,7 @@ curl -X POST "$(terraform output -raw webhook_endpoint)/webhook" \
| <a name="input_destination_event_bus_name"></a> [destination\_event\_bus\_name](#input\_destination\_event\_bus\_name) | Destination bus name in destination account | `string` | n/a | yes |
| <a name="input_destination_region"></a> [destination\_region](#input\_destination\_region) | Destination region (omit for same as source) | `string` | `null` | no |
| <a name="input_event_source"></a> [event\_source](#input\_event\_source) | EventBridge source field for emitted events | `string` | `"webhook.relay"` | no |
| <a name="input_log_retention_in_days"></a> [log\_retention\_in\_days](#input\_log\_retention\_in\_days) | Log retention period in days | `number` | `3` | no |
| <a name="input_logging_retention_in_days"></a> [logging\_retention\_in\_days](#input\_logging\_retention\_in\_days) | Log retention period in days | `number` | `3` | no |
| <a name="input_name_prefix"></a> [name\_prefix](#input\_name\_prefix) | Prefix for created resources | `string` | `"webhook-relay-source"` | no |
| <a name="input_source_event_bus_name"></a> [source\_event\_bus\_name](#input\_source\_event\_bus\_name) | Name of the source EventBridge bus | `string` | `"webhook-relay-source"` | no |
| <a name="input_tags"></a> [tags](#input\_tags) | Tags to apply to all resources | `map(string)` | `{}` | no |
Expand Down
3 changes: 2 additions & 1 deletion modules/integrations/github_webhook_relay_source/lambda.tf
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@ module "validate_signature_lambda" {
environment_variables = {
EVENT_BUS = var.source_event_bus_name
WEBHOOK_SECRET = var.webhook_secret
LOG_LEVEL = var.log_level
}

attach_policy_json = true
Expand Down Expand Up @@ -48,7 +49,7 @@ data "aws_iam_policy_document" "validate_signature_lambda" {

resource "aws_cloudwatch_log_group" "validate_signature_lambda" {
name = "/aws/lambda/${var.name_prefix}-validate-signature"
retention_in_days = var.log_retention_in_days
retention_in_days = var.logging_retention_in_days
tags = var.tags
tags_all = var.tags
}
Original file line number Diff line number Diff line change
Expand Up @@ -10,26 +10,27 @@
SECRET = os.environ.get('GITHUB_SECRET', '').encode()
eb = boto3.client('events')

logger = logging.getLogger()
logger.setLevel(logging.INFO)
LOG = logging.getLogger()
level_str = os.environ.get('LOG_LEVEL', 'INFO').upper()
LOG.setLevel(getattr(logging, level_str, logging.INFO))


def lambda_handler(event, _):
logger.info('Received event for processing: %s', event)
LOG.info('Received event for processing: %s', event)
signature = event['headers'].get('X-Hub-Signature-256', '')
body = event['body']

if SECRET:
digest = hmac.new(SECRET, body.encode(), hashlib.sha256).hexdigest()
if not signature.endswith(digest):
logger.warning(
LOG.warning(
'Signature mismatch: provided %s, expected digest %s', signature, digest)
return {'statusCode': 401, 'body': 'Invalid signature'}

try:
payload = json.loads(body)
except json.JSONDecodeError as e:
logger.error('JSON decode error: %s', e)
LOG.error('JSON decode error: %s', e)
return {'statusCode': 400, 'body': 'Invalid JSON'}
gh_event = event['headers'].get('X-GitHub-Event', 'unknown')
action = payload.get('action', 'none')
Expand All @@ -47,10 +48,10 @@ def lambda_handler(event, _):
}
]
)
logger.info('Event forwarded to EventBridge %s, response: %s',
EVENT_BUS, response)
LOG.info('Event forwarded to EventBridge %s, response: %s',
EVENT_BUS, response)
except Exception as e:
logger.error('Failed to put event to EventBridge: %s', e)
LOG.error('Failed to put event to EventBridge: %s', e)
return {'statusCode': 500, 'body': 'Failed to forward event'}

return {'statusCode': 200, 'body': 'Event forwarded'}
2 changes: 1 addition & 1 deletion modules/integrations/github_webhook_relay_source/main.tf
Original file line number Diff line number Diff line change
Expand Up @@ -78,7 +78,7 @@ resource "aws_cloudwatch_log_delivery_source" "error_logs" {
# Logging to CloudWatch Log Group
resource "aws_cloudwatch_log_group" "event_bus_logs" {
name = "/aws/vendedlogs/events/event-bus/${aws_cloudwatch_event_bus.source.name}"
retention_in_days = var.log_retention_in_days
retention_in_days = var.logging_retention_in_days
tags = var.tags
tags_all = var.tags
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -43,8 +43,14 @@ variable "destination_event_bus_name" {
type = string
}

variable "log_retention_in_days" {
variable "logging_retention_in_days" {
description = "Log retention period in days"
type = number
default = 3
}

variable "log_level" {
type = string
description = "Log level for application logging (e.g., INFO, DEBUG, WARN, ERROR)"
default = "INFO"
}
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,7 @@ module "cur_per_resource" {
SPLUNK_INDEX = var.splunk_aws_billing_config.splunk_index
SPLUNK_METRICS_TOKEN = data.aws_secretsmanager_secret_version.secrets["splunk_o11y_ingest_token_aws_billing"].secret_string
SPLUNK_METRICS_URL = var.splunk_aws_billing_config.splunk_metrics_url
LOG_LEVEL = var.log_level
}

attach_policy_jsons = true
Expand Down Expand Up @@ -63,7 +64,7 @@ resource "aws_lambda_permission" "cur_per_resource" {

resource "aws_cloudwatch_log_group" "cur_per_resource" {
name = "/aws/lambda/${local.cur_per_resource_lambda_name}"
retention_in_days = 3
retention_in_days = var.logging_retention_in_days
tags = local.all_security_tags
tags_all = local.all_security_tags
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,7 @@ module "cur_per_resource_process" {
SPLUNK_INDEX = var.splunk_aws_billing_config.splunk_index
SPLUNK_METRICS_TOKEN = data.aws_secretsmanager_secret_version.secrets["splunk_o11y_ingest_token_aws_billing"].secret_string
SPLUNK_METRICS_URL = var.splunk_aws_billing_config.splunk_metrics_url
LOG_LEVEL = var.log_level
}

attach_policy_jsons = true
Expand Down Expand Up @@ -63,7 +64,7 @@ resource "aws_lambda_permission" "cur_per_resource_process" {

resource "aws_cloudwatch_log_group" "cur_per_resource_process" {
name = "/aws/lambda/${local.cur_per_resource_process_lambda_name}"
retention_in_days = 3
retention_in_days = var.logging_retention_in_days
tags = local.all_security_tags
tags_all = local.all_security_tags
}
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,7 @@ module "cur_per_service" {
SPLUNK_INDEX = var.splunk_aws_billing_config.splunk_index
SPLUNK_METRICS_TOKEN = data.aws_secretsmanager_secret_version.secrets["splunk_o11y_ingest_token_aws_billing"].secret_string
SPLUNK_METRICS_URL = var.splunk_aws_billing_config.splunk_metrics_url
LOG_LEVEL = var.log_level
}

attach_policy_jsons = true
Expand Down Expand Up @@ -64,7 +65,7 @@ resource "aws_lambda_permission" "cur_per_service" {

resource "aws_cloudwatch_log_group" "cur_per_service" {
name = "/aws/lambda/${local.cur_per_service_lambda_name}"
retention_in_days = 3
retention_in_days = var.logging_retention_in_days
tags = local.all_security_tags
tags_all = local.all_security_tags
}
Expand Down
21 changes: 11 additions & 10 deletions modules/integrations/splunk_aws_billing/lambda/common.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,8 +18,9 @@
MAX_BATCH_COUNT = 500
METRICS_BATCH_SIZE = 500

logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
LOG = logging.getLogger()
level_str = os.environ.get('LOG_LEVEL', 'INFO').upper()
LOG.setLevel(getattr(logging, level_str, logging.INFO))

s3 = boto3.client('s3')

Expand All @@ -43,11 +44,11 @@ def send_to_splunk_batch(events):
data=compressed_payload,
timeout=10,
)
logger.info('[Splunk Batch] Sent %d events | Status: %s',
len(events), resp.status_code)
LOG.info('[Splunk Batch] Sent %d events | Status: %s',
len(events), resp.status_code)
resp.raise_for_status()
except requests.RequestException as e:
logger.error('Failed to send batch to Splunk: %s', e)
LOG.error('Failed to send batch to Splunk: %s', e)


def send_metric_to_o11y_batch(metrics):
Expand All @@ -68,11 +69,11 @@ def send_metric_to_o11y_batch(metrics):
json=payload,
timeout=10,
)
logger.info('[O11y Batch] Sent %d metrics | Status: %s',
len(metrics), resp.status_code)
LOG.info('[O11y Batch] Sent %d metrics | Status: %s',
len(metrics), resp.status_code)
resp.raise_for_status()
except requests.RequestException as e:
logger.error('Failed to send metric batch: %s', e)
LOG.error('Failed to send metric batch: %s', e)


def extract_arn_parts(arn):
Expand Down Expand Up @@ -108,7 +109,7 @@ def parse_tags(val):


def preprocess_df(df):
logger.info('Raw DataFrame shape: %s', df.shape)
LOG.info('Raw DataFrame shape: %s', df.shape)

df['line_item_usage_start_date'] = pd.to_datetime(
df['line_item_usage_start_date'])
Expand All @@ -119,5 +120,5 @@ def preprocess_df(df):
lambda tags: tags.get('user_aws_application', 'unknown'))
df = df[df['user_aws_application'] != 'unknown']

logger.info('Preprocessed DataFrame shape: %s', df.shape)
LOG.info('Preprocessed DataFrame shape: %s', df.shape)
return df
Loading