#!/usr/bin/env python3
"""
advanced_mmm.py
=================

This script provides an advanced sandbox example of a Privacy-First Media-Mix
Modeling (MMM) analysis. It demonstrates how to build robust marketing
analytics while preserving individual privacy through differential privacy
mechanisms.

Key Features:

1. **Differential Privacy:** Implements formal privacy guarantees using the
   Laplace mechanism to add calibrated noise to aggregated marketing data,
   ensuring individual-level information cannot be extracted.
2. **Adstock (Carryover Effect):** The impact of advertising lingers and
   decays over subsequent weeks. This is modeled using a geometric decay
   function.
3. **Diminishing Returns (Saturation):** Each additional dollar spent in a
   channel yields progressively less return. This is modeled using the Hill
   function, which creates a characteristic S-shaped response curve.
4. **Control Variables:** It includes other business drivers like promotions
   and seasonality to avoid misattributing their effects to marketing spend.
5. **Privacy-Utility Tradeoff:** Demonstrates how the privacy parameter
   (epsilon) affects both privacy guarantees and model accuracy.

The script first generates synthetic data based on these principles with known
"true" parameters. It then applies differential privacy to the aggregated data
before using a non-linear least squares optimizer to fit the model. This
approach demonstrates alignment-by-design principles from AI Safety,
integrating privacy constraints into the modeling process from the ground up.

Requirements:
* Python 3.8+
* pandas, numpy, matplotlib, scipy

Outputs (in `mmm_output_advanced` folder):

1. `mmm_summary.csv`: Table of estimated vs. true parameters and the
   calculated marginal ROI (mROI) for each channel.
2. `response_curves.png`: The S-shaped saturation curves for each channel,
   showing how revenue contribution changes with spend.
3. `predicted_vs_actual.png`: A plot comparing the model's predicted revenue
   against the actual generated revenue.
4. `contribution_breakdown.png`: A stacked area chart showing how much each
   channel, promotions, and seasonality contributed to revenue each week.
5. `privacy_utility_tradeoff.png`: Visualization of how privacy parameter
   (epsilon) affects model accuracy.
   NOTE(review): this file is described here but no code in this script
   produces it — either add the epsilon-sweep plot or drop this bullet.
"""

import os
import random
from typing import Dict, List

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.optimize import minimize
from scipy.signal import lfilter

# -----------------------------------------------------------------------------
# Configuration & Ground Truth Parameters
# -----------------------------------------------------------------------------
OUTPUT_DIR = "mmm_output_advanced"
os.makedirs(OUTPUT_DIR, exist_ok=True)

# Single config dictionary: simulation size, privacy knobs, and the simulator's
# ground-truth parameters that the optimizer later tries to recover.
CONFIG = {
    "num_weeks": 104,  # 2 years of weekly data for better model stability
    "channels": ["Shopify", "TikTok", "Meta"],
    "base_revenue": 5000,   # baseline weekly revenue before media/promo/seasonality
    "noise_std": 1000,      # std-dev of the Gaussian noise added to simulated revenue
    "random_seed": 42,
    # Privacy parameters
    "enable_privacy": True,  # Set to False to disable differential privacy
    "epsilon": 1.0,          # Privacy budget (lower = more privacy, more noise)
    "delta": 1e-5,           # Failure probability for (ε, δ)-differential privacy
    # Sensitivity for different metrics (max change in an aggregate caused by
    # adding/removing one individual's record)
    "sensitivity": {
        "spend": 1000,    # Max spend contribution from one individual
        "revenue": 500,   # Max revenue contribution from one individual
        "promotions": 1,  # Binary indicator
    },
    # "Ground Truth" parameters for the data simulation; per-channel keys are
    # (adstock_decay, hill_alpha, hill_K, hill_beta) — the same four the
    # optimizer estimates.
    "true_params": {
        "Shopify": {"adstock_decay": 0.5, "hill_alpha": 2.0, "hill_K": 10000, "hill_beta": 15000},
        "TikTok": {"adstock_decay": 0.2, "hill_alpha": 2.5, "hill_K": 12000, "hill_beta": 25000},
        "Meta": {"adstock_decay": 0.7, "hill_alpha": 3.0, "hill_K": 8000, "hill_beta": 10000},
        "promo_effect": 8000,
        "seasonality_amplitude": 4000,
        "seasonality_period": 52,  # Yearly seasonality
    }
}
random.seed(CONFIG["random_seed"])
np.random.seed(CONFIG["random_seed"])


# -----------------------------------------------------------------------------
# Differential Privacy Functions
# -----------------------------------------------------------------------------
def laplace_mechanism(value: float, sensitivity: float, epsilon: float) -> float:
    """
    Applies the Laplace mechanism for differential privacy.

    Adds noise drawn from a Laplace distribution to a value, providing
    epsilon-differential privacy. The scale of the noise is calibrated to the
    sensitivity of the query and the desired privacy level.

    Args:
        value: The true value to be privatized
        sensitivity: Maximum change in output from adding/removing one individual
        epsilon: Privacy budget (smaller = more privacy, more noise)

    Returns:
        Noisy value satisfying epsilon-differential privacy

    Raises:
        ValueError: If epsilon is not strictly positive.

    Mathematical Guarantee:
        For any two neighboring datasets D and D' (differing by one record),
        Pr[M(D) = x] ≤ e^ε × Pr[M(D') = x]
    """
    if epsilon <= 0:
        raise ValueError("Epsilon must be positive")

    # Scale parameter for Laplace distribution: b = sensitivity / epsilon
    scale = sensitivity / epsilon

    # Uses the module-level (seeded) NumPy RNG, so runs are reproducible.
    noise = np.random.laplace(loc=0, scale=scale)
    return value + noise


def apply_differential_privacy(data: pd.DataFrame, epsilon: float) -> pd.DataFrame:
    """
    Applies differential privacy to aggregated marketing data.

    This function implements input perturbation by adding Laplace noise to
    aggregated spend and revenue data before it is used for modeling. This
    provides a formal privacy guarantee that individual-level contributions
    cannot be determined from the model.

    NOTE(review): the Laplace mechanism actually yields *pure* ε-DP (δ = 0);
    the printed (ε, δ) statement is therefore a weaker claim than what is
    provided. Also, per-week noising with the configured per-record
    sensitivities assumes each individual contributes to at most one weekly
    aggregate per metric — TODO confirm against how the aggregates are built.

    Args:
        data: DataFrame with aggregated marketing data
        epsilon: Privacy budget to be split across all queries

    Returns:
        DataFrame with noisy data satisfying differential privacy
        (a copy of `data` when privacy is disabled).
    """
    if not CONFIG["enable_privacy"]:
        print("Privacy is disabled. Using original data without noise.")
        return data.copy()

    print(f"\nApplying Differential Privacy with ε={epsilon:.2f}")
    print(f"Privacy Guarantee: (ε={epsilon:.2f}, δ={CONFIG['delta']:.1e})-differential privacy")

    # Create a copy to avoid modifying original data
    private_data = data.copy()

    # Split epsilon budget across channels and metrics.
    # Using the (sequential) composition theorem: if we make k queries each
    # with epsilon/k, the total privacy budget is epsilon.
    num_queries = len(CONFIG["channels"]) + 1  # channels + revenue
    epsilon_per_query = epsilon / num_queries

    # Add noise to spend data for each channel
    for ch in CONFIG["channels"]:
        spend_col = f"spend_{ch}"
        if spend_col in private_data.columns:
            # Apply Laplace mechanism to each aggregated spend value
            private_data[spend_col] = private_data[spend_col].apply(
                lambda x: laplace_mechanism(
                    x, CONFIG["sensitivity"]["spend"], epsilon_per_query
                )
            )
            # Clamp to keep spends physically plausible. NOTE(review):
            # post-processing like clipping cannot weaken the DP guarantee.
            private_data[spend_col] = private_data[spend_col].clip(lower=0)

    # Add noise to revenue data
    if "revenue" in private_data.columns:
        private_data["revenue"] = private_data["revenue"].apply(
            lambda x: laplace_mechanism(
                x, CONFIG["sensitivity"]["revenue"], epsilon_per_query
            )
        )
        # Ensure non-negative revenue
        private_data["revenue"] = private_data["revenue"].clip(lower=0)

    # Report how much noise was actually injected (diagnostic output only;
    # comparing against the true data is fine here because this is a sandbox).
    for ch in CONFIG["channels"]:
        spend_col = f"spend_{ch}"
        if spend_col in data.columns:
            noise = (private_data[spend_col] - data[spend_col]).abs()
            print(f" {ch} spend noise: mean={noise.mean():.2f}, max={noise.max():.2f}")
    if "revenue" in data.columns:
        revenue_noise = (private_data["revenue"] - data["revenue"]).abs()
        print(f" Revenue noise: mean={revenue_noise.mean():.2f}, max={revenue_noise.max():.2f}")

    return private_data


# -----------------------------------------------------------------------------
# Core MMM Transformation Functions
# -----------------------------------------------------------------------------
def geometric_adstock(x: np.ndarray, theta: float) -> np.ndarray:
    """
    Applies geometric adstock decay to a marketing channel's spend series.

    Implements the recursion y[t] = x[t] + theta * y[t-1], i.e. each week
    carries over a fraction `theta` of the previous week's (already decayed)
    effect. Uses an IIR filter (a=[1, -theta], b=[1]) for computational
    efficiency instead of a Python loop.

    Args:
        x: Weekly spend series.
        theta: Carryover/decay rate in [0, 1).

    Returns:
        Adstock-transformed series, same length as `x`.
    """
    return lfilter([1], [1, -theta], x)


def hill_function(x: np.ndarray, alpha: float, k: float, beta: float) -> np.ndarray:
    """
    Calculates the revenue contribution based on the S-shaped Hill function:
    beta * x^alpha / (k^alpha + x^alpha). Represents diminishing returns and
    saturation.

    Args:
        x: (Adstocked) spend values, assumed non-negative.
        alpha: Shape parameter controlling the steepness of the S-curve.
        k: Half-saturation point (spend at which contribution = beta / 2).
        beta: Maximum achievable contribution (asymptote).

    Returns:
        Revenue contribution, same shape as `x`.
    """
    return beta * (x**alpha) / (k**alpha + x**alpha)


# -----------------------------------------------------------------------------
# Data Generation
# -----------------------------------------------------------------------------
def generate_weekly_data() -> pd.DataFrame:
    """Generates synthetic weekly data based on the defined "true" parameters.

    Returns:
        DataFrame with columns: week, spend_<channel>, promotions,
        contribution_<channel> (the noiseless ground-truth channel effect,
        kept for later comparison), and revenue (noisy, clipped at 0).
    """
    weeks = np.arange(1, CONFIG["num_weeks"] + 1)
    df = pd.DataFrame({'week': weeks})

    # 1. Generate Spend & Control Variables
    for ch in CONFIG["channels"]:
        df[f"spend_{ch}"] = np.random.uniform(2000, 20000, CONFIG["num_weeks"])
    # ~15% of weeks carry a promotion
    df["promotions"] = (np.random.rand(CONFIG["num_weeks"]) < 0.15).astype(int)

    # Yearly sinusoidal seasonality
    seasonality = CONFIG["true_params"]["seasonality_amplitude"] * \
        np.sin(2 * np.pi * weeks / CONFIG["true_params"]["seasonality_period"])

    # 2. Calculate "True" Revenue using MMM principles
    total_revenue = CONFIG["base_revenue"] + seasonality
    total_revenue += df["promotions"] * CONFIG["true_params"]["promo_effect"]
    for ch in CONFIG["channels"]:
        params = CONFIG["true_params"][ch]
        adstocked_spend = geometric_adstock(df[f"spend_{ch}"].values, params["adstock_decay"])
        channel_contribution = hill_function(adstocked_spend, params["hill_alpha"], params["hill_K"], params["hill_beta"])
        df[f"contribution_{ch}"] = channel_contribution  # Store for later comparison
        total_revenue += channel_contribution

    # 3. Add random noise
    df["revenue"] = total_revenue + np.random.normal(0, CONFIG["noise_std"], CONFIG["num_weeks"])
    df.loc[df["revenue"] < 0, "revenue"] = 0  # Revenue can't be negative

    return df


# -----------------------------------------------------------------------------
# Modeling
# -----------------------------------------------------------------------------
def objective_function(params: np.ndarray, df: pd.DataFrame) -> float:
    """
    Function to be minimized. Calculates the sum of squared errors between
    predicted and actual revenue given a set of parameters.

    Args:
        params: Flat parameter vector: 4 per channel
            (adstock_decay, hill_alpha, hill_K, hill_beta) followed by
            one promo_effect scalar.
        df: Weekly data with week, spend_<channel>, promotions, revenue.

    Returns:
        Sum of squared prediction errors (SSE).
    """
    num_channels = len(CONFIG["channels"])

    # Unpack: first num_channels*4 entries are per-channel blocks of 4,
    # the final entry is promo_effect.
    channel_params = np.array(params[:num_channels * 4]).reshape((num_channels, 4))
    promo_effect = params[num_channels * 4]

    # We assume base revenue and seasonality are known or modeled separately.
    # NOTE(review): the *true* simulation parameters are reused here for
    # simplicity — in a real model these would be estimated too.
    predicted_revenue = CONFIG["base_revenue"] + \
        CONFIG["true_params"]["seasonality_amplitude"] * \
        np.sin(2 * np.pi * df['week'] / CONFIG["true_params"]["seasonality_period"])
    predicted_revenue += df["promotions"] * promo_effect

    for i, ch in enumerate(CONFIG["channels"]):
        adstock_decay, hill_alpha, hill_k, hill_beta = channel_params[i]
        adstocked_spend = geometric_adstock(df[f"spend_{ch}"].values, adstock_decay)
        predicted_revenue += hill_function(adstocked_spend, hill_alpha, hill_k, hill_beta)

    # Calculate Sum of Squared Errors (SSE)
    error = np.sum((df["revenue"] - predicted_revenue)**2)
    return error


def fit_model(df: pd.DataFrame) -> Dict:
    """
    Fits the non-linear MMM using scipy.optimize.minimize.

    Args:
        df: Weekly (possibly privatized) data to fit against.

    Returns:
        Dict mapping each channel name to its fitted
        {adstock_decay, hill_alpha, hill_K, hill_beta}, plus a top-level
        "promo_effect" entry.
    """
    num_channels = len(CONFIG["channels"])

    # Define bounds to guide the optimizer to plausible values,
    # per channel: (decay, alpha, K, beta)
    bounds = []
    for _ in CONFIG["channels"]:
        bounds.extend([
            (0.0, 0.9),     # adstock_decay
            (1.0, 5.0),     # hill_alpha
            (5000, 50000),  # hill_K
            (5000, 50000),  # hill_beta
        ])
    bounds.append((0, 20000))  # promo_effect

    # Initial guesses for the parameters (can be random or heuristic)
    initial_guesses = np.array([0.3, 2, 15000, 20000] * num_channels + [5000])

    print("Fitting model... This may take a moment.")
    result = minimize(
        objective_function,
        initial_guesses,
        args=(df,),
        bounds=bounds,
        method='L-BFGS-B'
    )
    print("Fitting complete.")

    # Unpack the flat solution vector back into a structured dictionary
    # (inverse of the layout consumed by objective_function).
    fitted_params_list = result.x
    fitted_params = {}
    for i, ch in enumerate(CONFIG["channels"]):
        param_idx = i * 4
        fitted_params[ch] = {
            "adstock_decay": fitted_params_list[param_idx],
            "hill_alpha": fitted_params_list[param_idx + 1],
            "hill_K": fitted_params_list[param_idx + 2],
            "hill_beta": fitted_params_list[param_idx + 3],
        }
    fitted_params["promo_effect"] = fitted_params_list[num_channels * 4]

    return fitted_params


# -----------------------------------------------------------------------------
# Post-Modeling Analysis & Visualization
# -----------------------------------------------------------------------------
def calculate_marginal_roi(spend: float, adstock_decay: float, hill_alpha: float, hill_k: float, hill_beta: float) -> float:
    """
    Calculates the marginal ROI (mROI) for the next dollar spent.
    This is the derivative of the Hill function w.r.t. (adstocked) spend:
        d/dx [beta * x^a / (k^a + x^a)] = beta * a * k^a * x^(a-1) / (k^a + x^a)^2

    Note: A full derivation would also account for the adstock effect chain
    rule. This is a simplified version for a single time-step response.

    Args:
        spend: Weekly spend level at which to evaluate the derivative.
        adstock_decay: Fitted carryover rate, used to approximate the
            steady-state adstocked spend as spend / (1 - decay).
        hill_alpha / hill_k / hill_beta: Fitted Hill parameters.

    Returns:
        Incremental revenue per additional dollar of spend (0.0 when the
        denominator would underflow).
    """
    adstocked_spend = spend / (1 - adstock_decay)  # Approximation of steady state adstocked spend

    numerator = hill_beta * hill_alpha * (hill_k**hill_alpha) * (adstocked_spend**(hill_alpha - 1))
    denominator = ((hill_k**hill_alpha) + (adstocked_spend**hill_alpha))**2

    # Avoid division by zero if denominator is tiny
    return numerator / denominator if denominator > 1e-9 else 0.0


def generate_plots(df: pd.DataFrame, fitted_params: Dict) -> None:
    """Generates and saves all output charts into OUTPUT_DIR.

    Produces response_curves.png, predicted_vs_actual.png, and
    contribution_breakdown.png.
    """
    # --- 1. Response Curves Plot ---
    plt.figure(figsize=(12, 7))
    for ch in CONFIG["channels"]:
        params = fitted_params[ch]
        spend_range = np.linspace(0, df[f"spend_{ch}"].max() * 1.2, 100)
        # Assuming no adstock for curve visualization for simplicity
        revenue_contribution = hill_function(spend_range, params["hill_alpha"], params["hill_K"], params["hill_beta"])
        plt.plot(spend_range, revenue_contribution, label=f"{ch} Response Curve")
    plt.title("Fitted Channel Response Curves (Saturation)", fontsize=16)
    plt.xlabel("Weekly Spend ($)", fontsize=12)
    plt.ylabel("Expected Revenue Contribution ($)", fontsize=12)
    plt.legend()
    plt.grid(True, which='both', linestyle='--', linewidth=0.5)
    plt.tight_layout()
    plt.savefig(os.path.join(OUTPUT_DIR, "response_curves.png"))
    plt.close()
    print("Saved response curves plot.")

    # --- 2. Predicted vs. Actual Revenue Plot ---
    # Re-calculate predicted revenue using fitted params (same construction
    # as objective_function, including the known seasonality term).
    predicted_revenue = CONFIG["base_revenue"] + \
        CONFIG["true_params"]["seasonality_amplitude"] * \
        np.sin(2 * np.pi * df['week'] / CONFIG["true_params"]["seasonality_period"])
    predicted_revenue += df["promotions"] * fitted_params["promo_effect"]
    for ch in CONFIG["channels"]:
        params = fitted_params[ch]
        adstocked_spend = geometric_adstock(df[f"spend_{ch}"].values, params["adstock_decay"])
        predicted_revenue += hill_function(adstocked_spend, params["hill_alpha"], params["hill_K"], params["hill_beta"])

    plt.figure(figsize=(12, 6))
    plt.plot(df['week'], df['revenue'], label="Actual Revenue", alpha=0.8)
    plt.plot(df['week'], predicted_revenue, label="Predicted Revenue", linestyle='--')
    plt.title("Model Fit: Predicted vs. Actual Revenue", fontsize=16)
    plt.xlabel("Week", fontsize=12)
    plt.ylabel("Revenue ($)", fontsize=12)
    plt.legend()
    plt.grid(True, linestyle='--', linewidth=0.5)
    plt.tight_layout()
    plt.savefig(os.path.join(OUTPUT_DIR, "predicted_vs_actual.png"))
    plt.close()
    print("Saved predicted vs. actual plot.")

    # --- 3. Contribution Breakdown Plot ---
    contributions = pd.DataFrame(index=df['week'])
    # BUG FIX: `contributions` is indexed by week *values* (1..N) while the
    # Series derived from df carry the default 0-based RangeIndex. Assigning
    # those Series directly would label-align, shifting every value by one
    # row and leaving NaN in the last week. Convert to plain arrays so the
    # assignment is positional.
    base_and_seasonality = CONFIG["base_revenue"] + \
        CONFIG["true_params"]["seasonality_amplitude"] * \
        np.sin(2 * np.pi * df['week'] / CONFIG["true_params"]["seasonality_period"])
    contributions['Base & Seasonality'] = base_and_seasonality.to_numpy()
    contributions['Promotions'] = (df["promotions"] * fitted_params["promo_effect"]).to_numpy()
    for ch in CONFIG["channels"]:
        params = fitted_params[ch]
        adstocked_spend = geometric_adstock(df[f"spend_{ch}"].values, params["adstock_decay"])
        # hill_function returns an ndarray here, so this assignment was
        # already positional.
        contributions[ch] = hill_function(adstocked_spend, params["hill_alpha"], params["hill_K"], params["hill_beta"])

    plt.figure(figsize=(14, 8))
    plt.stackplot(df['week'], contributions.T, labels=contributions.columns)
    plt.plot(df['week'], df['revenue'], label="Actual Revenue", color='black', linestyle=':')
    plt.title("Weekly Revenue Contribution Breakdown", fontsize=16)
    plt.xlabel("Week", fontsize=12)
    plt.ylabel("Revenue ($)", fontsize=12)
    plt.legend(loc='upper left')
    plt.tight_layout()
    plt.savefig(os.path.join(OUTPUT_DIR, "contribution_breakdown.png"))
    plt.close()
    print("Saved contribution breakdown plot.")


def main() -> None:
    """Main function to run the MMM simulation and analysis.

    Pipeline: simulate data -> privatize -> fit -> summarize to CSV -> plot.
    """
    print("=" * 70)
    print("PRIVACY-FIRST MEDIA MIX MODELING TOOLKIT")
    print("=" * 70)

    print("\n1. Generating synthetic data with known ground truth...")
    df_original = generate_weekly_data()

    print("\n2. Applying privacy-preserving mechanisms...")
    df = apply_differential_privacy(df_original, CONFIG["epsilon"])

    print("\n3. Fitting the advanced MMM to the privatized data...")
    fitted_params = fit_model(df)

    print("\n4. Analyzing results and calculating mROI...")
    summary_data = []
    for ch in CONFIG["channels"]:
        true = CONFIG["true_params"][ch]
        fitted = fitted_params[ch]
        avg_spend = df[f"spend_{ch}"].mean()
        mroi = calculate_marginal_roi(
            avg_spend, fitted['adstock_decay'], fitted['hill_alpha'],
            fitted['hill_K'], fitted['hill_beta']
        )
        # One long-format row per (channel, parameter) pair; mROI has no
        # ground-truth value, hence true_value=None.
        summary_data.append({
            "channel": ch, "parameter": "adstock_decay",
            "true_value": true["adstock_decay"], "fitted_value": fitted["adstock_decay"]
        })
        summary_data.append({
            "channel": ch, "parameter": "hill_alpha (shape)",
            "true_value": true["hill_alpha"], "fitted_value": fitted["hill_alpha"]
        })
        summary_data.append({
            "channel": ch, "parameter": "hill_K (saturation_point)",
            "true_value": true["hill_K"], "fitted_value": fitted["hill_K"]
        })
        summary_data.append({
            "channel": ch, "parameter": "hill_beta (max_effect)",
            "true_value": true["hill_beta"], "fitted_value": fitted["hill_beta"]
        })
        summary_data.append({
            "channel": ch, "parameter": "mROI_at_avg_spend",
            "true_value": None, "fitted_value": mroi
        })

    summary_df = pd.DataFrame(summary_data)
    summary_path = os.path.join(OUTPUT_DIR, "mmm_summary.csv")
    summary_df.to_csv(summary_path, index=False)
    print(f"\nMMM summary written to {summary_path}")
    print(summary_df)

    print("\n5. Generating visualizations...")
    generate_plots(df, fitted_params)

    print("\n" + "=" * 70)
    print("PRIVACY-FIRST MMM ANALYSIS COMPLETE")
    print("=" * 70)
    print(f"\nPrivacy Guarantee: (ε={CONFIG['epsilon']:.2f}, δ={CONFIG['delta']:.1e})-differential privacy")
    print(f"All outputs saved to: {OUTPUT_DIR}/")
    print("\nThis analysis demonstrates responsible AI development by:")
    print(" • Using only aggregated data (privacy by design)")
    print(" • Applying differential privacy for formal guarantees")
    print(" • Balancing privacy protection with analytical utility")
    print("=" * 70)


if __name__ == "__main__":
    main()