-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathadvanced_mmm.py
More file actions
518 lines (427 loc) · 20.9 KB
/
advanced_mmm.py
File metadata and controls
518 lines (427 loc) · 20.9 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
#!/usr/bin/env python3
"""
advanced_mmm.py
=================
This script provides an advanced sandbox example of a Privacy-First Media-Mix
Modeling (MMM) analysis. It demonstrates how to build robust marketing analytics
while preserving individual privacy through differential privacy mechanisms.
Key Features:
1. **Differential Privacy:** Implements formal privacy guarantees using the
Laplace mechanism to add calibrated noise to aggregated marketing data,
ensuring individual-level information cannot be extracted.
2. **Adstock (Carryover Effect):** The impact of advertising lingers and
decays over subsequent weeks. This is modeled using a geometric decay
function.
3. **Diminishing Returns (Saturation):** Each additional dollar spent in a
channel yields progressively less return. This is modeled using the
Hill function, which creates a characteristic S-shaped response curve.
4. **Control Variables:** It includes other business drivers like promotions
and seasonality to avoid misattributing their effects to marketing spend.
5. **Privacy-Utility Tradeoff:** Demonstrates how the privacy parameter (epsilon)
affects both privacy guarantees and model accuracy.
The script first generates synthetic data based on these principles with known
"true" parameters. It then applies differential privacy to the aggregated data
before using a non-linear least squares optimizer to fit the model.
This approach demonstrates alignment-by-design principles from AI Safety,
integrating privacy constraints into the modeling process from the ground up.
Requirements:
* Python 3.8+
* pandas, numpy, matplotlib, scipy
Outputs (in `mmm_output_advanced` folder):
1. `mmm_summary.csv`: Table of estimated vs. true parameters and the
calculated marginal ROI (mROI) for each channel.
2. `response_curves.png`: The S-shaped saturation curves for each channel,
showing how revenue contribution changes with spend.
3. `predicted_vs_actual.png`: A plot comparing the model's predicted revenue
against the actual generated revenue.
4. `contribution_breakdown.png`: A stacked area chart showing how much each
channel, promotions, and seasonality contributed to revenue each week.
5. `privacy_utility_tradeoff.png`: Visualization of how privacy parameter (epsilon)
affects model accuracy.
"""
import os
import random
from typing import Dict, List
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.optimize import minimize
from scipy.signal import lfilter
# -----------------------------------------------------------------------------
# Configuration & Ground Truth Parameters
# -----------------------------------------------------------------------------
OUTPUT_DIR = "mmm_output_advanced"
os.makedirs(OUTPUT_DIR, exist_ok=True)

# Use a config dictionary for clarity
CONFIG = {
    "num_weeks": 104,  # 2 years of data for better model stability
    "channels": ["Shopify", "TikTok", "Meta"],
    "base_revenue": 5000,   # weekly revenue floor before media/promo/seasonality
    "noise_std": 1000,      # std-dev of the Gaussian observation noise on revenue
    "random_seed": 42,
    # Privacy parameters
    "enable_privacy": True,  # Set to False to disable differential privacy
    "epsilon": 1.0,  # Privacy budget (lower = more privacy, more noise)
    # NOTE(review): only the Laplace mechanism is used below, which gives pure
    # ε-DP; delta is reported in log output but never consumed — confirm intent.
    "delta": 1e-5,  # Failure probability for (ε, δ)-differential privacy
    # Sensitivity for different metrics (max change from one individual)
    "sensitivity": {
        "spend": 1000,  # Max spend contribution from one individual
        "revenue": 500,  # Max revenue contribution from one individual
        "promotions": 1,  # Binary indicator
    },
    # "Ground Truth" parameters for the data simulation
    "true_params": {
        "Shopify": {"adstock_decay": 0.5, "hill_alpha": 2.0, "hill_K": 10000, "hill_beta": 15000},
        "TikTok": {"adstock_decay": 0.2, "hill_alpha": 2.5, "hill_K": 12000, "hill_beta": 25000},
        "Meta": {"adstock_decay": 0.7, "hill_alpha": 3.0, "hill_K": 8000, "hill_beta": 10000},
        "promo_effect": 8000,
        "seasonality_amplitude": 4000,
        "seasonality_period": 52,  # Yearly seasonality
    }
}

# Seed both RNGs so data generation and DP noise draws are reproducible.
random.seed(CONFIG["random_seed"])
np.random.seed(CONFIG["random_seed"])
# -----------------------------------------------------------------------------
# Differential Privacy Functions
# -----------------------------------------------------------------------------
def laplace_mechanism(value: float, sensitivity: float, epsilon: float) -> float:
    """
    Privatize a single numeric value with the Laplace mechanism.

    Draws one sample of Laplace noise calibrated to the query's sensitivity
    and the privacy budget, and adds it to the true value. This satisfies
    epsilon-differential privacy: for neighboring datasets D, D' (differing
    in one record), Pr[M(D) = x] <= e^ε · Pr[M(D') = x].

    Args:
        value: The true value to be privatized.
        sensitivity: Maximum change in the query output caused by
            adding/removing one individual.
        epsilon: Privacy budget (smaller means more privacy, more noise).

    Returns:
        The noisy value.

    Raises:
        ValueError: If epsilon is not strictly positive.
    """
    if epsilon <= 0:
        raise ValueError("Epsilon must be positive")
    # Laplace scale b = sensitivity / epsilon; a single draw per call.
    return value + np.random.laplace(loc=0, scale=sensitivity / epsilon)
def apply_differential_privacy(data: pd.DataFrame, epsilon: float) -> pd.DataFrame:
    """
    Return a differentially-private copy of the aggregated marketing data.

    Implements input perturbation: Laplace noise is added to every weekly
    value of each channel's spend column and of the revenue column before
    modeling, then values are clipped at zero (negative spend/revenue is
    meaningless downstream).

    Args:
        data: DataFrame with aggregated weekly marketing data.
        epsilon: Total privacy budget, split evenly over the queries.

    Returns:
        A noisy copy of `data`, or a plain copy when privacy is disabled
        via CONFIG["enable_privacy"].

    NOTE(review): the budget split counts one query per channel plus one for
    revenue, but noise is drawn independently for every weekly cell at
    epsilon_per_query. If one individual can contribute to multiple weeks,
    the effective budget differs from the headline epsilon — confirm the
    contribution model before relying on the stated guarantee.
    """
    if not CONFIG["enable_privacy"]:
        print("Privacy is disabled. Using original data without noise.")
        return data.copy()

    print(f"\nApplying Differential Privacy with ε={epsilon:.2f}")
    print(f"Privacy Guarantee: (ε={epsilon:.2f}, δ={CONFIG['delta']:.1e})-differential privacy")

    # Work on a copy so the caller's frame is untouched.
    private_data = data.copy()

    # Sequential composition: k queries at epsilon/k consume epsilon total.
    # Queries here = one per channel spend column + one for revenue.
    epsilon_per_query = epsilon / (len(CONFIG["channels"]) + 1)

    def _noisy(series: pd.Series, sensitivity: float) -> pd.Series:
        # Perturb each weekly value, then clip to keep it non-negative.
        perturbed = series.apply(
            lambda v: laplace_mechanism(v, sensitivity, epsilon_per_query))
        return perturbed.clip(lower=0)

    for ch in CONFIG["channels"]:
        col = f"spend_{ch}"
        if col in private_data.columns:
            private_data[col] = _noisy(private_data[col], CONFIG["sensitivity"]["spend"])

    if "revenue" in private_data.columns:
        private_data["revenue"] = _noisy(private_data["revenue"], CONFIG["sensitivity"]["revenue"])

    # Report the absolute per-week deviation actually injected.
    for ch in CONFIG["channels"]:
        col = f"spend_{ch}"
        if col in data.columns:
            noise = (private_data[col] - data[col]).abs()
            print(f" {ch} spend noise: mean={noise.mean():.2f}, max={noise.max():.2f}")
    if "revenue" in data.columns:
        revenue_noise = (private_data["revenue"] - data["revenue"]).abs()
        print(f" Revenue noise: mean={revenue_noise.mean():.2f}, max={revenue_noise.max():.2f}")

    return private_data
# -----------------------------------------------------------------------------
# Core MMM Transformation Functions
# -----------------------------------------------------------------------------
def geometric_adstock(x: np.ndarray, theta: float) -> np.ndarray:
    """
    Apply geometric adstock (carryover) decay to a weekly spend series.

    Each week retains a fraction `theta` of the previous week's adstocked
    value: y[t] = x[t] + theta * y[t-1]. Expressed as a first-order IIR
    filter (b = [1], a = [1, -theta]) so scipy does the recursion in C.
    """
    numerator, denominator = [1], [1, -theta]
    return lfilter(numerator, denominator, x)
def hill_function(x: np.ndarray, alpha: float, k: float, beta: float) -> np.ndarray:
    """
    S-shaped Hill response curve modeling diminishing returns.

    Contribution saturates toward `beta` as spend grows; `k` is the
    half-saturation point (response = beta/2 at x == k) and `alpha`
    controls the steepness of the S-curve.
    """
    x_pow = x**alpha
    return beta * x_pow / (k**alpha + x_pow)
# -----------------------------------------------------------------------------
# Data Generation
# -----------------------------------------------------------------------------
def generate_weekly_data() -> pd.DataFrame:
    """Simulate weekly spend, controls, and revenue from the "true" parameters."""
    n_weeks = CONFIG["num_weeks"]
    true = CONFIG["true_params"]
    weeks = np.arange(1, n_weeks + 1)
    df = pd.DataFrame({'week': weeks})

    # 1. Media spend and control variables (RNG order matters for seeding:
    #    per-channel uniforms first, then the promo indicator).
    for ch in CONFIG["channels"]:
        df[f"spend_{ch}"] = np.random.uniform(2000, 20000, n_weeks)
    df["promotions"] = (np.random.rand(n_weeks) < 0.15).astype(int)  # ~15% promo weeks

    # Yearly sinusoidal seasonality.
    seasonality = true["seasonality_amplitude"] * \
        np.sin(2 * np.pi * weeks / true["seasonality_period"])

    # 2. "True" revenue: base + seasonality + promotions + media response.
    total_revenue = CONFIG["base_revenue"] + seasonality
    total_revenue = total_revenue + df["promotions"] * true["promo_effect"]
    for ch in CONFIG["channels"]:
        ch_params = true[ch]
        adstocked = geometric_adstock(df[f"spend_{ch}"].values, ch_params["adstock_decay"])
        contribution = hill_function(
            adstocked, ch_params["hill_alpha"], ch_params["hill_K"], ch_params["hill_beta"])
        df[f"contribution_{ch}"] = contribution  # kept for later comparison
        total_revenue = total_revenue + contribution

    # 3. Gaussian observation noise; revenue floors at zero.
    df["revenue"] = total_revenue + np.random.normal(0, CONFIG["noise_std"], n_weeks)
    df["revenue"] = df["revenue"].clip(lower=0)
    return df
# -----------------------------------------------------------------------------
# Modeling
# -----------------------------------------------------------------------------
def objective_function(params: np.ndarray, df: pd.DataFrame) -> float:
    """
    Optimizer loss: sum of squared errors between predicted and actual revenue.

    Parameter vector layout: 4 values per channel
    (adstock_decay, hill_alpha, hill_K, hill_beta), then promo_effect last.
    Base revenue and seasonality are treated as known and taken from CONFIG
    rather than being estimated here.
    """
    n_channels = len(CONFIG["channels"])
    channel_params = np.array(params[:n_channels * 4]).reshape((n_channels, 4))
    promo_effect = params[n_channels * 4]

    true = CONFIG["true_params"]
    predicted = CONFIG["base_revenue"] + \
        true["seasonality_amplitude"] * \
        np.sin(2 * np.pi * df['week'] / true["seasonality_period"])
    predicted = predicted + df["promotions"] * promo_effect

    # Add each channel's adstocked, saturated contribution.
    for idx, ch in enumerate(CONFIG["channels"]):
        decay, alpha, k, beta = channel_params[idx]
        adstocked = geometric_adstock(df[f"spend_{ch}"].values, decay)
        predicted = predicted + hill_function(adstocked, alpha, k, beta)

    return np.sum((df["revenue"] - predicted) ** 2)
def fit_model(df: pd.DataFrame) -> Dict:
    """
    Fit the non-linear MMM with bounded L-BFGS-B.

    Args:
        df: Weekly data containing week, spend_<channel>, promotions,
            and revenue columns.

    Returns:
        Dict mapping each channel to its fitted {adstock_decay, hill_alpha,
        hill_K, hill_beta}, plus a top-level "promo_effect" entry.
    """
    num_channels = len(CONFIG["channels"])

    # Bounds keep the optimizer in plausible territory, per channel:
    # (adstock_decay, hill_alpha, hill_K, hill_beta).
    bounds = []
    for _ in CONFIG["channels"]:
        bounds.extend([
            (0.0, 0.9),     # adstock_decay: weekly carryover fraction
            (1.0, 5.0),     # hill_alpha: curve shape
            (5000, 50000),  # hill_K: half-saturation spend
            (5000, 50000),  # hill_beta: max channel effect
        ])
    bounds.append((0, 20000))  # promo_effect

    # Heuristic mid-range starting point, identical for every channel.
    initial_guesses = np.array([0.3, 2, 15000, 20000] * num_channels + [5000])
    print("Fitting model... This may take a moment.")
    result = minimize(
        objective_function,
        initial_guesses,
        args=(df,),
        bounds=bounds,
        method='L-BFGS-B'
    )
    print("Fitting complete.")

    # Fix: surface silent non-convergence instead of quietly returning
    # possibly-meaningless parameters.
    if not result.success:
        print(f"Warning: optimizer did not converge: {result.message}")

    # Unpack the flat parameter vector into a structured dictionary.
    fitted_params_list = result.x
    fitted_params = {}
    for i, ch in enumerate(CONFIG["channels"]):
        param_idx = i * 4
        fitted_params[ch] = {
            "adstock_decay": fitted_params_list[param_idx],
            "hill_alpha": fitted_params_list[param_idx + 1],
            "hill_K": fitted_params_list[param_idx + 2],
            "hill_beta": fitted_params_list[param_idx + 3],
        }
    fitted_params["promo_effect"] = fitted_params_list[num_channels * 4]
    return fitted_params
# -----------------------------------------------------------------------------
# Post-Modeling Analysis & Visualization
# -----------------------------------------------------------------------------
def calculate_marginal_roi(spend: float, adstock_decay: float, hill_alpha: float, hill_k: float, hill_beta: float) -> float:
    """
    Marginal ROI (mROI): expected extra revenue from the next dollar of spend.

    Evaluates the derivative of the Hill function at the steady-state
    adstocked spend level. Note: a full derivation would chain through the
    adstock dynamics; this is a simplified single-time-step response.
    """
    # Steady state of y = x + theta*y  =>  y = x / (1 - theta).
    steady_spend = spend / (1 - adstock_decay)
    k_pow = hill_k ** hill_alpha
    denominator = (k_pow + steady_spend ** hill_alpha) ** 2
    # Guard against a vanishing denominator before dividing.
    if denominator <= 1e-9:
        return 0.0
    return hill_beta * hill_alpha * k_pow * steady_spend ** (hill_alpha - 1) / denominator
def _baseline_revenue(df: pd.DataFrame) -> pd.Series:
    """Base revenue plus the (assumed known) seasonal component per week."""
    true = CONFIG["true_params"]
    return CONFIG["base_revenue"] + true["seasonality_amplitude"] * \
        np.sin(2 * np.pi * df['week'] / true["seasonality_period"])


def _fitted_contribution(df: pd.DataFrame, ch: str, params: Dict) -> np.ndarray:
    """Revenue contribution of one channel under its fitted parameters."""
    adstocked = geometric_adstock(df[f"spend_{ch}"].values, params["adstock_decay"])
    return hill_function(adstocked, params["hill_alpha"], params["hill_K"], params["hill_beta"])


def generate_plots(df: pd.DataFrame, fitted_params: Dict) -> None:
    """Generates and saves all output charts.

    Writes three PNGs to OUTPUT_DIR: fitted response curves, predicted vs.
    actual revenue, and a stacked weekly contribution breakdown.
    """
    # --- 1. Response Curves Plot ---
    plt.figure(figsize=(12, 7))
    for ch in CONFIG["channels"]:
        params = fitted_params[ch]
        spend_range = np.linspace(0, df[f"spend_{ch}"].max() * 1.2, 100)
        # Adstock is intentionally ignored: curves show instantaneous response.
        revenue_contribution = hill_function(spend_range, params["hill_alpha"], params["hill_K"], params["hill_beta"])
        plt.plot(spend_range, revenue_contribution, label=f"{ch} Response Curve")
    plt.title("Fitted Channel Response Curves (Saturation)", fontsize=16)
    plt.xlabel("Weekly Spend ($)", fontsize=12)
    plt.ylabel("Expected Revenue Contribution ($)", fontsize=12)
    plt.legend()
    plt.grid(True, which='both', linestyle='--', linewidth=0.5)
    plt.tight_layout()
    plt.savefig(os.path.join(OUTPUT_DIR, "response_curves.png"))
    plt.close()
    print("Saved response curves plot.")

    # --- 2. Predicted vs. Actual Revenue Plot ---
    # Re-calculate predicted revenue using fitted params.
    predicted_revenue = _baseline_revenue(df) + df["promotions"] * fitted_params["promo_effect"]
    for ch in CONFIG["channels"]:
        predicted_revenue += _fitted_contribution(df, ch, fitted_params[ch])
    plt.figure(figsize=(12, 6))
    plt.plot(df['week'], df['revenue'], label="Actual Revenue", alpha=0.8)
    plt.plot(df['week'], predicted_revenue, label="Predicted Revenue", linestyle='--')
    plt.title("Model Fit: Predicted vs. Actual Revenue", fontsize=16)
    plt.xlabel("Week", fontsize=12)
    plt.ylabel("Revenue ($)", fontsize=12)
    plt.legend()
    plt.grid(True, linestyle='--', linewidth=0.5)
    plt.tight_layout()
    plt.savefig(os.path.join(OUTPUT_DIR, "predicted_vs_actual.png"))
    plt.close()
    print("Saved predicted vs. actual plot.")

    # --- 3. Contribution Breakdown Plot ---
    contributions = pd.DataFrame(index=df['week'])
    # Fix: assign positionally via .to_numpy(). The frame is indexed by week
    # (1..n) while df-derived Series are indexed 0..n-1; assigning the raw
    # Series triggers pandas index alignment, shifting values by one row and
    # leaving NaN in the final week.
    contributions['Base & Seasonality'] = _baseline_revenue(df).to_numpy()
    contributions['Promotions'] = (df["promotions"] * fitted_params["promo_effect"]).to_numpy()
    for ch in CONFIG["channels"]:
        contributions[ch] = _fitted_contribution(df, ch, fitted_params[ch])
    plt.figure(figsize=(14, 8))
    plt.stackplot(df['week'], contributions.T, labels=contributions.columns)
    plt.plot(df['week'], df['revenue'], label="Actual Revenue", color='black', linestyle=':')
    plt.title("Weekly Revenue Contribution Breakdown", fontsize=16)
    plt.xlabel("Week", fontsize=12)
    plt.ylabel("Revenue ($)", fontsize=12)
    plt.legend(loc='upper left')
    plt.tight_layout()
    plt.savefig(os.path.join(OUTPUT_DIR, "contribution_breakdown.png"))
    plt.close()
    print("Saved contribution breakdown plot.")
def main():
    """Run the pipeline: simulate data, privatize, fit, summarize, plot."""
    banner = "=" * 70
    print(banner)
    print("PRIVACY-FIRST MEDIA MIX MODELING TOOLKIT")
    print(banner)

    print("\n1. Generating synthetic data with known ground truth...")
    df_original = generate_weekly_data()

    print("\n2. Applying privacy-preserving mechanisms...")
    df = apply_differential_privacy(df_original, CONFIG["epsilon"])

    print("\n3. Fitting the advanced MMM to the privatized data...")
    fitted_params = fit_model(df)

    print("\n4. Analyzing results and calculating mROI...")
    # (config key, human-readable label) pairs for the summary table rows.
    param_rows = [
        ("adstock_decay", "adstock_decay"),
        ("hill_alpha", "hill_alpha (shape)"),
        ("hill_K", "hill_K (saturation_point)"),
        ("hill_beta", "hill_beta (max_effect)"),
    ]
    summary_data = []
    for ch in CONFIG["channels"]:
        true = CONFIG["true_params"][ch]
        fitted = fitted_params[ch]
        avg_spend = df[f"spend_{ch}"].mean()
        mroi = calculate_marginal_roi(
            avg_spend, fitted['adstock_decay'], fitted['hill_alpha'],
            fitted['hill_K'], fitted['hill_beta']
        )
        for key, label in param_rows:
            summary_data.append({
                "channel": ch,
                "parameter": label,
                "true_value": true[key],
                "fitted_value": fitted[key],
            })
        # mROI has no single ground-truth value, hence true_value=None.
        summary_data.append({
            "channel": ch,
            "parameter": "mROI_at_avg_spend",
            "true_value": None,
            "fitted_value": mroi,
        })

    summary_df = pd.DataFrame(summary_data)
    summary_path = os.path.join(OUTPUT_DIR, "mmm_summary.csv")
    summary_df.to_csv(summary_path, index=False)
    print(f"\nMMM summary written to {summary_path}")
    print(summary_df)

    print("\n5. Generating visualizations...")
    generate_plots(df, fitted_params)

    print("\n" + banner)
    print("PRIVACY-FIRST MMM ANALYSIS COMPLETE")
    print(banner)
    print(f"\nPrivacy Guarantee: (ε={CONFIG['epsilon']:.2f}, δ={CONFIG['delta']:.1e})-differential privacy")
    print(f"All outputs saved to: {OUTPUT_DIR}/")
    print("\nThis analysis demonstrates responsible AI development by:")
    print(" • Using only aggregated data (privacy by design)")
    print(" • Applying differential privacy for formal guarantees")
    print(" • Balancing privacy protection with analytical utility")
    print(banner)
# Entry point: run the full analysis only when executed as a script,
# not when this module is imported.
if __name__ == "__main__":
    main()