data-science

Compare original and translation side by side

🇺🇸

Original

English
🇨🇳

Translation

Chinese
<!-- Adapted from: claude-skills/engineering-team/senior-data-scientist -->

Data Science Guide

数据科学指南

Statistical modeling, experimentation, and advanced analytics.
统计建模、实验与高级分析。

When to Use

适用场景

  • Designing A/B tests and experiments
  • Building predictive models
  • Performing causal analysis
  • Feature engineering
  • Statistical hypothesis testing
  • 设计A/B测试与实验
  • 构建预测模型
  • 执行因果分析
  • 特征工程
  • 统计假设检验

Tech Stack

技术栈

| Category | Tools |
| --- | --- |
| Languages | Python, SQL, R |
| Analysis | NumPy, Pandas, SciPy |
| ML | Scikit-learn, XGBoost, LightGBM |
| Visualization | Matplotlib, Seaborn, Plotly |
| Statistics | Statsmodels, PyMC |
| Notebooks | Jupyter, VS Code |
| 类别 | 工具 |
| --- | --- |
| 编程语言 | Python, SQL, R |
| 分析工具 | NumPy, Pandas, SciPy |
| 机器学习 | Scikit-learn, XGBoost, LightGBM |
| 可视化工具 | Matplotlib, Seaborn, Plotly |
| 统计工具 | Statsmodels, PyMC |
| 笔记本工具 | Jupyter, VS Code |

Experiment Design

实验设计

A/B Test Framework

A/B测试框架

python
import scipy.stats as stats
import numpy as np

def calculate_sample_size(baseline_rate, mde, alpha=0.05, power=0.8):
    """Return the required sample size *per group* for a two-sided A/B test.

    Args:
        baseline_rate: Control-group conversion rate (0 < rate < 1).
        mde: Minimum detectable effect, as an absolute difference in rates.
        alpha: Significance level (Type I error rate).
        power: Desired statistical power (1 - Type II error rate).

    Returns:
        int: Smallest whole number of subjects needed in each group.
    """
    # Standardized effect size (Cohen's h-style approximation using the
    # baseline variance).
    effect_size = mde / np.sqrt(baseline_rate * (1 - baseline_rate))
    # BUG FIX: scipy.stats has no TTestIndPower (that class lives in
    # statsmodels.stats.power), so the original raised AttributeError.
    # Use the standard normal-approximation power formula instead, which
    # needs only scipy: n = 2 * ((z_{1-alpha/2} + z_{power}) / d)^2 per group.
    z_alpha = stats.norm.ppf(1 - alpha / 2)
    z_power = stats.norm.ppf(power)
    n = 2 * ((z_alpha + z_power) / effect_size) ** 2
    return int(np.ceil(n))
python
import scipy.stats as stats
import numpy as np

def calculate_sample_size(baseline_rate, mde, alpha=0.05, power=0.8):
    """Return the required sample size *per group* for a two-sided A/B test.

    Args:
        baseline_rate: Control-group conversion rate (0 < rate < 1).
        mde: Minimum detectable effect, as an absolute difference in rates.
        alpha: Significance level (Type I error rate).
        power: Desired statistical power (1 - Type II error rate).

    Returns:
        int: Smallest whole number of subjects needed in each group.
    """
    # Standardized effect size (Cohen's h-style approximation using the
    # baseline variance).
    effect_size = mde / np.sqrt(baseline_rate * (1 - baseline_rate))
    # BUG FIX: scipy.stats has no TTestIndPower (that class lives in
    # statsmodels.stats.power), so the original raised AttributeError.
    # Use the standard normal-approximation power formula instead, which
    # needs only scipy: n = 2 * ((z_{1-alpha/2} + z_{power}) / d)^2 per group.
    z_alpha = stats.norm.ppf(1 - alpha / 2)
    z_power = stats.norm.ppf(power)
    n = 2 * ((z_alpha + z_power) / effect_size) ** 2
    return int(np.ceil(n))

Example: 5% baseline, 10% relative lift

Example: 5% baseline, 10% relative lift

n = calculate_sample_size(0.05, 0.005)
print(f"Required sample size per group: {n}")
n = calculate_sample_size(0.05, 0.005)
print(f"Required sample size per group: {n}")

Statistical Significance

统计显著性

python
def analyze_ab_test(control, treatment):
    """Analyze A/B test results with a two-proportion z-test.

    Expects two binary-outcome arrays (0/1 conversions) and returns the
    per-arm rates, relative lift, two-sided p-value, and a significance
    flag at the 0.05 level.
    """
    size_c, size_t = len(control), len(treatment)
    rate_c, rate_t = control.mean(), treatment.mean()
    # Pooled conversion rate under the null hypothesis of no difference.
    pooled = (control.sum() + treatment.sum()) / (size_c + size_t)

    std_err = np.sqrt(pooled * (1 - pooled) * (1 / size_c + 1 / size_t))
    z_stat = (rate_t - rate_c) / std_err
    # Two-sided p-value from the standard normal tail.
    p_val = 2 * (1 - stats.norm.cdf(abs(z_stat)))

    result = {
        'control_rate': rate_c,
        'treatment_rate': rate_t,
        'lift': (rate_t - rate_c) / rate_c,
        'p_value': p_val,
        'significant': p_val < 0.05,
    }
    return result
python
def analyze_ab_test(control, treatment):
    """Analyze A/B test results with a two-proportion z-test.

    Expects two binary-outcome arrays (0/1 conversions) and returns the
    per-arm rates, relative lift, two-sided p-value, and a significance
    flag at the 0.05 level.
    """
    size_c, size_t = len(control), len(treatment)
    rate_c, rate_t = control.mean(), treatment.mean()
    # Pooled conversion rate under the null hypothesis of no difference.
    pooled = (control.sum() + treatment.sum()) / (size_c + size_t)

    std_err = np.sqrt(pooled * (1 - pooled) * (1 / size_c + 1 / size_t))
    z_stat = (rate_t - rate_c) / std_err
    # Two-sided p-value from the standard normal tail.
    p_val = 2 * (1 - stats.norm.cdf(abs(z_stat)))

    result = {
        'control_rate': rate_c,
        'treatment_rate': rate_t,
        'lift': (rate_t - rate_c) / rate_c,
        'p_value': p_val,
        'significant': p_val < 0.05,
    }
    return result

Feature Engineering

特征工程

Common Patterns

常见模式

python
import pandas as pd
from sklearn.preprocessing import StandardScaler

def engineer_features(df):
    """Feature engineering pipeline.

    Adds temporal, per-user aggregate, and ratio features.

    Args:
        df: DataFrame with at least 'timestamp' (datetime64), 'user_id',
            and 'amount' columns.

    Returns:
        A new DataFrame with the engineered columns added; the input
        frame is left unmodified.
    """
    # Work on a copy so callers' data is not mutated as a side effect
    # (the original wrote new columns directly onto the argument).
    df = df.copy()

    # Temporal features
    df['hour'] = df['timestamp'].dt.hour
    df['day_of_week'] = df['timestamp'].dt.dayofweek
    df['is_weekend'] = df['day_of_week'].isin([5, 6])  # Sat=5, Sun=6

    # Per-user aggregations, broadcast back onto each row via transform
    df['user_avg_spend'] = df.groupby('user_id')['amount'].transform('mean')
    df['user_transaction_count'] = df.groupby('user_id')['amount'].transform('count')

    # Ratio of this transaction to the user's average spend
    df['spend_vs_avg'] = df['amount'] / df['user_avg_spend']

    return df
python
import pandas as pd
from sklearn.preprocessing import StandardScaler

def engineer_features(df):
    """Feature engineering pipeline.

    Adds temporal, per-user aggregate, and ratio features.

    Args:
        df: DataFrame with at least 'timestamp' (datetime64), 'user_id',
            and 'amount' columns.

    Returns:
        A new DataFrame with the engineered columns added; the input
        frame is left unmodified.
    """
    # Work on a copy so callers' data is not mutated as a side effect
    # (the original wrote new columns directly onto the argument).
    df = df.copy()

    # Temporal features
    df['hour'] = df['timestamp'].dt.hour
    df['day_of_week'] = df['timestamp'].dt.dayofweek
    df['is_weekend'] = df['day_of_week'].isin([5, 6])  # Sat=5, Sun=6

    # Per-user aggregations, broadcast back onto each row via transform
    df['user_avg_spend'] = df.groupby('user_id')['amount'].transform('mean')
    df['user_transaction_count'] = df.groupby('user_id')['amount'].transform('count')

    # Ratio of this transaction to the user's average spend
    df['spend_vs_avg'] = df['amount'] / df['user_avg_spend']

    return df

Feature Selection

特征选择

python
from sklearn.feature_selection import mutual_info_classif

def select_features(X, y, k=10):
    """Select the top k feature names by mutual information with y.

    Returned names are ordered by ascending MI score (argsort is
    ascending, so the best scores sit at the tail of the index array).
    """
    scores = mutual_info_classif(X, y)
    best_idx = np.argsort(scores)[-k:]
    return [name for name in X.columns[best_idx]]
python
from sklearn.feature_selection import mutual_info_classif

def select_features(X, y, k=10):
    """Select the top k feature names by mutual information with y.

    Returned names are ordered by ascending MI score (argsort is
    ascending, so the best scores sit at the tail of the index array).
    """
    scores = mutual_info_classif(X, y)
    best_idx = np.argsort(scores)[-k:]
    return [name for name in X.columns[best_idx]]

Model Evaluation

模型评估

Cross-Validation

交叉验证

python
from sklearn.model_selection import cross_val_score, cross_validate, StratifiedKFold

def evaluate_model(model, X, y):
    """Robust model evaluation via stratified 5-fold cross-validation.

    Args:
        model: A scikit-learn classifier (must support the scorers below;
            roc_auc additionally needs predict_proba or decision_function).
        X: Feature matrix.
        y: Binary target vector.

    Returns:
        dict: metric name -> "mean (+/- 2*std)" formatted string.
    """
    cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)

    # cross_validate fits each fold once and evaluates every scorer on it.
    # The original called cross_val_score once per metric, re-running the
    # whole CV loop four times (20 fits instead of 5) for identical values.
    metrics = {
        'accuracy': 'accuracy',
        'precision': 'precision',
        'recall': 'recall',
        'auc': 'roc_auc',
    }
    results = cross_validate(model, X, y, cv=cv, scoring=list(metrics.values()))

    return {
        name: f"{results['test_' + scorer].mean():.3f} (+/- {results['test_' + scorer].std()*2:.3f})"
        for name, scorer in metrics.items()
    }
python
from sklearn.model_selection import cross_val_score, cross_validate, StratifiedKFold

def evaluate_model(model, X, y):
    """Robust model evaluation via stratified 5-fold cross-validation.

    Args:
        model: A scikit-learn classifier (must support the scorers below;
            roc_auc additionally needs predict_proba or decision_function).
        X: Feature matrix.
        y: Binary target vector.

    Returns:
        dict: metric name -> "mean (+/- 2*std)" formatted string.
    """
    cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)

    # cross_validate fits each fold once and evaluates every scorer on it.
    # The original called cross_val_score once per metric, re-running the
    # whole CV loop four times (20 fits instead of 5) for identical values.
    metrics = {
        'accuracy': 'accuracy',
        'precision': 'precision',
        'recall': 'recall',
        'auc': 'roc_auc',
    }
    results = cross_validate(model, X, y, cv=cv, scoring=list(metrics.values()))

    return {
        name: f"{results['test_' + scorer].mean():.3f} (+/- {results['test_' + scorer].std()*2:.3f})"
        for name, scorer in metrics.items()
    }

Causal Inference

因果推断

Propensity Score Matching

倾向得分匹配

python
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import NearestNeighbors

def propensity_matching(df, treatment_col, features):
    """Match treatment and control rows on estimated propensity scores.

    Fits a logistic model of treatment assignment, writes a 'propensity'
    column onto df, then pairs each treated row with the control row
    whose score is closest (1-NN, matching with replacement).

    Returns:
        (treated_df, matched_control_df) with aligned row order.
    """
    # Estimate P(treatment | features) and store it on the frame.
    ps_model = LogisticRegression()
    ps_model.fit(df[features], df[treatment_col])
    df['propensity'] = ps_model.predict_proba(df[features])[:, 1]

    # Split the arms by treatment indicator.
    treated_rows = df[df[treatment_col] == 1]
    control_rows = df[df[treatment_col] == 0]

    # Nearest-neighbor match on the 1-D propensity score.
    matcher = NearestNeighbors(n_neighbors=1)
    matcher.fit(control_rows[['propensity']])
    _, match_idx = matcher.kneighbors(treated_rows[['propensity']])

    return treated_rows, control_rows.iloc[match_idx.flatten()]
python
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import NearestNeighbors

def propensity_matching(df, treatment_col, features):
    """Match treatment and control rows on estimated propensity scores.

    Fits a logistic model of treatment assignment, writes a 'propensity'
    column onto df, then pairs each treated row with the control row
    whose score is closest (1-NN, matching with replacement).

    Returns:
        (treated_df, matched_control_df) with aligned row order.
    """
    # Estimate P(treatment | features) and store it on the frame.
    ps_model = LogisticRegression()
    ps_model.fit(df[features], df[treatment_col])
    df['propensity'] = ps_model.predict_proba(df[features])[:, 1]

    # Split the arms by treatment indicator.
    treated_rows = df[df[treatment_col] == 1]
    control_rows = df[df[treatment_col] == 0]

    # Nearest-neighbor match on the 1-D propensity score.
    matcher = NearestNeighbors(n_neighbors=1)
    matcher.fit(control_rows[['propensity']])
    _, match_idx = matcher.kneighbors(treated_rows[['propensity']])

    return treated_rows, control_rows.iloc[match_idx.flatten()]

Best Practices

最佳实践

Analysis Workflow

分析工作流

  1. Define hypothesis clearly
  2. Calculate required sample size
  3. Design experiment (randomization)
  4. Collect data with quality checks
  5. Analyze with appropriate tests
  6. Report with confidence intervals
  1. 清晰定义假设
  2. 计算所需样本量
  3. 设计实验(随机化)
  4. 收集数据并进行质量检查
  5. 使用合适的测试方法进行分析
  6. 结合置信区间撰写报告

Common Pitfalls

常见误区

  • Multiple comparisons without correction
  • Peeking at results before sample size reached
  • Simpson's paradox in aggregations
  • Survivorship bias in cohort analysis
  • Correlation vs causation confusion
  • 未校正的多重比较
  • 未达到样本量就查看结果
  • 聚合分析中的辛普森悖论
  • 群组分析中的幸存者偏差
  • 混淆相关性与因果关系