Metrics Reference

mmm_eval.metrics

Accuracy metrics for MMM evaluation.

Functions

calculate_absolute_percentage_change(baseline_series: pd.Series, comparison_series: pd.Series) -> pd.Series

Calculate the absolute percentage change between two series.

Source code in mmm_eval/metrics/accuracy_functions.py
def calculate_absolute_percentage_change(baseline_series: pd.Series, comparison_series: pd.Series) -> pd.Series:
    """Calculate the absolute percentage change between two series."""
    return np.abs((comparison_series - baseline_series) / baseline_series)
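
A minimal usage sketch (the import path follows the source listing above; the ROI values are illustrative):

import pandas as pd
from mmm_eval.metrics.accuracy_functions import calculate_absolute_percentage_change

baseline = pd.Series({"tv": 2.0, "radio": 5.0})    # e.g. baseline ROI per channel
comparison = pd.Series({"tv": 2.5, "radio": 4.0})  # e.g. ROI after a model refresh

calculate_absolute_percentage_change(baseline, comparison)
# tv       0.25
# radio    0.20
# Note: a zero entry in `baseline` produces inf (or NaN) for that position
# rather than raising, per NumPy float-division semantics.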

calculate_mean_for_singular_values_across_cross_validation_folds(fold_metrics: list[AccuracyMetricResults], metric_name: AccuracyMetricNames) -> float

Calculate the mean of the fold metrics for single values.

Parameters:

Name          Type                         Description                    Default
fold_metrics  list[AccuracyMetricResults]  List of metric result objects  required
metric_name   AccuracyMetricNames          Name of the metric attribute   required

Returns:

Type   Description
float  Mean value as float

Source code in mmm_eval/metrics/accuracy_functions.py
def calculate_mean_for_singular_values_across_cross_validation_folds(
    fold_metrics: list[AccuracyMetricResults],
    metric_name: AccuracyMetricNames,
) -> float:
    """Calculate the mean of the fold metrics for single values.

    Args:
        fold_metrics: List of metric result objects
        metric_name: Name of the metric attribute

    Returns:
        Mean value as float

    """
    metric_attr = metric_name.value
    return np.mean([getattr(fold_metric, metric_attr) for fold_metric in fold_metrics])
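
A hedged usage sketch: the constructor keywords mape and r_squared are taken from AccuracyMetricResults.populate_object_with_metrics below, and the numeric values are illustrative.

from mmm_eval.metrics.accuracy_functions import (
    calculate_mean_for_singular_values_across_cross_validation_folds,
)
from mmm_eval.metrics.metric_models import AccuracyMetricNames, AccuracyMetricResults

# One result object per cross-validation fold
fold_metrics = [
    AccuracyMetricResults(mape=0.10, r_squared=0.90),
    AccuracyMetricResults(mape=0.14, r_squared=0.84),
]

calculate_mean_for_singular_values_across_cross_validation_folds(
    fold_metrics, AccuracyMetricNames.MAPE
)
# 0.12 -- np.mean over getattr(fold_metric, AccuracyMetricNames.MAPE.value)
# for each fold; assumes the enum value matches the attribute name "mape"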

calculate_means_for_series_across_cross_validation_folds(folds_of_series: list[pd.Series]) -> pd.Series

Calculate the mean of pandas Series across folds.

Parameters:

Name             Type          Description                                                     Default
folds_of_series  list[Series]  List of pandas Series (e.g., ROI series from different folds)  required

Returns:

Type    Description
Series  Mean Series with same index as input series

Source code in mmm_eval/metrics/accuracy_functions.py
def calculate_means_for_series_across_cross_validation_folds(
    folds_of_series: list[pd.Series],
) -> pd.Series:
    """Calculate the mean of pandas Series across folds.

    Args:
        folds_of_series: List of pandas Series (e.g., ROI series from different folds)

    Returns:
        Mean Series with same index as input series

    """
    return pd.concat(folds_of_series, axis=1).mean(axis=1)
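
A short sketch of the alignment behavior, assuming each fold produces a channel-indexed Series:

import pandas as pd
from mmm_eval.metrics.accuracy_functions import calculate_means_for_series_across_cross_validation_folds

fold_1 = pd.Series({"tv": 1.2, "radio": 0.8})
fold_2 = pd.Series({"tv": 1.0, "radio": 1.0})

calculate_means_for_series_across_cross_validation_folds([fold_1, fold_2])
# tv       1.1
# radio    0.9
# pd.concat(..., axis=1) aligns on the index, so a channel missing from one
# fold contributes NaN there and .mean(axis=1) averages over the folds that have it.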

calculate_std_for_singular_values_across_cross_validation_folds(fold_metrics: list[AccuracyMetricResults], metric_name: AccuracyMetricNames) -> float

Calculate the standard deviation of the fold metrics for single values.

Parameters:

Name          Type                         Description                    Default
fold_metrics  list[AccuracyMetricResults]  List of metric result objects  required
metric_name   AccuracyMetricNames          Name of the metric attribute   required

Returns:

Type   Description
float  Standard deviation value as float

Source code in mmm_eval/metrics/accuracy_functions.py
def calculate_std_for_singular_values_across_cross_validation_folds(
    fold_metrics: list[AccuracyMetricResults],
    metric_name: AccuracyMetricNames,
) -> float:
    """Calculate the standard deviation of the fold metrics for single values.

    Args:
        fold_metrics: List of metric result objects
        metric_name: Name of the metric attribute

    Returns:
        Standard deviation value as float

    """
    metric_attr = metric_name.value
    return np.std([getattr(fold_metric, metric_attr) for fold_metric in fold_metrics])
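
One detail worth noting: np.std defaults to the population standard deviation (ddof=0), whereas the pandas .std() used by the Series variant below defaults to the sample standard deviation (ddof=1), so the two helpers do not use the same estimator. A quick illustration with made-up fold values:

import numpy as np
import pandas as pd

values = [0.10, 0.14, 0.12]
np.std(values)           # ~0.0163  (population std, ddof=0) -- what this helper computes
pd.Series(values).std()  # 0.02     (sample std, ddof=1) -- what the Series helper computes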

calculate_stds_for_series_across_cross_validation_folds(folds_of_series: list[pd.Series]) -> pd.Series

Calculate the standard deviation of pandas Series across folds.

Parameters:

Name             Type          Description                                                     Default
folds_of_series  list[Series]  List of pandas Series (e.g., ROI series from different folds)  required

Returns:

Type    Description
Series  Standard deviation Series with same index as input series

Source code in mmm_eval/metrics/accuracy_functions.py
def calculate_stds_for_series_across_cross_validation_folds(
    folds_of_series: list[pd.Series],
) -> pd.Series:
    """Calculate the standard deviation of pandas Series across folds.

    Args:
        folds_of_series: List of pandas Series (e.g., ROI series from different folds)

    Returns:
        Standard deviation Series with same index as input series

    """
    return pd.concat(folds_of_series, axis=1).std(axis=1)
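
A usage sketch mirroring the mean helper. A channel observed in only one fold gets NaN, since a sample standard deviation (ddof=1) needs at least two observations:

import pandas as pd
from mmm_eval.metrics.accuracy_functions import calculate_stds_for_series_across_cross_validation_folds

fold_1 = pd.Series({"tv": 1.2, "radio": 0.8})
fold_2 = pd.Series({"tv": 1.0})  # "radio" missing from this fold

calculate_stds_for_series_across_cross_validation_folds([fold_1, fold_2])
# tv       0.141421  (sample std of [1.2, 1.0])
# radio         NaN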

Modules

accuracy_functions

Accuracy metrics for MMM evaluation.

exceptions

Exceptions for the metrics module.

Classes
InvalidMetricNameException

Bases: ValueError

Exception raised when an invalid metric name is provided.

metric_models

Classes
AccuracyMetricNames

Bases: MetricNamesBase

Define the names of the accuracy metrics.

AccuracyMetricResults

Bases: MetricResults

Define the results of the accuracy metrics.

Functions
populate_object_with_metrics(actual: pd.Series, predicted: pd.Series) -> AccuracyMetricResults classmethod

Populate the object with the calculated metrics.

Parameters:

Name       Type    Description           Default
actual     Series  The actual values     required
predicted  Series  The predicted values  required

Returns:

Type                   Description
AccuracyMetricResults  AccuracyMetricResults object with the metrics

Source code in mmm_eval/metrics/metric_models.py
@classmethod
def populate_object_with_metrics(cls, actual: pd.Series, predicted: pd.Series) -> "AccuracyMetricResults":
    """Populate the object with the calculated metrics.

    Args:
        actual: The actual values
        predicted: The predicted values

    Returns:
        AccuracyMetricResults object with the metrics

    """
    return cls(
        mape=mean_absolute_percentage_error(actual, predicted),
        r_squared=r2_score(actual, predicted),
    )
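
A minimal usage sketch; the metrics are computed with scikit-learn's mean_absolute_percentage_error and r2_score, as in the source above:

import pandas as pd
from mmm_eval.metrics.metric_models import AccuracyMetricResults

actual = pd.Series([100.0, 120.0, 90.0])
predicted = pd.Series([110.0, 114.0, 93.0])

results = AccuracyMetricResults.populate_object_with_metrics(actual=actual, predicted=predicted)
results.mape       # mean absolute percentage error of the predictions
results.r_squared  # coefficient of determination
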
to_df() -> pd.DataFrame

Convert the accuracy metric results to a long DataFrame format.

Source code in mmm_eval/metrics/metric_models.py
def to_df(self) -> pd.DataFrame:
    """Convert the accuracy metric results to a long DataFrame format."""
    df = pd.DataFrame(
        [
            self._create_single_metric_dataframe_row(
                general_metric_name=AccuracyMetricNames.MAPE.value,
                specific_metric_name=AccuracyMetricNames.MAPE.value,
                metric_value=self.mape,
            ),
            self._create_single_metric_dataframe_row(
                general_metric_name=AccuracyMetricNames.R_SQUARED.value,
                specific_metric_name=AccuracyMetricNames.R_SQUARED.value,
                metric_value=self.r_squared,
            ),
        ]
    )
    return self.add_pass_fail_column(df)
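
Continuing the sketch above. The literal column labels come from TestResultDFAttributes, which is not reproduced in full here, so the names below are assumptions about its values:

df = results.to_df()
# Long format, one row per metric, roughly:
#   general_metric_name  specific_metric_name  metric_value  metric_pass
#   mape                 mape                  ...           True/False
#   r_squared            r_squared             ...           True/False
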
CrossValidationMetricNames

Bases: MetricNamesBase

Define the names of the cross-validation metrics.

CrossValidationMetricResults

Bases: MetricResults

Define the results of the cross-validation metrics.

Functions
to_df() -> pd.DataFrame

Convert the cross-validation metric results to a long DataFrame format.

Source code in mmm_eval/metrics/metric_models.py
def to_df(self) -> pd.DataFrame:
    """Convert the cross-validation metric results to a long DataFrame format."""
    df = pd.DataFrame(
        [
            self._create_single_metric_dataframe_row(
                general_metric_name=CrossValidationMetricNames.MEAN_MAPE.value,
                specific_metric_name=CrossValidationMetricNames.MEAN_MAPE.value,
                metric_value=self.mean_mape,
            ),
            self._create_single_metric_dataframe_row(
                general_metric_name=CrossValidationMetricNames.STD_MAPE.value,
                specific_metric_name=CrossValidationMetricNames.STD_MAPE.value,
                metric_value=self.std_mape,
            ),
            self._create_single_metric_dataframe_row(
                general_metric_name=CrossValidationMetricNames.MEAN_R_SQUARED.value,
                specific_metric_name=CrossValidationMetricNames.MEAN_R_SQUARED.value,
                metric_value=self.mean_r_squared,
            ),
        ]
    )
    return self.add_pass_fail_column(df)
MetricNamesBase

Bases: Enum

Base class for metric name enums.

Functions
to_list() -> list[str] classmethod

Convert the enum to a list of strings.

Source code in mmm_eval/metrics/metric_models.py
@classmethod
def to_list(cls) -> list[str]:
    """Convert the enum to a list of strings."""
    return [member.value for member in cls]
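
For example (the literal strings depend on the enum definitions, so these values are illustrative):

from mmm_eval.metrics.metric_models import AccuracyMetricNames

AccuracyMetricNames.to_list()
# e.g. ["mape", "r_squared"] -- the .value of each member, in definition order
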
MetricResults

Bases: BaseModel

Define the results of the metrics.

Functions
add_pass_fail_column(df: pd.DataFrame) -> pd.DataFrame

Add a pass/fail column to the DataFrame based on metric thresholds.

Parameters:

Name  Type       Description                                                   Default
df    DataFrame  DataFrame with general_metric_name and metric_value columns  required

Returns:

Type       Description
DataFrame  DataFrame with additional metric_pass column

Source code in mmm_eval/metrics/metric_models.py
def add_pass_fail_column(self, df: pd.DataFrame) -> pd.DataFrame:
    """Add a pass/fail column to the DataFrame based on metric thresholds.

    Args:
        df: DataFrame with general_metric_name and metric_value columns

    Returns:
        DataFrame with additional metric_pass column

    """
    df_copy = df.copy()
    df_copy[TestResultDFAttributes.METRIC_PASS.value] = df_copy.apply(
        lambda row: self._check_metric_threshold(
            row[TestResultDFAttributes.GENERAL_METRIC_NAME.value], row[TestResultDFAttributes.METRIC_VALUE.value]
        ),
        axis=1,
    )
    return df_copy
to_df() -> pd.DataFrame

Convert the class of test results to a flat DataFrame format.

Source code in mmm_eval/metrics/metric_models.py
def to_df(self) -> pd.DataFrame:
    """Convert the class of test results to a flat DataFrame format."""
    raise NotImplementedError("This method should be implemented by the subclass.")
to_dict() -> dict[str, Any]

Convert the class of test results to dictionary format.

Source code in mmm_eval/metrics/metric_models.py
def to_dict(self) -> dict[str, Any]:
    """Convert the class of test results to dictionary format."""
    return self.model_dump()
PerturbationMetricNames

Bases: MetricNamesBase

Define the names of the perturbation metrics.

PerturbationMetricResults

Bases: MetricResults

Define the results of the perturbation metrics.

Functions
to_df() -> pd.DataFrame

Convert the perturbation metric results to a long DataFrame format.

Source code in mmm_eval/metrics/metric_models.py
def to_df(self) -> pd.DataFrame:
    """Convert the perturbation metric results to a long DataFrame format."""
    df = pd.DataFrame(
        self._create_channel_based_metric_dataframe_rows(
            channel_series=self.percentage_change_for_each_channel,
            metric_name=PerturbationMetricNames.PERCENTAGE_CHANGE,
        )
    )
    return self.add_pass_fail_column(df)
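
A hedged construction sketch, assuming percentage_change_for_each_channel is a channel-indexed pandas Series and the model's only required field (only the field name is confirmed by the source above):

import pandas as pd
from mmm_eval.metrics.metric_models import PerturbationMetricResults

results = PerturbationMetricResults(
    percentage_change_for_each_channel=pd.Series({"tv": 0.03, "radio": 0.12})
)
df = results.to_df()  # one row per channel, plus the metric_pass column
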
RefreshStabilityMetricNames

Bases: MetricNamesBase

Define the names of the stability metrics.

RefreshStabilityMetricResults

Bases: MetricResults

Define the results of the refresh stability metrics.

Functions
to_df() -> pd.DataFrame

Convert the refresh stability metric results to a long DataFrame format.

Source code in mmm_eval/metrics/metric_models.py
def to_df(self) -> pd.DataFrame:
    """Convert the refresh stability metric results to a long DataFrame format."""
    rows = []

    # Add mean and std percentage change for each channel
    rows.extend(
        self._create_channel_based_metric_dataframe_rows(
            channel_series=self.mean_percentage_change_for_each_channel,
            metric_name=RefreshStabilityMetricNames.MEAN_PERCENTAGE_CHANGE,
        )
    )
    rows.extend(
        self._create_channel_based_metric_dataframe_rows(
            channel_series=self.std_percentage_change_for_each_channel,
            metric_name=RefreshStabilityMetricNames.STD_PERCENTAGE_CHANGE,
        )
    )

    df = pd.DataFrame(rows)
    return self.add_pass_fail_column(df)
TestResultDFAttributes

Bases: MetricNamesBase

Define the attributes of the test result DataFrame.

threshold_constants

Classes
AccuracyThresholdConstants

Constants for the accuracy threshold.

CrossValidationThresholdConstants

Constants for the cross-validation threshold.

PerturbationThresholdConstants

Constants for the perturbation threshold.

RefreshStabilityThresholdConstants

Constants for the refresh stability threshold.