Unverified Commit 0f09420d authored by Claude's avatar Claude
Browse files

Fix MAPE calculation to use correct denominator

- Changed calculate_mape to divide by valid_count (non-zero actuals) instead of total count
- Added proper validation checks for empty/mismatched arrays
- Made accuracy calculation more explicit with clear loop
- Fixed bug where MAPE was incorrectly calculated when some actual values were zero
parent 4ffedd7c
Loading
Loading
Loading
Loading
+21 −8
Original line number Diff line number Diff line
@@ -44,6 +44,7 @@ def calculate_mape(actual_values, predicted_values):
    Calculate Mean Absolute Percentage Error (MAPE)

    MAPE = (1/n) * Σ_{actual ≠ 0} (|actual - predicted| / |actual|) * 100
    where the sum runs over pairs with non-zero actual values and
    n is the count of those non-zero actual values

    Args:
        actual_values: List of actual values
@@ -52,17 +53,25 @@ def calculate_mape(actual_values, predicted_values):
    Returns:
        MAPE as percentage
    """
    if len(actual_values) == 0 or len(predicted_values) == 0:
    if not actual_values or not predicted_values:
        return 0.0

    if len(actual_values) != len(predicted_values):
        return 0.0

    n = len(actual_values)
    total_percentage_error = 0.0
    valid_count = 0

    for actual, predicted in zip(actual_values, predicted_values):
        if actual != 0:
            total_percentage_error += abs(actual - predicted) / abs(actual)
        if actual != 0:  # Only include non-zero actuals
            percentage_error = abs(actual - predicted) / abs(actual)
            total_percentage_error += percentage_error
            valid_count += 1

    if valid_count == 0:
        return 0.0

    mape = (total_percentage_error / n) * 100
    mape = (total_percentage_error / valid_count) * 100
    return mape


@@ -79,15 +88,19 @@ def calculate_accuracy(actual_values, predicted_values):
    Returns:
        Accuracy as percentage
    """
    if len(actual_values) == 0 or len(predicted_values) == 0:
    if not actual_values or not predicted_values:
        return 0.0

    if len(actual_values) != len(predicted_values):
        return 0.0

    total_actual = sum(actual_values)
    if total_actual == 0:
        return 0.0

    total_absolute_error = sum(abs(actual - predicted)
                               for actual, predicted in zip(actual_values, predicted_values))
    total_absolute_error = 0.0
    for actual, predicted in zip(actual_values, predicted_values):
        total_absolute_error += abs(actual - predicted)

    accuracy = (1 - (total_absolute_error / total_actual)) * 100
    return accuracy