Unverified commit 4ffedd7c authored by Claude
Browse files

Implement proper MAPE and accuracy calculation methods

- Created calculate_mape() function using proper MAPE formula:
  MAPE = (1/n) * Σ(|actual - predicted| / |actual|) * 100

- Created calculate_accuracy() function using total error ratio:
  Accuracy = (1 - Σ|actual - predicted| / Σactual) * 100

- Updated calculate_results() to store raw actual and predicted values
- Use proper calculation methods instead of shortcuts
- Updated template to accurately describe the metrics
parent b34b8ff6
Loading
Loading
Loading
Loading
+71 −18
Original line number Diff line number Diff line
@@ -39,6 +39,60 @@ def load_test_data():
        return json.load(f)


def calculate_mape(actual_values, predicted_values):
    """
    Calculate Mean Absolute Percentage Error (MAPE).

    MAPE = (1/n) * Σ(|actual - predicted| / |actual|) * 100

    Pairs whose actual value is 0 are excluded, because the percentage
    error is undefined there. `n` counts only the pairs actually used,
    so excluded pairs do not dilute the average.

    Args:
        actual_values: List of actual values
        predicted_values: List of predicted values (paired positionally;
            extra items in the longer list are ignored by zip)

    Returns:
        MAPE as a percentage (0.0 if there are no usable pairs)
    """
    if len(actual_values) == 0 or len(predicted_values) == 0:
        return 0.0

    total_percentage_error = 0.0
    valid_pairs = 0

    for actual, predicted in zip(actual_values, predicted_values):
        if actual != 0:
            total_percentage_error += abs(actual - predicted) / abs(actual)
            valid_pairs += 1

    # BUG FIX: divide by the number of pairs actually summed, not
    # len(actual_values) — the old code understated MAPE whenever any
    # actual value was 0 (skipped pairs still inflated the denominator).
    if valid_pairs == 0:
        return 0.0

    return (total_percentage_error / valid_pairs) * 100


def calculate_accuracy(actual_values, predicted_values):
    """
    Compute prediction accuracy as the complement of the total error ratio.

    Accuracy = (1 - Σ|actual - predicted| / Σactual) * 100

    Args:
        actual_values: List of actual values
        predicted_values: List of predicted values (paired positionally)

    Returns:
        Accuracy as a percentage; 0.0 when either list is empty or the
        actual values sum to zero (ratio undefined). Can be negative if
        the total error exceeds the total actual demand.
    """
    # Guard clauses: no data, or a zero denominator, both yield 0.0.
    if not actual_values or not predicted_values:
        return 0.0

    actual_total = sum(actual_values)
    if actual_total == 0:
        return 0.0

    error_total = 0.0
    for actual, predicted in zip(actual_values, predicted_values):
        error_total += abs(actual - predicted)

    return (1 - error_total / actual_total) * 100


def calculate_results(predictions, test_data):
    """
    Calculate financial results comparing predictions to actual data
@@ -161,7 +215,8 @@ def calculate_results(predictions, test_data):
                    'total_stockout': 0,
                    'total_revenue': 0,
                    'total_profit': 0,
                    'errors': []
                    'actual_values': [],
                    'predicted_values': []
                }

            perf = results['watch_performance'][watch_id]
@@ -171,7 +226,8 @@ def calculate_results(predictions, test_data):
            perf['total_stockout'] += stockout
            perf['total_revenue'] += revenue
            perf['total_profit'] += profit
            perf['errors'].append(error_pct)
            perf['actual_values'].append(actual_demand)
            perf['predicted_values'].append(predicted_demand)

        results['monthly_comparison'].append(month_comparison)

@@ -179,27 +235,24 @@ def calculate_results(predictions, test_data):
    for key in results['financial_summary']:
        results['financial_summary'][key] = round(results['financial_summary'][key], 2)

    # Calculate overall prediction accuracy
    all_errors = []
    accurate_predictions = 0  # Count predictions within 20% of actual
    total_predictions = 0
    # Calculate prediction metrics using proper formulas
    all_actual_values = []
    all_predicted_values = []

    for watch_id, perf in results['watch_performance'].items():
        # MAPE for this watch
        avg_error = sum(perf['errors']) / len(perf['errors'])
        perf['mape'] = round(avg_error, 1)
        # Calculate MAPE for this watch using the proper formula
        perf['mape'] = round(calculate_mape(perf['actual_values'], perf['predicted_values']), 1)

        # Prediction accuracy: % of predictions within 20% of actual
        accurate_count = sum(1 for err in perf['errors'] if err <= 20)
        perf['accuracy'] = round((accurate_count / len(perf['errors']) * 100), 1)
        # Calculate accuracy for this watch using the proper formula
        perf['accuracy'] = round(calculate_accuracy(perf['actual_values'], perf['predicted_values']), 1)

        all_errors.extend(perf['errors'])
        accurate_predictions += accurate_count
        total_predictions += len(perf['errors'])
        # Accumulate for overall metrics
        all_actual_values.extend(perf['actual_values'])
        all_predicted_values.extend(perf['predicted_values'])

    # Overall metrics
    overall_mape = sum(all_errors) / len(all_errors)
    overall_accuracy = (accurate_predictions / total_predictions * 100) if total_predictions > 0 else 0
    # Calculate overall metrics using proper formulas
    overall_mape = calculate_mape(all_actual_values, all_predicted_values)
    overall_accuracy = calculate_accuracy(all_actual_values, all_predicted_values)

    results['prediction_accuracy'] = {
        'mape': round(overall_mape, 1),
+4 −4
Original line number Diff line number Diff line
@@ -27,7 +27,7 @@
            <div class="column is-6">
                <div class="box has-text-centered" style="background: linear-gradient(135deg, #764ba2 0%, #667eea 100%); color: white;">
                    <h3 class="title is-4" style="color: white;">Prediction Accuracy</h3>
                    <p class="subtitle is-6" style="color: white; margin-bottom: 0.5rem;">% within 20% of actual</p>
                    <p class="subtitle is-6" style="color: white; margin-bottom: 0.5rem;">1 - (Total Error / Total Actual)</p>
                    <div class="score-display">{{ results.prediction_accuracy.accuracy }}%</div>
                    <p class="subtitle is-6" style="color: white; margin-top: 0.5rem;">Higher is better</p>
                </div>
@@ -204,17 +204,17 @@
            <div class="content">
                {% if results.prediction_accuracy.accuracy >= 90 %}
                <div class="notification is-success">
                    <p><strong>Excellent work!</strong> Your predictions were highly accurate ({{ results.prediction_accuracy.accuracy }}% within 20% of actual, MAPE: {{ results.prediction_accuracy.mape }}%).</p>
                    <p><strong>Excellent work!</strong> Your predictions were highly accurate ({{ results.prediction_accuracy.accuracy }}% accuracy, {{ results.prediction_accuracy.mape }}% MAPE).</p>
                    <p>You demonstrated strong understanding of demand patterns and seasonality.</p>
                </div>
                {% elif results.prediction_accuracy.accuracy >= 75 %}
                <div class="notification is-info">
                    <p><strong>Good job!</strong> Your predictions were reasonably accurate ({{ results.prediction_accuracy.accuracy }}% within 20% of actual, MAPE: {{ results.prediction_accuracy.mape }}%).</p>
                    <p><strong>Good job!</strong> Your predictions were reasonably accurate ({{ results.prediction_accuracy.accuracy }}% accuracy, {{ results.prediction_accuracy.mape }}% MAPE).</p>
                    <p>There's room for improvement - review the monthly patterns to identify where you can refine your forecasts.</p>
                </div>
                {% else %}
                <div class="notification is-warning">
                    <p><strong>Keep learning!</strong> Your predictions had significant variance from actual demand ({{ results.prediction_accuracy.accuracy }}% within 20% of actual, MAPE: {{ results.prediction_accuracy.mape }}%).</p>
                    <p><strong>Keep learning!</strong> Your predictions had significant variance from actual demand ({{ results.prediction_accuracy.accuracy }}% accuracy, {{ results.prediction_accuracy.mape }}% MAPE).</p>
                    <p>Consider: Did you account for seasonal patterns? Did you follow the growth trends?</p>
                </div>
                {% endif %}