From afa31e2a943a6f84d19f88e77b04e89397b5836d Mon Sep 17 00:00:00 2001
From: foefl
Date: Wed, 16 Apr 2025 11:18:51 +0200
Subject: [PATCH] major overhaul of forecast pipeline

---
 pyproject.toml                       |  7 +++---
 src/delta_barth/analysis/forecast.py | 36 +++++++++++++++-------------
 tests/analysis/test_forecast.py      | 22 ++++++++++++++---
 3 files changed, 42 insertions(+), 23 deletions(-)

diff --git a/pyproject.toml b/pyproject.toml
index 0974385..cf56355 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "delta-barth"
-version = "0.5.7dev1"
+version = "0.5.7dev2"
 description = "workflows and pipelines for the Python-based Plugin of Delta Barth's ERP system"
 authors = [
     {name = "Florian Förster", email = "f.foerster@d-opt.com"},
@@ -44,7 +44,8 @@ filterwarnings = [
 ]
 markers = [
     "api_con_required: tests require an API connection (deselect with '-m \"not api_con_required\"')",
-    "new: to test only new tests, usually removed afterwards (deselect with '-m \"not quick\"')",
+    "new: to test only new tests, usually removed afterwards (deselect with '-m \"not new\"')",
+    "forecast: main components of forecast pipeline (deselect with '-m \"not forecast\"')"
 ]
 log_cli = true
 
@@ -73,7 +74,7 @@ directory = "reports/coverage"
 
 
 [tool.bumpversion]
-current_version = "0.5.7dev1"
+current_version = "0.5.7dev2"
 parse = """(?x)
     (?P<major>0|[1-9]\\d*)\\.
     (?P<minor>0|[1-9]\\d*)\\.
diff --git a/src/delta_barth/analysis/forecast.py b/src/delta_barth/analysis/forecast.py
index 1cd3fd9..39bb13f 100644
--- a/src/delta_barth/analysis/forecast.py
+++ b/src/delta_barth/analysis/forecast.py
@@ -208,17 +208,25 @@ def _process_sales(
     df_cust["jahr"] = df_cust[DATE_FEAT].dt.year
     df_cust["monat"] = df_cust[DATE_FEAT].dt.month
 
-    current_year = datetime.now().year
-    current_month = datetime.now().month
+    monthly_sum_data_only = df_cust.groupby(["jahr", "monat"])[SALES_FEAT].sum().reset_index()
+
+    current_year = datetime.datetime.now().year
+    current_month = datetime.datetime.now().month
     years = range(df_cust["jahr"].min(), current_year + 1)
 
-    old_monthly_sum = df_cust.groupby(["jahr", "monat"])[SALES_FEAT].sum().reset_index()
-
     all_month_year_combinations = pd.DataFrame(
-        [(year, month) for year in years for month in range(1, 13) if (year < current_year or (year == current_year and month <= current_month))], columns=["jahr", "monat"]
+        [
+            (year, month)
+            for year in years
+            for month in range(1, 13)
+            if (year < current_year or (year == current_year and month <= current_month))
+        ],
+        columns=["jahr", "monat"],
     )
-    monthly_sum = pd.merge(all_month_year_combinations, old_monthly_sum, on=["jahr", "monat"], how="left")
+    monthly_sum = pd.merge(
+        all_month_year_combinations, monthly_sum_data_only, on=["jahr", "monat"], how="left"
+    )
     monthly_sum[SALES_FEAT] = monthly_sum[SALES_FEAT].fillna(0)
     monthly_sum[DATE_FEAT] = (
         monthly_sum["monat"].astype(str) + "."
         + monthly_sum["jahr"].astype(str)
@@ -256,27 +264,22 @@ def _process_sales(
 
     too_few_month_points: bool = True
     dates = cast(pd.DatetimeIndex, monthly_sum.index)
-    # print("dates: ", dates)
 
     # baseline: 3 years - 36 months
    starting_date = datetime.datetime.now() - relativedelta(months=36)
-    target_index, succ = next(
-        ((i, True) for i, date in enumerate(dates) if date >= starting_date), (len(dates) - 1, False)
+    target_index, _ = next(
+        ((i, True) for i, date in enumerate(dates) if date >= starting_date),
+        (len(dates) - 1, False),
     )
-    # print("start idx: ", target_index, "length dates: ", len(dates))
 
-    for add_year, date_idx in enumerate(range(start_index, -1, -12)):
-        # print("date_idx: ", date_idx)
+    for add_year, date_idx in enumerate(range(target_index, -1, -12)):
         first_date = dates[date_idx]
-        # print("first date: ", first_date)
         split_date = dates[-6]
 
         train = cast(
             pd.DataFrame,
             monthly_sum.loc[first_date:split_date].copy(),  # type: ignore
         )
-        # print(train)
-        # print("Length train: ", len(train))
         test = cast(
             pd.DataFrame,
             monthly_sum.loc[split_date:].copy(),  # type: ignore
@@ -286,7 +289,7 @@ def _process_sales(
 
         # test set size fixed at 6 --> first iteration: baseline - 6 entries
         # for each new year 10 new data points (i.e., sales strictly positive) needed
-        if len(train[train[SALES_FEAT] > 0]) >= 30 + 10 * add_year:
+        if len(train[train[SALES_FEAT] > 0]) >= (base_num_data_points_months + 10 * add_year):
             too_few_month_points = False
 
             rand = RandomizedSearchCV(
@@ -314,7 +317,6 @@ def _process_sales(
 
             # --- new: store best_estimator
             best_estimator = copy.copy(rand.best_estimator_)
-    # ?? --- new: use best_estimator to calculate future values and store them in forecast
     if best_estimator is not None:
         X_future = pd.DataFrame(
             {"jahr": future_dates.year, "monat": future_dates.month}, index=future_dates
diff --git a/tests/analysis/test_forecast.py b/tests/analysis/test_forecast.py
index d467ca0..cc2cd39 100644
--- a/tests/analysis/test_forecast.py
+++ b/tests/analysis/test_forecast.py
@@ -1,4 +1,6 @@
+import datetime
 from datetime import datetime as Datetime
+from pathlib import Path
 from unittest.mock import patch
 
 import numpy as np
@@ -255,6 +257,7 @@ def test_preprocess_sales_FailOnTargetFeature(
     assert pipe.results is None
 
 
+@pytest.mark.forecast
 def test_process_sales_Success(sales_data_real_preproc):
     data = sales_data_real_preproc.copy()
     pipe = PipeResult(data, STATUS_HANDLER.SUCCESS)
@@ -277,6 +280,7 @@ def test_process_sales_Success(sales_data_real_preproc):
     assert pipe.statistics.xgb_params is not None
 
 
+@pytest.mark.forecast
 def test_process_sales_FailTooFewPoints(sales_data_real_preproc):
     data = sales_data_real_preproc.copy()
     data = data.iloc[:20, :]
@@ -303,6 +307,7 @@ def test_process_sales_FailTooFewPoints(sales_data_real_preproc):
     assert pipe.statistics.xgb_params is None
 
 
+@pytest.mark.forecast
 def test_process_sales_FailTooFewMonthPoints(sales_data_real_preproc):
     data = sales_data_real_preproc.copy()
     pipe = PipeResult(data, STATUS_HANDLER.SUCCESS)
@@ -329,8 +334,19 @@ def test_process_sales_FailTooFewMonthPoints(sales_data_real_preproc):
     assert pipe.statistics.xgb_params is None
 
 
+@pytest.mark.forecast
 def test_process_sales_FailNoReliableForecast(sales_data_real_preproc):
-    data = sales_data_real_preproc.copy()
+    # prepare fake data
+    df = sales_data_real_preproc.copy()
+    f_dates = "buchungs_datum"
+    end = datetime.datetime.now()
+    start = df[f_dates].max()
+    fake_dates = pd.date_range(start, end, freq="MS")
+    fake_data = [(1234, 1014, 1024, 1000, 10, date) for date in fake_dates]
+    fake_df = pd.DataFrame(fake_data, columns=df.columns)
+    enhanced_df = pd.concat((df, fake_df), ignore_index=True)
+
+    data = enhanced_df.copy()
     data["betrag"] = 10000
     print(data["betrag"])
     data = data.iloc[:20000, :]
@@ -340,7 +356,7 @@ def test_process_sales_FailNoReliableForecast(sales_data_real_preproc):
         def __init__(self, *args, **kwargs) -> None:
             class Predictor:
                 def predict(self, *args, **kwargs):
-                    return np.array([1, 1, 1, 1])
+                    return np.array([1, 1, 1, 1], dtype=np.float64)
 
             self.best_estimator_ = Predictor()
 
@@ -354,7 +370,7 @@ def test_process_sales_FailNoReliableForecast(sales_data_real_preproc):
     pipe = fc._process_sales(
         pipe,
         min_num_data_points=1,
-        base_num_data_points_months=-100,
+        base_num_data_points_months=1,
    )
 
     assert pipe.status != STATUS_HANDLER.SUCCESS
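
Note (not part of the patch): the central change in _process_sales above is the month-grid construction: build every (jahr, monat) pair from the first observed year up to the current month, left-merge the observed monthly sums onto that grid, and fill the gaps with 0 so months without sales appear explicitly before the rolling 12-month training windows are cut. Below is a minimal, self-contained sketch of that pattern; the column names and the tiny synthetic DataFrame are illustrative only and not taken from the project.

import datetime

import pandas as pd

SALES_FEAT = "betrag"          # illustrative sales column name
DATE_FEAT = "buchungs_datum"   # illustrative date column name

# tiny synthetic input: three bookings spread over two years
df_cust = pd.DataFrame(
    {
        DATE_FEAT: pd.to_datetime(["2023-01-15", "2023-03-02", "2024-02-20"]),
        SALES_FEAT: [100.0, 250.0, 80.0],
    }
)
df_cust["jahr"] = df_cust[DATE_FEAT].dt.year
df_cust["monat"] = df_cust[DATE_FEAT].dt.month

# monthly sums exist only for months that actually contain data
monthly_sum_data_only = (
    df_cust.groupby(["jahr", "monat"])[SALES_FEAT].sum().reset_index()
)

# complete (year, month) grid up to the current month
now = datetime.datetime.now()
years = range(df_cust["jahr"].min(), now.year + 1)
all_month_year_combinations = pd.DataFrame(
    [
        (year, month)
        for year in years
        for month in range(1, 13)
        if year < now.year or (year == now.year and month <= now.month)
    ],
    columns=["jahr", "monat"],
)

# left merge keeps every grid month; months without sales become NaN and are set to 0
monthly_sum = pd.merge(
    all_month_year_combinations, monthly_sum_data_only, on=["jahr", "monat"], how="left"
)
monthly_sum[SALES_FEAT] = monthly_sum[SALES_FEAT].fillna(0)

print(monthly_sum.head(15))

Without the zero-filled grid, stepping back through the monthly index in 12-month increments would silently skip empty months and shift the training windows.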