From 2055ee5c8b4ce3e5d6a93d5f7088eac5cb10300a Mon Sep 17 00:00:00 2001 From: foefl Date: Thu, 10 Apr 2025 14:07:31 +0200 Subject: [PATCH 01/15] remove unneeded print statement --- src/delta_barth/analysis/forecast.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/delta_barth/analysis/forecast.py b/src/delta_barth/analysis/forecast.py index aa8e59c..0126dc8 100644 --- a/src/delta_barth/analysis/forecast.py +++ b/src/delta_barth/analysis/forecast.py @@ -274,7 +274,6 @@ def _process_sales( best_score_mae = error best_score_r2 = cast(float, r2_score(y_test, y_pred)) best_start_year = start_year - print("executed") forecast = test.copy() forecast.loc[:, "vorhersage"] = y_pred -- 2.34.1 From 14c4efedf717227dbf0b1d1c2295d7084521c596 Mon Sep 17 00:00:00 2001 From: foefl Date: Thu, 10 Apr 2025 14:39:41 +0200 Subject: [PATCH 02/15] add hints for changes --- src/delta_barth/analysis/forecast.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/delta_barth/analysis/forecast.py b/src/delta_barth/analysis/forecast.py index 0126dc8..60b619c 100644 --- a/src/delta_barth/analysis/forecast.py +++ b/src/delta_barth/analysis/forecast.py @@ -238,6 +238,7 @@ def _process_sales( too_few_month_points: bool = True forecast: pd.DataFrame | None = None + # change sliding window to monthly basis for start_year in range(current_year - 4, first_year - 1, -1): train = cast( pd.DataFrame, @@ -274,9 +275,10 @@ def _process_sales( best_score_mae = error best_score_r2 = cast(float, r2_score(y_test, y_pred)) best_start_year = start_year + # overwrite with pre-defined prognosis DF forecast = test.copy() forecast.loc[:, "vorhersage"] = y_pred - + # remove if forecast is not None: forecast = forecast.drop(SALES_FEAT, axis=1).reset_index(drop=True) best_score_mae = best_score_mae if not math.isinf(best_score_mae) else None -- 2.34.1 From 4ef8fc5e9d54b2296306b5abf9bd6ef37de408df Mon Sep 17 00:00:00 2001 From: frasu Date: Thu, 10 Apr 2025 14:58:01 +0000 Subject: [PATCH 03/15] src/delta_barth/analysis/forecast.py aktualisiert --- src/delta_barth/analysis/forecast.py | 36 +++++++++++++++++++--------- 1 file changed, 25 insertions(+), 11 deletions(-) diff --git a/src/delta_barth/analysis/forecast.py b/src/delta_barth/analysis/forecast.py index 60b619c..5505613 100644 --- a/src/delta_barth/analysis/forecast.py +++ b/src/delta_barth/analysis/forecast.py @@ -231,6 +231,8 @@ def _process_sales( "early_stopping_rounds": [20, 50], } + # --- new: best_estimator + best_estimator: BestEstimatorXGBRegressor | None = None best_params: BestParametersXGBRegressor | None = None best_score_mae: float | None = float("inf") best_score_r2: float | None = None @@ -238,20 +240,29 @@ def _process_sales( too_few_month_points: bool = True forecast: pd.DataFrame | None = None - # change sliding window to monthly basis - for start_year in range(current_year - 4, first_year - 1, -1): + # --- new: dates und forecast + #last_date = pd.to_datetime(monthly_sum.index[-1], format="%m.%Y") + last_date = pd.to_datetime(datetime.now().strftime("%m.%Y"), format="%m.%Y") + future_dates = pd.date_range(start=last_date + pd.DateOffset(months=1), periods=anzahl, freq="MS") + forecast = pd.DataFrame({"Datum": future_dates.strftime("%m.%Y")}).set_index("Datum") + + dates = monthly_sum.index + for index, i in enumerate(range(len(dates)-36, -1, -12)): + current_date = dates[i] + split_date = dates[-anzahl] + train = cast( pd.DataFrame, - monthly_sum[monthly_sum.index.year >= start_year].iloc[:-5].copy(), # type: ignore + 
monthly_sum.loc[current_date:split_date].copy(), # type: ignore ) test = cast( pd.DataFrame, - monthly_sum[monthly_sum.index.year >= start_year].iloc[-5:].copy(), # type: ignore + monthly_sum.loc[split_date:].copy(), # type: ignore ) X_train, X_test = train[features], test[features] y_train, y_test = train[target], test[target] - if len(train) >= (base_num_data_points_months + 10 * (current_year - 4 - start_year)): + if len(train) >= 30 + 10 * index: too_few_month_points = False rand = RandomizedSearchCV( @@ -271,16 +282,19 @@ def _process_sales( if len(np.unique(y_pred)) != 1: error = cast(float, mean_absolute_error(y_test, y_pred)) if error < best_score_mae: + # --- new: store best_estimator + best_estimator = cast(BestEstimatorXGBRegressor, rand.best_estimator_) best_params = cast(BestParametersXGBRegressor, rand.best_params_) best_score_mae = error best_score_r2 = cast(float, r2_score(y_test, y_pred)) best_start_year = start_year - # overwrite with pre-defined prognosis DF - forecast = test.copy() - forecast.loc[:, "vorhersage"] = y_pred - # remove - if forecast is not None: - forecast = forecast.drop(SALES_FEAT, axis=1).reset_index(drop=True) + + # --- new: use best_estimator to calculate future values and store them in forecast + if best_estimator is not None: + X_future = pd.DataFrame({"jahr": future_dates.year, "monat": future_dates.month}, index=future_dates) + y_future = rand.best_estimator_.predict(X_future) + forecast["vorhersage"] = y_future + best_score_mae = best_score_mae if not math.isinf(best_score_mae) else None if too_few_month_points: -- 2.34.1 From 29343262587a9c7f9648e4d9aced1cb4072943f8 Mon Sep 17 00:00:00 2001 From: frasu Date: Thu, 10 Apr 2025 17:10:56 +0000 Subject: [PATCH 04/15] src/delta_barth/analysis/forecast.py aktualisiert --- src/delta_barth/analysis/forecast.py | 43 +++++++++++++++++----------- 1 file changed, 27 insertions(+), 16 deletions(-) diff --git a/src/delta_barth/analysis/forecast.py b/src/delta_barth/analysis/forecast.py index 5505613..c32e078 100644 --- a/src/delta_barth/analysis/forecast.py +++ b/src/delta_barth/analysis/forecast.py @@ -1,6 +1,8 @@ from __future__ import annotations import datetime +# --- new: for calculating timedelta +from dateutil.relativedelta import relativedelta import math from collections.abc import Mapping, Set from dataclasses import asdict @@ -215,8 +217,16 @@ def _process_sales( features = ["jahr", "monat"] target = SALES_FEAT - current_year = datetime.datetime.now().year - first_year = cast(int, df_cust["jahr"].min()) + + # --- new: not necessary anymore + #current_year = datetime.datetime.now().year + #first_year = cast(int, df_cust["jahr"].min()) + + # --- new: dates und forecast + #last_date = pd.to_datetime(monthly_sum.index[-1], format="%m.%Y") + last_date = pd.to_datetime(datetime.now().strftime("%m.%Y"), format="%m.%Y") + future_dates = pd.date_range(start=last_date + pd.DateOffset(months=1), periods=6, freq="MS") + forecast = pd.DataFrame({"datum": future_dates.strftime("%m.%Y")}).set_index("datum") # Randomized Search kfold = KFold(n_splits=5, shuffle=True) @@ -231,8 +241,9 @@ def _process_sales( "early_stopping_rounds": [20, 50], } - # --- new: best_estimator - best_estimator: BestEstimatorXGBRegressor | None = None + # --- new: best_estimator (internal usage) + best_estimator = None + best_params: BestParametersXGBRegressor | None = None best_score_mae: float | None = float("inf") best_score_r2: float | None = None @@ -240,20 +251,19 @@ def _process_sales( too_few_month_points: bool = True forecast: 
pd.DataFrame | None = None - # --- new: dates und forecast - #last_date = pd.to_datetime(monthly_sum.index[-1], format="%m.%Y") - last_date = pd.to_datetime(datetime.now().strftime("%m.%Y"), format="%m.%Y") - future_dates = pd.date_range(start=last_date + pd.DateOffset(months=1), periods=anzahl, freq="MS") - forecast = pd.DataFrame({"Datum": future_dates.strftime("%m.%Y")}).set_index("Datum") - dates = monthly_sum.index - for index, i in enumerate(range(len(dates)-36, -1, -12)): - current_date = dates[i] - split_date = dates[-anzahl] + # --- new: use monthly basis for time windows + starting_date = datetime.now() - relativedelta(months=36) + #starting_date = dates.max() - relativedelta(months=36) + start_index = next((i for i, date in enumerate(dates) if date >= starting_date), len(dates) - 1) + + for index, i in enumerate(range(start_index, -1, -12)): + start_date = dates[i] + split_date = dates[-6] train = cast( pd.DataFrame, - monthly_sum.loc[current_date:split_date].copy(), # type: ignore + monthly_sum.loc[start_date:split_date].copy(), # type: ignore ) test = cast( pd.DataFrame, @@ -287,12 +297,13 @@ def _process_sales( best_params = cast(BestParametersXGBRegressor, rand.best_params_) best_score_mae = error best_score_r2 = cast(float, r2_score(y_test, y_pred)) - best_start_year = start_year + # --- new: use store start_date in best_start_year + best_start_year = start_date # --- new: use best_estimator to calculate future values and store them in forecast if best_estimator is not None: X_future = pd.DataFrame({"jahr": future_dates.year, "monat": future_dates.month}, index=future_dates) - y_future = rand.best_estimator_.predict(X_future) + y_future = best_estimator.predict(X_future) forecast["vorhersage"] = y_future best_score_mae = best_score_mae if not math.isinf(best_score_mae) else None -- 2.34.1 From f49744ca45de600123123db581e12c48c1f60283 Mon Sep 17 00:00:00 2001 From: frasu Date: Thu, 10 Apr 2025 17:33:00 +0000 Subject: [PATCH 05/15] src/delta_barth/analysis/forecast.py aktualisiert --- src/delta_barth/analysis/forecast.py | 22 +++++++++------------- 1 file changed, 9 insertions(+), 13 deletions(-) diff --git a/src/delta_barth/analysis/forecast.py b/src/delta_barth/analysis/forecast.py index c32e078..bc402bc 100644 --- a/src/delta_barth/analysis/forecast.py +++ b/src/delta_barth/analysis/forecast.py @@ -218,12 +218,7 @@ def _process_sales( features = ["jahr", "monat"] target = SALES_FEAT - # --- new: not necessary anymore - #current_year = datetime.datetime.now().year - #first_year = cast(int, df_cust["jahr"].min()) - - # --- new: dates und forecast - #last_date = pd.to_datetime(monthly_sum.index[-1], format="%m.%Y") + # --- new: dates and forecast last_date = pd.to_datetime(datetime.now().strftime("%m.%Y"), format="%m.%Y") future_dates = pd.date_range(start=last_date + pd.DateOffset(months=1), periods=6, freq="MS") forecast = pd.DataFrame({"datum": future_dates.strftime("%m.%Y")}).set_index("datum") @@ -241,7 +236,7 @@ def _process_sales( "early_stopping_rounds": [20, 50], } - # --- new: best_estimator (internal usage) + # --- new: best_estimator (internal usage only) best_estimator = None best_params: BestParametersXGBRegressor | None = None @@ -258,12 +253,12 @@ def _process_sales( start_index = next((i for i, date in enumerate(dates) if date >= starting_date), len(dates) - 1) for index, i in enumerate(range(start_index, -1, -12)): - start_date = dates[i] + first_date = dates[i] split_date = dates[-6] train = cast( pd.DataFrame, - 
monthly_sum.loc[start_date:split_date].copy(), # type: ignore + monthly_sum.loc[first_date:split_date].copy(), # type: ignore ) test = cast( pd.DataFrame, @@ -272,6 +267,7 @@ def _process_sales( X_train, X_test = train[features], test[features] y_train, y_test = train[target], test[target] + # --- new: adapted condition to fit new for-loop if len(train) >= 30 + 10 * index: too_few_month_points = False @@ -292,13 +288,13 @@ def _process_sales( if len(np.unique(y_pred)) != 1: error = cast(float, mean_absolute_error(y_test, y_pred)) if error < best_score_mae: - # --- new: store best_estimator - best_estimator = cast(BestEstimatorXGBRegressor, rand.best_estimator_) best_params = cast(BestParametersXGBRegressor, rand.best_params_) best_score_mae = error best_score_r2 = cast(float, r2_score(y_test, y_pred)) - # --- new: use store start_date in best_start_year - best_start_year = start_date + # --- new: use first_date for best_start_year + best_start_year = first_date.year + # --- new: store best_estimator + best_estimator = rand.best_estimator_ # --- new: use best_estimator to calculate future values and store them in forecast if best_estimator is not None: -- 2.34.1 From 5d1f5199d318d00c1cf3fd304dda1737be2ee0aa Mon Sep 17 00:00:00 2001 From: foefl Date: Fri, 11 Apr 2025 10:37:49 +0200 Subject: [PATCH 06/15] prototype ideas --- src/delta_barth/analysis/forecast.py | 76 ++++++++++++++++++---------- src/delta_barth/constants.py | 4 +- 2 files changed, 50 insertions(+), 30 deletions(-) diff --git a/src/delta_barth/analysis/forecast.py b/src/delta_barth/analysis/forecast.py index bc402bc..55e42ac 100644 --- a/src/delta_barth/analysis/forecast.py +++ b/src/delta_barth/analysis/forecast.py @@ -1,8 +1,7 @@ from __future__ import annotations +import copy import datetime -# --- new: for calculating timedelta -from dateutil.relativedelta import relativedelta import math from collections.abc import Mapping, Set from dataclasses import asdict @@ -13,6 +12,9 @@ import numpy as np import pandas as pd import scipy.stats import sqlalchemy as sql + +# --- new: for calculating timedelta +from dateutil.relativedelta import relativedelta from sklearn.metrics import mean_absolute_error, r2_score from sklearn.model_selection import KFold, RandomizedSearchCV from xgboost import XGBRegressor @@ -185,16 +187,14 @@ def _process_sales( PipeResult _description_ """ - # cust_data: CustomerDataSalesForecast = CustomerDataSalesForecast() - # filter data data = pipe.data assert data is not None, "processing not existing pipe result" DATE_FEAT: Final[str] = "buchungs_datum" SALES_FEAT: Final[str] = "betrag" - df_firma = data[(data["betrag"] > 0)] - df_cust = df_firma.copy() + df_filter = data[(data["betrag"] > 0)] + df_cust = df_filter.copy() df_cust = df_cust.sort_values(by=DATE_FEAT).reset_index() len_ds = len(df_cust) @@ -218,9 +218,11 @@ def _process_sales( features = ["jahr", "monat"] target = SALES_FEAT - # --- new: dates and forecast - last_date = pd.to_datetime(datetime.now().strftime("%m.%Y"), format="%m.%Y") - future_dates = pd.date_range(start=last_date + pd.DateOffset(months=1), periods=6, freq="MS") + # ?? 
--- new: dates and forecast + last_date = pd.to_datetime(datetime.datetime.now().strftime("%m.%Y"), format="%m.%Y") + future_dates = pd.date_range( + start=last_date + pd.DateOffset(months=1), periods=6, freq="MS" + ) forecast = pd.DataFrame({"datum": future_dates.strftime("%m.%Y")}).set_index("datum") # Randomized Search @@ -236,30 +238,42 @@ def _process_sales( "early_stopping_rounds": [20, 50], } - # --- new: best_estimator (internal usage only) + # ?? --- new: best_estimator (internal usage only) best_estimator = None - best_params: BestParametersXGBRegressor | None = None best_score_mae: float | None = float("inf") best_score_r2: float | None = None best_start_year: int | None = None too_few_month_points: bool = True - forecast: pd.DataFrame | None = None - + # forecast: pd.DataFrame | None = None + # TODO: write routine to pad missing values in datetime row + # TODO problem: continuous timeline expected, but values can be empty for multiple months + # TODO: therefore, stepping with fixed value n does not result in timedelta of n episodes + # Option A: pad data frame with zero values --> could impede forecast algorithm + # Option B: calculate next index based on timedelta dates = monthly_sum.index - # --- new: use monthly basis for time windows - starting_date = datetime.now() - relativedelta(months=36) - #starting_date = dates.max() - relativedelta(months=36) - start_index = next((i for i, date in enumerate(dates) if date >= starting_date), len(dates) - 1) + # print("dates: ", dates) + # ?? --- new: use monthly basis for time windows + # baseline: 3 years - 36 months + starting_date = datetime.datetime.now() - relativedelta(months=12) + # starting_date = dates.max() - relativedelta(months=36) + start_index = next( + (i for i, date in enumerate(dates) if date >= starting_date), len(dates) - 1 + ) + print("start idx: ", start_index, "length dates: ", len(dates)) - for index, i in enumerate(range(start_index, -1, -12)): - first_date = dates[i] + for add_year, date_idx in enumerate(range(start_index, -1, -12)): + print("date_idx: ", date_idx) + first_date = dates[date_idx] + print("first date: ", first_date) split_date = dates[-6] train = cast( pd.DataFrame, monthly_sum.loc[first_date:split_date].copy(), # type: ignore ) + print(train) + print("Length train: ", len(train)) test = cast( pd.DataFrame, monthly_sum.loc[split_date:].copy(), # type: ignore @@ -267,8 +281,10 @@ def _process_sales( X_train, X_test = train[features], test[features] y_train, y_test = train[target], test[target] - # --- new: adapted condition to fit new for-loop - if len(train) >= 30 + 10 * index: + # ?? --- new: adapted condition to fit new for-loop + # test set size fixed at 6 --> first iteration: baseline - 6 entries + # for each new year 10 new data points needed + if len(train) >= 30 + 10 * add_year: too_few_month_points = False rand = RandomizedSearchCV( @@ -294,14 +310,16 @@ def _process_sales( # --- new: use first_date for best_start_year best_start_year = first_date.year # --- new: store best_estimator - best_estimator = rand.best_estimator_ + best_estimator = copy.copy(rand.best_estimator_) - # --- new: use best_estimator to calculate future values and store them in forecast + # ?? 
--- new: use best_estimator to calculate future values and store them in forecast if best_estimator is not None: - X_future = pd.DataFrame({"jahr": future_dates.year, "monat": future_dates.month}, index=future_dates) - y_future = best_estimator.predict(X_future) - forecast["vorhersage"] = y_future - + X_future = pd.DataFrame( + {"jahr": future_dates.year, "monat": future_dates.month}, index=future_dates + ) + y_future = best_estimator.predict(X_future) # type: ignore + forecast["vorhersage"] = y_future + best_score_mae = best_score_mae if not math.isinf(best_score_mae) else None if too_few_month_points: @@ -317,7 +335,9 @@ def _process_sales( pipe.stats(stats) return pipe - assert forecast is not None, "forecast is None, but was attempted to be returned" + assert "vorhersage" in forecast.columns, ( + "forecast does not contain prognosis values, but was attempted to be returned" + ) status = STATUS_HANDLER.SUCCESS pipe.success(forecast, status) stats = SalesForecastStatistics( diff --git a/src/delta_barth/constants.py b/src/delta_barth/constants.py index 45c74ba..5f163ae 100644 --- a/src/delta_barth/constants.py +++ b/src/delta_barth/constants.py @@ -17,7 +17,7 @@ DUMMY_DATA_PATH: Final[Path] = dummy_data_pth # ** logging ENABLE_LOGGING: Final[bool] = True LOGGING_TO_FILE: Final[bool] = True -LOGGING_TO_STDERR: Final[bool] = True +LOGGING_TO_STDERR: Final[bool] = False LOG_FILENAME: Final[str] = "dopt-delbar.log" # ** databases @@ -40,7 +40,7 @@ class KnownDelBarApiErrorCodes(enum.Enum): # ** API -API_CON_TIMEOUT: Final[float] = 5.0 # secs to response +API_CON_TIMEOUT: Final[float] = 10.0 # secs to response # ** API response parsing # ** column mapping [API-Response --> Target-Features] COL_MAP_SALES_PROGNOSIS: Final[DualDict[str, str]] = DualDict( -- 2.34.1 From 44846d03e07c27b976cfe3153303852de15153dd Mon Sep 17 00:00:00 2001 From: foefl Date: Fri, 11 Apr 2025 12:24:36 +0200 Subject: [PATCH 07/15] revert change made to relativedelta --- src/delta_barth/analysis/forecast.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/delta_barth/analysis/forecast.py b/src/delta_barth/analysis/forecast.py index 55e42ac..21bb096 100644 --- a/src/delta_barth/analysis/forecast.py +++ b/src/delta_barth/analysis/forecast.py @@ -255,7 +255,7 @@ def _process_sales( # print("dates: ", dates) # ?? 
--- new: use monthly basis for time windows # baseline: 3 years - 36 months - starting_date = datetime.datetime.now() - relativedelta(months=12) + starting_date = datetime.datetime.now() - relativedelta(months=36) # starting_date = dates.max() - relativedelta(months=36) start_index = next( (i for i, date in enumerate(dates) if date >= starting_date), len(dates) - 1 -- 2.34.1 From 104adaf2c5844d21f6bc81dd4964675481e6dbef Mon Sep 17 00:00:00 2001 From: foefl Date: Fri, 11 Apr 2025 12:44:20 +0200 Subject: [PATCH 08/15] adapt forecast dataframe to be compatible with pipeline output --- src/delta_barth/analysis/forecast.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/src/delta_barth/analysis/forecast.py b/src/delta_barth/analysis/forecast.py index 21bb096..6853e37 100644 --- a/src/delta_barth/analysis/forecast.py +++ b/src/delta_barth/analysis/forecast.py @@ -223,7 +223,7 @@ def _process_sales( future_dates = pd.date_range( start=last_date + pd.DateOffset(months=1), periods=6, freq="MS" ) - forecast = pd.DataFrame({"datum": future_dates.strftime("%m.%Y")}).set_index("datum") + forecast = pd.DataFrame({"datum": future_dates}).set_index("datum") # Randomized Search kfold = KFold(n_splits=5, shuffle=True) @@ -255,8 +255,8 @@ def _process_sales( # print("dates: ", dates) # ?? --- new: use monthly basis for time windows # baseline: 3 years - 36 months - starting_date = datetime.datetime.now() - relativedelta(months=36) - # starting_date = dates.max() - relativedelta(months=36) + # starting_date = datetime.datetime.now() - relativedelta(months=36) + starting_date = dates.max() - relativedelta(months=36) start_index = next( (i for i, date in enumerate(dates) if date >= starting_date), len(dates) - 1 ) @@ -319,6 +319,9 @@ def _process_sales( ) y_future = best_estimator.predict(X_future) # type: ignore forecast["vorhersage"] = y_future + forecast["jahr"] = forecast.index.year # type: ignore + forecast["monat"] = forecast.index.month # type: ignore + forecast = forecast.reset_index(drop=True) best_score_mae = best_score_mae if not math.isinf(best_score_mae) else None -- 2.34.1 From 4427c98a4ee38be10092941a04b7430fb657fd20 Mon Sep 17 00:00:00 2001 From: foefl Date: Fri, 11 Apr 2025 13:58:50 +0200 Subject: [PATCH 09/15] reset default time window starting in current time --- src/delta_barth/analysis/forecast.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/delta_barth/analysis/forecast.py b/src/delta_barth/analysis/forecast.py index 6853e37..ff16175 100644 --- a/src/delta_barth/analysis/forecast.py +++ b/src/delta_barth/analysis/forecast.py @@ -255,8 +255,8 @@ def _process_sales( # print("dates: ", dates) # ?? 
--- new: use monthly basis for time windows # baseline: 3 years - 36 months - # starting_date = datetime.datetime.now() - relativedelta(months=36) - starting_date = dates.max() - relativedelta(months=36) + starting_date = datetime.datetime.now() - relativedelta(months=36) + # starting_date = dates.max() - relativedelta(months=36) start_index = next( (i for i, date in enumerate(dates) if date >= starting_date), len(dates) - 1 ) -- 2.34.1 From 2f3d51de0ca8d4725fc9ff347e07f10689fce3e0 Mon Sep 17 00:00:00 2001 From: foefl Date: Fri, 11 Apr 2025 14:00:07 +0200 Subject: [PATCH 10/15] add cast to index type --- src/delta_barth/analysis/forecast.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/delta_barth/analysis/forecast.py b/src/delta_barth/analysis/forecast.py index ff16175..87396a3 100644 --- a/src/delta_barth/analysis/forecast.py +++ b/src/delta_barth/analysis/forecast.py @@ -251,7 +251,7 @@ def _process_sales( # TODO: therefore, stepping with fixed value n does not result in timedelta of n episodes # Option A: pad data frame with zero values --> could impede forecast algorithm # Option B: calculate next index based on timedelta - dates = monthly_sum.index + dates = cast(pd.DatetimeIndex, monthly_sum.index) # print("dates: ", dates) # ?? --- new: use monthly basis for time windows # baseline: 3 years - 36 months -- 2.34.1 From d507d5113662633941e9b62574e5f7f0ca7c9090 Mon Sep 17 00:00:00 2001 From: frasu Date: Sun, 13 Apr 2025 12:58:36 +0000 Subject: [PATCH 11/15] src/delta_barth/analysis/forecast.py aktualisiert --- src/delta_barth/analysis/forecast.py | 40 +++++++++++++++------------- 1 file changed, 22 insertions(+), 18 deletions(-) diff --git a/src/delta_barth/analysis/forecast.py b/src/delta_barth/analysis/forecast.py index 87396a3..a9cf701 100644 --- a/src/delta_barth/analysis/forecast.py +++ b/src/delta_barth/analysis/forecast.py @@ -208,7 +208,17 @@ def _process_sales( df_cust["jahr"] = df_cust[DATE_FEAT].dt.year df_cust["monat"] = df_cust[DATE_FEAT].dt.month - monthly_sum = df_cust.groupby(["jahr", "monat"])[SALES_FEAT].sum().reset_index() + current_year = datetime.now().year + current_month = datetime.now().month + years = range(df_cust["jahr"].min(), current_year + 1) + + old_monthly_sum = df_cust.groupby(["jahr", "monat"])[SALES_FEAT].sum().reset_index() + + all_month_year_combinations = pd.DataFrame( + [(year, month) for year in years for month in range(1, 13) if (year < current_year or (year == current_year and month <= current_month))], columns=["jahr", "monat"] + ) + + monthly_sum = pd.merge(all_month_year_combinations, old_monthly_sum, on=["jahr", "monat"], how='left') monthly_sum[DATE_FEAT] = ( monthly_sum["monat"].astype(str) + "." 
+ monthly_sum["jahr"].astype(str) ) @@ -228,7 +238,7 @@ def _process_sales( # Randomized Search kfold = KFold(n_splits=5, shuffle=True) params: ParamSearchXGBRegressor = { - "n_estimators": scipy.stats.poisson(mu=1000), + "n_estimators": scipy.stats.poisson(mu=100), "learning_rate": [0.03, 0.04, 0.05], "max_depth": range(2, 9), "min_child_weight": range(1, 5), @@ -245,35 +255,29 @@ def _process_sales( best_score_r2: float | None = None best_start_year: int | None = None too_few_month_points: bool = True - # forecast: pd.DataFrame | None = None - # TODO: write routine to pad missing values in datetime row - # TODO problem: continuous timeline expected, but values can be empty for multiple months - # TODO: therefore, stepping with fixed value n does not result in timedelta of n episodes - # Option A: pad data frame with zero values --> could impede forecast algorithm - # Option B: calculate next index based on timedelta + dates = cast(pd.DatetimeIndex, monthly_sum.index) # print("dates: ", dates) - # ?? --- new: use monthly basis for time windows # baseline: 3 years - 36 months starting_date = datetime.datetime.now() - relativedelta(months=36) - # starting_date = dates.max() - relativedelta(months=36) - start_index = next( - (i for i, date in enumerate(dates) if date >= starting_date), len(dates) - 1 + + target_index, succ = next( + ((i, True) for i, date in enumerate(dates) if date >= starting_date), (len(dates) - 1, False) ) - print("start idx: ", start_index, "length dates: ", len(dates)) + # print("start idx: ", target_index, "length dates: ", len(dates)) for add_year, date_idx in enumerate(range(start_index, -1, -12)): - print("date_idx: ", date_idx) + # print("date_idx: ", date_idx) first_date = dates[date_idx] - print("first date: ", first_date) + # print("first date: ", first_date) split_date = dates[-6] train = cast( pd.DataFrame, monthly_sum.loc[first_date:split_date].copy(), # type: ignore ) - print(train) - print("Length train: ", len(train)) + # print(train) + # print("Length train: ", len(train)) test = cast( pd.DataFrame, monthly_sum.loc[split_date:].copy(), # type: ignore @@ -284,7 +288,7 @@ def _process_sales( # ?? --- new: adapted condition to fit new for-loop # test set size fixed at 6 --> first iteration: baseline - 6 entries # for each new year 10 new data points needed - if len(train) >= 30 + 10 * add_year: + if len(train[train[SALES_FEAT] > 0]) >= 30 + 10 * add_year: too_few_month_points = False rand = RandomizedSearchCV( -- 2.34.1 From 65e3b6ffab3684677c8f50fadb14ba0caff787bb Mon Sep 17 00:00:00 2001 From: frasu Date: Sun, 13 Apr 2025 14:46:01 +0000 Subject: [PATCH 12/15] src/delta_barth/analysis/forecast.py aktualisiert --- src/delta_barth/analysis/forecast.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/src/delta_barth/analysis/forecast.py b/src/delta_barth/analysis/forecast.py index a9cf701..1cd3fd9 100644 --- a/src/delta_barth/analysis/forecast.py +++ b/src/delta_barth/analysis/forecast.py @@ -218,7 +218,8 @@ def _process_sales( [(year, month) for year in years for month in range(1, 13) if (year < current_year or (year == current_year and month <= current_month))], columns=["jahr", "monat"] ) - monthly_sum = pd.merge(all_month_year_combinations, old_monthly_sum, on=["jahr", "monat"], how='left') + monthly_sum = pd.merge(all_month_year_combinations, old_monthly_sum, on=["jahr", "monat"], how="left") + monthly_sum[SALES_FEAT] = monthly_sum[SALES_FEAT].fillna(0) monthly_sum[DATE_FEAT] = ( monthly_sum["monat"].astype(str) + "." 
+ monthly_sum["jahr"].astype(str) ) @@ -228,7 +229,6 @@ def _process_sales( features = ["jahr", "monat"] target = SALES_FEAT - # ?? --- new: dates and forecast last_date = pd.to_datetime(datetime.datetime.now().strftime("%m.%Y"), format="%m.%Y") future_dates = pd.date_range( start=last_date + pd.DateOffset(months=1), periods=6, freq="MS" @@ -248,7 +248,6 @@ def _process_sales( "early_stopping_rounds": [20, 50], } - # ?? --- new: best_estimator (internal usage only) best_estimator = None best_params: BestParametersXGBRegressor | None = None best_score_mae: float | None = float("inf") @@ -285,9 +284,8 @@ def _process_sales( X_train, X_test = train[features], test[features] y_train, y_test = train[target], test[target] - # ?? --- new: adapted condition to fit new for-loop # test set size fixed at 6 --> first iteration: baseline - 6 entries - # for each new year 10 new data points needed + # for each new year 10 new data points (i.e., sales strictly positive) needed if len(train[train[SALES_FEAT] > 0]) >= 30 + 10 * add_year: too_few_month_points = False -- 2.34.1 From 3c8f18e4e3b8298f487c4628da9015c03305253b Mon Sep 17 00:00:00 2001 From: foefl Date: Wed, 16 Apr 2025 10:53:57 +0200 Subject: [PATCH 13/15] add edge case --- tests/test_session.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/test_session.py b/tests/test_session.py index 31cd51d..bf28c92 100644 --- a/tests/test_session.py +++ b/tests/test_session.py @@ -64,6 +64,7 @@ def test_session_setup_db_management(tmp_path): @patch("delta_barth.logging.ENABLE_LOGGING", True) @patch("delta_barth.logging.LOGGING_TO_FILE", True) +@patch("delta_barth.logging.LOGGING_TO_STDERR", True) def test_session_setup_logging(tmp_path): str_path = str(tmp_path) foldername: str = "logging_test" -- 2.34.1 From 670e2e549f7e19700842cc3358fe393b6afb3c6e Mon Sep 17 00:00:00 2001 From: foefl Date: Wed, 16 Apr 2025 11:14:55 +0200 Subject: [PATCH 14/15] add edge case for unittest in pipeline --- tests/test_pipelines.py | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/tests/test_pipelines.py b/tests/test_pipelines.py index b1387b7..e1e8b1e 100644 --- a/tests/test_pipelines.py +++ b/tests/test_pipelines.py @@ -1,17 +1,15 @@ -import importlib import json from unittest.mock import patch import pytest import sqlalchemy as sql -import delta_barth.pipelines from delta_barth import databases as db from delta_barth import pipelines as pl from delta_barth.errors import STATUS_HANDLER -def test_write_performance_metrics(session): +def test_write_performance_metrics_Success(session): pipe_name = "test_pipe" t_start = 20_000_000_000 t_end = 30_000_000_000 @@ -33,6 +31,20 @@ def test_write_performance_metrics(session): assert metrics.execution_duration == 10 +def test_write_performance_metrics_FailStartingTime(session): + pipe_name = "test_pipe" + t_start = 30_000_000_000 + t_end = 20_000_000_000 + + with patch("delta_barth.pipelines.SESSION", session): + with pytest.raises(ValueError): + _ = pl._write_performance_metrics( + pipeline_name=pipe_name, + time_start=t_start, + time_end=t_end, + ) + + @patch("delta_barth.analysis.forecast.SALES_BASE_NUM_DATAPOINTS_MONTHS", 1) def test_sales_prognosis_pipeline(exmpl_api_sales_prognosis_resp, session): with patch( -- 2.34.1 From afa31e2a943a6f84d19f88e77b04e89397b5836d Mon Sep 17 00:00:00 2001 From: foefl Date: Wed, 16 Apr 2025 11:18:51 +0200 Subject: [PATCH 15/15] major overhaul of forecast pipeline --- pyproject.toml | 7 +++--- src/delta_barth/analysis/forecast.py | 36 
+++++++++++++++------------- tests/analysis/test_forecast.py | 22 ++++++++++++++--- 3 files changed, 42 insertions(+), 23 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 0974385..cf56355 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "delta-barth" -version = "0.5.7dev1" +version = "0.5.7dev2" description = "workflows and pipelines for the Python-based Plugin of Delta Barth's ERP system" authors = [ {name = "Florian Förster", email = "f.foerster@d-opt.com"}, @@ -44,7 +44,8 @@ filterwarnings = [ ] markers = [ "api_con_required: tests require an API connection (deselect with '-m \"not api_con_required\"')", - "new: to test only new tests, usually removed afterwards (deselect with '-m \"not quick\"')", + "new: to test only new tests, usually removed afterwards (deselect with '-m \"not new\"')", + "forecast: main components of forecast pipeline (deselect with '-m \"not forecast\"')" ] log_cli = true @@ -73,7 +74,7 @@ directory = "reports/coverage" [tool.bumpversion] -current_version = "0.5.7dev1" +current_version = "0.5.7dev2" parse = """(?x) (?P0|[1-9]\\d*)\\. (?P0|[1-9]\\d*)\\. diff --git a/src/delta_barth/analysis/forecast.py b/src/delta_barth/analysis/forecast.py index 1cd3fd9..39bb13f 100644 --- a/src/delta_barth/analysis/forecast.py +++ b/src/delta_barth/analysis/forecast.py @@ -208,17 +208,25 @@ def _process_sales( df_cust["jahr"] = df_cust[DATE_FEAT].dt.year df_cust["monat"] = df_cust[DATE_FEAT].dt.month - current_year = datetime.now().year - current_month = datetime.now().month + monthly_sum_data_only = df_cust.groupby(["jahr", "monat"])[SALES_FEAT].sum().reset_index() + + current_year = datetime.datetime.now().year + current_month = datetime.datetime.now().month years = range(df_cust["jahr"].min(), current_year + 1) - old_monthly_sum = df_cust.groupby(["jahr", "monat"])[SALES_FEAT].sum().reset_index() - all_month_year_combinations = pd.DataFrame( - [(year, month) for year in years for month in range(1, 13) if (year < current_year or (year == current_year and month <= current_month))], columns=["jahr", "monat"] + [ + (year, month) + for year in years + for month in range(1, 13) + if (year < current_year or (year == current_year and month <= current_month)) + ], + columns=["jahr", "monat"], ) - monthly_sum = pd.merge(all_month_year_combinations, old_monthly_sum, on=["jahr", "monat"], how="left") + monthly_sum = pd.merge( + all_month_year_combinations, monthly_sum_data_only, on=["jahr", "monat"], how="left" + ) monthly_sum[SALES_FEAT] = monthly_sum[SALES_FEAT].fillna(0) monthly_sum[DATE_FEAT] = ( monthly_sum["monat"].astype(str) + "." 
+ monthly_sum["jahr"].astype(str) @@ -256,27 +264,22 @@ def _process_sales( too_few_month_points: bool = True dates = cast(pd.DatetimeIndex, monthly_sum.index) - # print("dates: ", dates) # baseline: 3 years - 36 months starting_date = datetime.datetime.now() - relativedelta(months=36) - target_index, succ = next( - ((i, True) for i, date in enumerate(dates) if date >= starting_date), (len(dates) - 1, False) + target_index, _ = next( + ((i, True) for i, date in enumerate(dates) if date >= starting_date), + (len(dates) - 1, False), ) - # print("start idx: ", target_index, "length dates: ", len(dates)) - for add_year, date_idx in enumerate(range(start_index, -1, -12)): - # print("date_idx: ", date_idx) + for add_year, date_idx in enumerate(range(target_index, -1, -12)): first_date = dates[date_idx] - # print("first date: ", first_date) split_date = dates[-6] train = cast( pd.DataFrame, monthly_sum.loc[first_date:split_date].copy(), # type: ignore ) - # print(train) - # print("Length train: ", len(train)) test = cast( pd.DataFrame, monthly_sum.loc[split_date:].copy(), # type: ignore @@ -286,7 +289,7 @@ def _process_sales( # test set size fixed at 6 --> first iteration: baseline - 6 entries # for each new year 10 new data points (i.e., sales strictly positive) needed - if len(train[train[SALES_FEAT] > 0]) >= 30 + 10 * add_year: + if len(train[train[SALES_FEAT] > 0]) >= (base_num_data_points_months + 10 * add_year): too_few_month_points = False rand = RandomizedSearchCV( @@ -314,7 +317,6 @@ def _process_sales( # --- new: store best_estimator best_estimator = copy.copy(rand.best_estimator_) - # ?? --- new: use best_estimator to calculate future values and store them in forecast if best_estimator is not None: X_future = pd.DataFrame( {"jahr": future_dates.year, "monat": future_dates.month}, index=future_dates diff --git a/tests/analysis/test_forecast.py b/tests/analysis/test_forecast.py index d467ca0..cc2cd39 100644 --- a/tests/analysis/test_forecast.py +++ b/tests/analysis/test_forecast.py @@ -1,4 +1,6 @@ +import datetime from datetime import datetime as Datetime +from pathlib import Path from unittest.mock import patch import numpy as np @@ -255,6 +257,7 @@ def test_preprocess_sales_FailOnTargetFeature( assert pipe.results is None +@pytest.mark.forecast def test_process_sales_Success(sales_data_real_preproc): data = sales_data_real_preproc.copy() pipe = PipeResult(data, STATUS_HANDLER.SUCCESS) @@ -277,6 +280,7 @@ def test_process_sales_Success(sales_data_real_preproc): assert pipe.statistics.xgb_params is not None +@pytest.mark.forecast def test_process_sales_FailTooFewPoints(sales_data_real_preproc): data = sales_data_real_preproc.copy() data = data.iloc[:20, :] @@ -303,6 +307,7 @@ def test_process_sales_FailTooFewPoints(sales_data_real_preproc): assert pipe.statistics.xgb_params is None +@pytest.mark.forecast def test_process_sales_FailTooFewMonthPoints(sales_data_real_preproc): data = sales_data_real_preproc.copy() pipe = PipeResult(data, STATUS_HANDLER.SUCCESS) @@ -329,8 +334,19 @@ def test_process_sales_FailTooFewMonthPoints(sales_data_real_preproc): assert pipe.statistics.xgb_params is None +@pytest.mark.forecast def test_process_sales_FailNoReliableForecast(sales_data_real_preproc): - data = sales_data_real_preproc.copy() + # prepare fake data + df = sales_data_real_preproc.copy() + f_dates = "buchungs_datum" + end = datetime.datetime.now() + start = df[f_dates].max() + fake_dates = pd.date_range(start, end, freq="MS") + fake_data = [(1234, 1014, 1024, 1000, 10, date) for date in 
fake_dates] + fake_df = pd.DataFrame(fake_data, columns=df.columns) + enhanced_df = pd.concat((df, fake_df), ignore_index=True) + + data = enhanced_df.copy() data["betrag"] = 10000 print(data["betrag"]) data = data.iloc[:20000, :] @@ -340,7 +356,7 @@ def test_process_sales_FailNoReliableForecast(sales_data_real_preproc): def __init__(self, *args, **kwargs) -> None: class Predictor: def predict(self, *args, **kwargs): - return np.array([1, 1, 1, 1]) + return np.array([1, 1, 1, 1], dtype=np.float64) self.best_estimator_ = Predictor() @@ -354,7 +370,7 @@ def test_process_sales_FailNoReliableForecast(sales_data_real_preproc): pipe = fc._process_sales( pipe, min_num_data_points=1, - base_num_data_points_months=-100, + base_num_data_points_months=1, ) assert pipe.status != STATUS_HANDLER.SUCCESS -- 2.34.1
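
Taken together, patches 03-15 replace the year-based sliding window in _process_sales with a month-based one: the monthly sums are padded with zero rows so the timeline is continuous up to the current month, the training window starts 36 months back and grows by 12 months per iteration, the last 6 months serve as the test split, and the best estimator found by the randomized search is kept and used to predict the next 6 calendar months. Below is a minimal standalone sketch of that end state, not the module code itself: the function name forecast_monthly_sales, the horizon parameter, and n_iter=10 are placeholders, and the PipeResult/status plumbing, the statistics collection, and the early_stopping_rounds search dimension of the real module are omitted.

# Standalone sketch of the overhauled forecast flow (assumptions: the function
# name `forecast_monthly_sales`, the `horizon` parameter, and `n_iter=10` are
# placeholders; PipeResult/status handling, statistics, and the
# `early_stopping_rounds` search dimension from the real module are omitted).
from __future__ import annotations

import datetime

import numpy as np
import pandas as pd
import scipy.stats
from dateutil.relativedelta import relativedelta
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import KFold, RandomizedSearchCV
from xgboost import XGBRegressor


def forecast_monthly_sales(df: pd.DataFrame, horizon: int = 6) -> pd.DataFrame | None:
    """Expects 'buchungs_datum' (datetime64) and 'betrag' (sales amount) columns."""
    df = df[df["betrag"] > 0].copy()
    df["jahr"] = df["buchungs_datum"].dt.year
    df["monat"] = df["buchungs_datum"].dt.month

    # Monthly sums, padded with zero rows so the timeline is continuous up to
    # the current month (patches 11/12); this keeps index steps aligned with
    # calendar months.
    sums = df.groupby(["jahr", "monat"])["betrag"].sum().reset_index()
    now = datetime.datetime.now()
    grid = pd.DataFrame(
        [
            (y, m)
            for y in range(int(df["jahr"].min()), now.year + 1)
            for m in range(1, 13)
            if y < now.year or m <= now.month
        ],
        columns=["jahr", "monat"],
    )
    monthly = grid.merge(sums, on=["jahr", "monat"], how="left").fillna({"betrag": 0.0})
    monthly.index = pd.to_datetime(
        monthly["monat"].astype(str) + "." + monthly["jahr"].astype(str), format="%m.%Y"
    )

    features, target = ["jahr", "monat"], "betrag"
    params = {
        "n_estimators": scipy.stats.poisson(mu=100),
        "learning_rate": [0.03, 0.04, 0.05],
        "max_depth": range(2, 9),
        "min_child_weight": range(1, 5),
    }

    # Month-based sliding window (patches 03-10): start 36 months back, extend
    # the training window by 12 months per iteration, always test on the last
    # 6 months of the padded timeline.
    dates = monthly.index
    start_idx = next(
        (i for i, d in enumerate(dates) if d >= now - relativedelta(months=36)),
        len(dates) - 1,
    )

    best_estimator = None
    best_mae = float("inf")
    for add_year, idx in enumerate(range(start_idx, -1, -12)):
        train = monthly.loc[dates[idx] : dates[-6]]
        test = monthly.loc[dates[-6] :]
        # Require enough months with actual sales: 30 for the base window,
        # plus 10 for every additional year reached back.
        if len(train[train[target] > 0]) < 30 + 10 * add_year:
            continue
        rand = RandomizedSearchCV(
            XGBRegressor(), params, n_iter=10, cv=KFold(n_splits=5, shuffle=True)
        )
        rand.fit(train[features], train[target])
        y_pred = rand.best_estimator_.predict(test[features])
        if len(np.unique(y_pred)) == 1:  # constant prediction -> not reliable
            continue
        mae = mean_absolute_error(test[target], y_pred)
        if mae < best_mae:
            best_mae, best_estimator = mae, rand.best_estimator_

    if best_estimator is None:
        return None  # the real pipeline sets a dedicated error status here

    # Use the best window's model to predict the next `horizon` calendar months.
    last = pd.to_datetime(now.strftime("%m.%Y"), format="%m.%Y")
    future = pd.date_range(last + pd.DateOffset(months=1), periods=horizon, freq="MS")
    x_future = pd.DataFrame({"jahr": future.year, "monat": future.month})
    return pd.DataFrame(
        {
            "jahr": future.year,
            "monat": future.month,
            "vorhersage": best_estimator.predict(x_future),
        }
    )

Padding the calendar grid with zero-sales months is what makes the fixed step of 12 index positions correspond to exactly one year; without it, gaps in the booking history would let the index-based stepping drift against real time, which is the problem flagged in the TODO notes of patch 06.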