major overhaul of forecast pipeline #21
@@ -208,7 +208,17 @@ def _process_sales(
     df_cust["jahr"] = df_cust[DATE_FEAT].dt.year
     df_cust["monat"] = df_cust[DATE_FEAT].dt.month

-    monthly_sum = df_cust.groupby(["jahr", "monat"])[SALES_FEAT].sum().reset_index()
+    current_year = datetime.now().year
+    current_month = datetime.now().month
+    years = range(df_cust["jahr"].min(), current_year + 1)
+
+    old_monthly_sum = df_cust.groupby(["jahr", "monat"])[SALES_FEAT].sum().reset_index()
+
+    all_month_year_combinations = pd.DataFrame(
+        [(year, month) for year in years for month in range(1, 13) if (year < current_year or (year == current_year and month <= current_month))], columns=["jahr", "monat"]
+    )
+
+    monthly_sum = pd.merge(all_month_year_combinations, old_monthly_sum, on=["jahr", "monat"], how='left')
     monthly_sum[DATE_FEAT] = (
         monthly_sum["monat"].astype(str) + "." + monthly_sum["jahr"].astype(str)
     )
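The new block builds an explicit (year, month) grid up to the current month and left-merges the grouped sales onto it, so months without any sales still appear in monthly_sum (as NaN after the merge). Below is a minimal sketch of the same idea on toy data; the column names "datum" and "umsatz" are placeholders for DATE_FEAT and SALES_FEAT, which are defined elsewhere in the module.

```python
# Minimal sketch of the month-padding idea above, on toy data.
# "datum" and "umsatz" stand in for DATE_FEAT and SALES_FEAT.
from datetime import datetime

import pandas as pd

df_cust = pd.DataFrame(
    {
        "datum": pd.to_datetime(["2023-01-15", "2023-04-02", "2023-04-20"]),
        "umsatz": [100.0, 50.0, 25.0],
    }
)
df_cust["jahr"] = df_cust["datum"].dt.year
df_cust["monat"] = df_cust["datum"].dt.month

now = datetime.now()
years = range(df_cust["jahr"].min(), now.year + 1)

# every (year, month) pair up to the current month, even if no sales row exists
grid = pd.DataFrame(
    [
        (y, m)
        for y in years
        for m in range(1, 13)
        if (y < now.year or (y == now.year and m <= now.month))
    ],
    columns=["jahr", "monat"],
)

monthly = df_cust.groupby(["jahr", "monat"])["umsatz"].sum().reset_index()
# the left merge keeps the full grid; months without sales come out as NaN
monthly = grid.merge(monthly, on=["jahr", "monat"], how="left").fillna({"umsatz": 0.0})
print(monthly.head(12))
```

Padding the empty months this way keeps the monthly timeline continuous, which is what the TODO notes removed further down were asking for.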
@@ -228,7 +238,7 @@ def _process_sales(
     # Randomized Search
     kfold = KFold(n_splits=5, shuffle=True)
     params: ParamSearchXGBRegressor = {
-        "n_estimators": scipy.stats.poisson(mu=1000),
+        "n_estimators": scipy.stats.poisson(mu=100),
         "learning_rate": [0.03, 0.04, 0.05],
         "max_depth": range(2, 9),
         "min_child_weight": range(1, 5),
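Here n_estimators is sampled from a Poisson distribution, now centred on 100 instead of 1000; RandomizedSearchCV accepts any scipy frozen distribution (anything with an rvs method) inside param_distributions. The sketch below shows how such a search space is consumed; the XGBRegressor settings and toy data are illustrative, not the values used in this function.

```python
# Sketch of how the search space above is consumed by RandomizedSearchCV.
import numpy as np
import scipy.stats
from sklearn.model_selection import KFold, RandomizedSearchCV
from xgboost import XGBRegressor

rng = np.random.default_rng(0)
X = rng.normal(size=(60, 4))
y = rng.normal(size=60)

param_distributions = {
    # scipy frozen distributions expose .rvs(), so RandomizedSearchCV can sample them;
    # poisson(mu=100) keeps the sampled n_estimators near 100 instead of near 1000
    "n_estimators": scipy.stats.poisson(mu=100),
    "learning_rate": [0.03, 0.04, 0.05],
    "max_depth": range(2, 9),
    "min_child_weight": range(1, 5),
}

kfold = KFold(n_splits=5, shuffle=True, random_state=0)
search = RandomizedSearchCV(
    XGBRegressor(objective="reg:squarederror"),
    param_distributions=param_distributions,
    n_iter=10,
    cv=kfold,
    random_state=0,
)
search.fit(X, y)
print(search.best_params_)
```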
@ -245,35 +255,29 @@ def _process_sales(
|
||||
best_score_r2: float | None = None
|
||||
best_start_year: int | None = None
|
||||
too_few_month_points: bool = True
|
||||
# forecast: pd.DataFrame | None = None
|
||||
# TODO: write routine to pad missing values in datetime row
|
||||
# TODO problem: continuous timeline expected, but values can be empty for multiple months
|
||||
# TODO: therefore, stepping with fixed value n does not result in timedelta of n episodes
|
||||
# Option A: pad data frame with zero values --> could impede forecast algorithm
|
||||
# Option B: calculate next index based on timedelta
|
||||
|
||||
dates = cast(pd.DatetimeIndex, monthly_sum.index)
|
||||
# print("dates: ", dates)
|
||||
# ?? --- new: use monthly basis for time windows
|
||||
# baseline: 3 years - 36 months
|
||||
starting_date = datetime.datetime.now() - relativedelta(months=36)
|
||||
# starting_date = dates.max() - relativedelta(months=36)
|
||||
start_index = next(
|
||||
(i for i, date in enumerate(dates) if date >= starting_date), len(dates) - 1
|
||||
|
||||
target_index, succ = next(
|
||||
((i, True) for i, date in enumerate(dates) if date >= starting_date), (len(dates) - 1, False)
|
||||
)
|
||||
print("start idx: ", start_index, "length dates: ", len(dates))
|
||||
# print("start idx: ", target_index, "length dates: ", len(dates))
|
||||
|
||||
for add_year, date_idx in enumerate(range(start_index, -1, -12)):
|
||||
print("date_idx: ", date_idx)
|
||||
# print("date_idx: ", date_idx)
|
||||
first_date = dates[date_idx]
|
||||
print("first date: ", first_date)
|
||||
# print("first date: ", first_date)
|
||||
split_date = dates[-6]
|
||||
|
||||
train = cast(
|
||||
pd.DataFrame,
|
||||
monthly_sum.loc[first_date:split_date].copy(), # type: ignore
|
||||
)
|
||||
print(train)
|
||||
print("Length train: ", len(train))
|
||||
# print(train)
|
||||
# print("Length train: ", len(train))
|
||||
test = cast(
|
||||
pd.DataFrame,
|
||||
monthly_sum.loc[split_date:].copy(), # type: ignore
|
||||
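The window logic finds the index of the baseline date (roughly 36 months before now), steps back twelve entries per iteration from there, and splits the monthly series six entries before its end into train and test. The following self-contained sketch reproduces that flow on a toy monthly index; it uses the simpler start_index spelling for brevity and assumes monthly_sum is indexed by a monthly DatetimeIndex, as the cast above implies.

```python
# Sketch of the backward-stepping window on a toy monthly DatetimeIndex.
import datetime

import pandas as pd
from dateutil.relativedelta import relativedelta

monthly_sum = pd.DataFrame(
    {"umsatz": range(60)},
    index=pd.date_range("2020-01-01", periods=60, freq="MS"),
)
dates = monthly_sum.index

# baseline window: the last 36 months; fall back to the last index if nothing matches
starting_date = datetime.datetime.now() - relativedelta(months=36)
start_index = next(
    (i for i, date in enumerate(dates) if date >= starting_date), len(dates) - 1
)

# each step back adds twelve more months of history; the split sits six entries
# before the end, so train runs up to the split and test from the split onward
for add_year, date_idx in enumerate(range(start_index, -1, -12)):
    first_date = dates[date_idx]
    split_date = dates[-6]
    train = monthly_sum.loc[first_date:split_date]
    test = monthly_sum.loc[split_date:]
    print(add_year, first_date.date(), len(train), len(test))
```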
@@ -284,7 +288,7 @@ def _process_sales(
         # ?? --- new: adapted condition to fit new for-loop
         # test set size fixed at 6 --> first iteration: baseline - 6 entries
         # for each new year 10 new data points needed
-        if len(train) >= 30 + 10 * add_year:
+        if len(train[train[SALES_FEAT] > 0]) >= 30 + 10 * add_year:
             too_few_month_points = False

             rand = RandomizedSearchCV(
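Because the padded monthly_sum now contains rows for months without sales, the adapted gate counts only months with a positive sales value before comparing against the 30 + 10 * add_year minimum. A toy illustration of the difference, with "umsatz" standing in for the module-level SALES_FEAT constant:

```python
# Toy illustration of the adapted gate: after the month padding, train also
# contains rows for empty months, so only months with actual sales should
# count toward the minimum number of data points.
import numpy as np
import pandas as pd

SALES_FEAT = "umsatz"  # stand-in for the module-level constant
train = pd.DataFrame({SALES_FEAT: [120.0, 0.0, np.nan, 80.0, 0.0, 45.0]})

add_year = 0
required = 30 + 10 * add_year
print(len(train), "rows incl. padded months")             # what the old check counted
print(len(train[train[SALES_FEAT] > 0]), "real months")   # what the new check counts
print(len(train[train[SALES_FEAT] > 0]) >= required)
```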