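"""Tests for ``delta_barth.pipelines``: performance-metric logging and the
sales-forecast pipelines.

The ``session`` argument is assumed to be a fixture provided by the test
suite's conftest, exposing a SQLAlchemy engine as ``db_engine``.
"""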
import json
from unittest.mock import patch

import pytest
import sqlalchemy as sql

from delta_barth import databases as db
from delta_barth import pipelines as pl
from delta_barth.errors import STATUS_HANDLER


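# _write_performance_metrics takes start/end timestamps that are presumably
# nanosecond counters (e.g. from time.perf_counter_ns()): the difference of
# 10_000_000_000 ns below must come back as an execution_duration of 10 s,
# both in the returned dict and in the perf_meas table.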
def test_write_performance_metrics_Success(session):
    pipe_name = "test_pipe"
    t_start = 20_000_000_000
    t_end = 30_000_000_000

    with patch("delta_barth.pipelines.SESSION", session):
        metrics = pl._write_performance_metrics(
            pipeline_name=pipe_name,
            time_start=t_start,
            time_end=t_end,
        )
    assert metrics["pipeline_name"] == pipe_name
    assert metrics["execution_duration"] == 10

    with session.db_engine.begin() as con:
        ret = con.execute(sql.select(db.perf_meas))
        metrics = ret.all()[-1]

    assert metrics.pipeline_name == pipe_name
    assert metrics.execution_duration == 10


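# A start timestamp later than the end timestamp is invalid input and
# should be rejected with a ValueError.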
def test_write_performance_metrics_FailStartingTime(session):
    pipe_name = "test_pipe"
    t_start = 30_000_000_000
    t_end = 20_000_000_000

    with patch("delta_barth.pipelines.SESSION", session):
        with pytest.raises(ValueError):
            _ = pl._write_performance_metrics(
                pipeline_name=pipe_name,
                time_start=t_start,
                time_end=t_end,
            )


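# End-to-end pipeline run with the data-fetching call mocked out.
# SALES_BASE_NUM_DATAPOINTS_MONTHS is patched to 1 (presumably the minimum
# number of monthly data points the forecast requires) so that the small
# fixture response is accepted.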
@patch("delta_barth.analysis.forecast.SALES_BASE_NUM_DATAPOINTS_MONTHS", 1)
def test_sales_prognosis_pipeline(exmpl_api_sales_prognosis_resp, session):
    with patch(
        "delta_barth.analysis.forecast.get_sales_prognosis_data",
    ) as mock:
        mock.return_value = (exmpl_api_sales_prognosis_resp, STATUS_HANDLER.SUCCESS)
        with patch("delta_barth.pipelines.SESSION", session):
            json_export = pl.pipeline_sales_forecast(None, None)

    assert isinstance(json_export, str)
    parsed_resp = json.loads(json_export)
    assert "response" in parsed_resp
    assert "status" in parsed_resp
    assert "daten" in parsed_resp["response"]
    assert len(parsed_resp["response"]["daten"]) > 0
    assert "code" in parsed_resp["status"]
    assert parsed_resp["status"]["code"] == 0

    with session.db_engine.begin() as con:
        ret = con.execute(sql.select(db.perf_meas))
        metrics = ret.all()[-1]

    assert metrics.pipeline_name == "sales_forecast"
    assert metrics.execution_duration > 0


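# The dummy pipeline variant runs without any mocked API data and should
# return a fixed, known forecast while still logging performance metrics.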
@pytest.mark.new
def test_sales_prognosis_pipeline_dummy(session):
    with patch("delta_barth.pipelines.SESSION", session):
        json_export = pl.pipeline_sales_forecast_dummy(None, None)

    assert isinstance(json_export, str)
    parsed_resp = json.loads(json_export)
    assert "response" in parsed_resp
    assert "status" in parsed_resp
    assert "daten" in parsed_resp["response"]
    assert len(parsed_resp["response"]["daten"]) > 0
    entry = parsed_resp["response"]["daten"][0]
    assert entry["jahr"] == 2022
    assert entry["monat"] == 11
    assert entry["vorhersage"] == pytest.approx(47261.058594)
    assert "code" in parsed_resp["status"]
    assert parsed_resp["status"]["code"] == 0

    with session.db_engine.begin() as con:
        ret = con.execute(sql.select(db.perf_meas))
        metrics = ret.all()[-1]

    assert metrics.pipeline_name == "sales_forecast_dummy"
    assert metrics.execution_duration > 0