From a0d189ac9f55aa3a648174ba92c556c979fca5a9 Mon Sep 17 00:00:00 2001
From: foefl
Date: Fri, 4 Apr 2025 13:37:05 +0200
Subject: [PATCH] add logging of pipeline metrics in database

---
 pyproject.toml               |  4 +--
 src/delta_barth/pipelines.py |  2 +-
 tests/test_pipelines.py      | 49 +++++++++++++++++++++++++++++++-----
 3 files changed, 46 insertions(+), 9 deletions(-)

diff --git a/pyproject.toml b/pyproject.toml
index ba2bc47..908fdd2 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "delta-barth"
-version = "0.5.5dev1"
+version = "0.5.5"
 description = "workflows and pipelines for the Python-based Plugin of Delta Barth's ERP system"
 authors = [
     {name = "Florian Förster", email = "f.foerster@d-opt.com"},
@@ -73,7 +73,7 @@ directory = "reports/coverage"
 
 
 [tool.bumpversion]
-current_version = "0.5.5dev1"
+current_version = "0.5.5"
 parse = """(?x)
     (?P<major>0|[1-9]\\d*)\\.
     (?P<minor>0|[1-9]\\d*)\\.
diff --git a/src/delta_barth/pipelines.py b/src/delta_barth/pipelines.py
index 53da885..a63597c 100644
--- a/src/delta_barth/pipelines.py
+++ b/src/delta_barth/pipelines.py
@@ -22,7 +22,7 @@ def _write_performance_metrics(
 ) -> PipelineMetrics:
     if time_end < time_start:
         raise ValueError("Ending time smaller than starting time")
-    execution_duration = (time_end - time_start) / 10e9
+    execution_duration = (time_end - time_start) / 1e9
     metrics = PipelineMetrics(
         pipeline_name=pipeline_name,
         execution_duration=execution_duration,
diff --git a/tests/test_pipelines.py b/tests/test_pipelines.py
index 3fe9387..b1387b7 100644
--- a/tests/test_pipelines.py
+++ b/tests/test_pipelines.py
@@ -3,23 +3,44 @@ import json
 from unittest.mock import patch
 
 import pytest
+import sqlalchemy as sql
 
 import delta_barth.pipelines
+from delta_barth import databases as db
 from delta_barth import pipelines as pl
 from delta_barth.errors import STATUS_HANDLER
 
 
-def test_write_performance_metrics(session): ...
+def test_write_performance_metrics(session):
+    pipe_name = "test_pipe"
+    t_start = 20_000_000_000
+    t_end = 30_000_000_000
+
+    with patch("delta_barth.pipelines.SESSION", session):
+        metrics = pl._write_performance_metrics(
+            pipeline_name=pipe_name,
+            time_start=t_start,
+            time_end=t_end,
+        )
+    assert metrics["pipeline_name"] == pipe_name
+    assert metrics["execution_duration"] == 10
+
+    with session.db_engine.begin() as con:
+        ret = con.execute(sql.select(db.perf_meas)).all()
+
+    metrics = ret[-1]
+    assert metrics.pipeline_name == pipe_name
+    assert metrics.execution_duration == 10
 
 
 @patch("delta_barth.analysis.forecast.SALES_BASE_NUM_DATAPOINTS_MONTHS", 1)
-def test_sales_prognosis_pipeline(exmpl_api_sales_prognosis_resp):
+def test_sales_prognosis_pipeline(exmpl_api_sales_prognosis_resp, session):
     with patch(
         "delta_barth.analysis.forecast.get_sales_prognosis_data",
     ) as mock:
         mock.return_value = (exmpl_api_sales_prognosis_resp, STATUS_HANDLER.SUCCESS)
-        importlib.reload(delta_barth.pipelines)
-        json_export = pl.pipeline_sales_forecast(None, None)
+        with patch("delta_barth.pipelines.SESSION", session):
+            json_export = pl.pipeline_sales_forecast(None, None)
 
     assert isinstance(json_export, str)
     parsed_resp = json.loads(json_export)
@@ -30,9 +51,18 @@ def test_sales_prognosis_pipeline(exmpl_api_sales_prognosis_resp):
     assert "code" in parsed_resp["status"]
     assert parsed_resp["status"]["code"] == 0
 
+    with session.db_engine.begin() as con:
+        ret = con.execute(sql.select(db.perf_meas)).all()
 
-def test_sales_prognosis_pipeline_dummy():
-    json_export = pl.pipeline_sales_forecast_dummy(None, None)
+    metrics = ret[-1]
+    assert metrics.pipeline_name == "sales_forecast"
+    assert metrics.execution_duration > 0
+
+
+@pytest.mark.new
+def test_sales_prognosis_pipeline_dummy(session):
+    with patch("delta_barth.pipelines.SESSION", session):
+        json_export = pl.pipeline_sales_forecast_dummy(None, None)
 
     assert isinstance(json_export, str)
     parsed_resp = json.loads(json_export)
@@ -46,3 +76,10 @@ def test_sales_prognosis_pipeline_dummy():
     assert entry["vorhersage"] == pytest.approx(47261.058594)
     assert "code" in parsed_resp["status"]
     assert parsed_resp["status"]["code"] == 0
+
+    with session.db_engine.begin() as con:
+        ret = con.execute(sql.select(db.perf_meas)).all()
+
+    metrics = ret[-1]
+    assert metrics.pipeline_name == "sales_forecast_dummy"
+    assert metrics.execution_duration > 0
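
-- 
For reference, the tests above rely on a `session` fixture that is not part of
this patch: an object exposing a `db_engine` that can be patched over
`delta_barth.pipelines.SESSION`. Below is a minimal sketch of such a fixture,
assuming the project registers its tables on a `db.metadata` object and that
SESSION only needs the `db_engine` attribute; both are assumptions, and the
stub class is illustrative, not the project's actual conftest.

# conftest.py (hypothetical sketch, placed after the signature delimiter so
# `git am` ignores it)
import pytest
import sqlalchemy as sql

from delta_barth import databases as db


class _StubSession:
    """Stand-in for the object patched over delta_barth.pipelines.SESSION."""

    def __init__(self, engine):
        self.db_engine = engine


@pytest.fixture
def session(tmp_path):
    # One throwaway SQLite database per test keeps the metrics table isolated.
    engine = sql.create_engine(f"sqlite:///{tmp_path / 'metrics.db'}")
    db.metadata.create_all(engine)  # assumes tables are bound to db.metadata
    yield _StubSession(engine)
    engine.dispose()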