code cleansing, related to #2

Florian Förster 2025-10-23 10:31:00 +02:00
parent a9f9ffc5e7
commit 9dc7b4f6ff
2 changed files with 7 additions and 66 deletions

View File

@@ -28,13 +28,7 @@ warnings.filterwarnings(
     category=UserWarning,
 )

-# input parameters: user-defined
-file_path: Path = Path(r"C:\Users\demon\Documents\EKF\Analyse_fuer_Florian\bild2.bmp")
-pixels_per_metric_X: float = 0.251
-pixels_per_metric_Y: float = 0.251
-
-# measuring

 def midpoint(
     pt_A: npt.NDArray[np.floating],
     pt_B: npt.NDArray[np.floating],
@@ -47,11 +41,8 @@ def check_box_redundancy(
     box_2: t.Box,
     tolerance: float = 5.0,
 ) -> bool:
-    # unpack the boxes
     c1, s1, _ = box_1
     c2, s2, _ = box_2

-    # sort width and height such that (w, h) == (h, w) is treated the same
-    # (might have been recognized in different orders)
     s1 = sorted(s1)
     s2 = sorted(s2)
@@ -61,7 +52,6 @@ def check_box_redundancy(
     return bool(center_dist < tolerance and size_diff < tolerance)


-# ** main function
 def measure_length(
     img_path: Path,
     pixels_per_metric_X: float,
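The check_box_redundancy hunks above only show the unpacking, the size sorting, and the final comparison; center_dist and size_diff are computed in lines outside the visible context. For orientation, a hedged reconstruction of the whole helper, reusing the module's existing dist and t imports; the two middle assignments are assumptions, not part of this diff:

    def check_box_redundancy(box_1: t.Box, box_2: t.Box, tolerance: float = 5.0) -> bool:
        c1, s1, _ = box_1
        c2, s2, _ = box_2
        s1 = sorted(s1)
        s2 = sorted(s2)
        # Assumed definitions; the actual computation is not shown in this diff.
        center_dist = dist.euclidean(c1, c2)
        size_diff = max(abs(a - b) for a, b in zip(s1, s2))
        return bool(center_dist < tolerance and size_diff < tolerance)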
@@ -74,78 +64,61 @@ def measure_length(
     cropped = image[500:1500, 100 : image.shape[1] - 100]
     orig = cropped.copy()

-    # change colours in the image to black and white
     gray = cv2.cvtColor(cropped, cv2.COLOR_BGR2GRAY)
     _, binary = cv2.threshold(gray, const.THRESHOLD_BW, 255, cv2.THRESH_BINARY)

-    # perform edge detection, identify rectangular shapes
     kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
     closed = cv2.morphologyEx(binary, cv2.MORPH_CLOSE, kernel)
     edged = cv2.Canny(closed, 50, 100)

-    # find contours in the edge map
     cnts = cv2.findContours(edged.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
     cnts = imutils.grab_contours(cnts)
     if cnts is None:
         raise errors.ContourCalculationError(
             "No contours were found in the provided image. Can not continue analysis."
         )

-    # sort the contours from left to right (i.e., use x coordinates)
     cnts, _ = contours.sort_contours(cnts)

-    # check if this sorting was correct (might not be correct if we have overlaps or misfindings)
-    # get x coordinates of bounding boxes
     x_coords = [cv2.boundingRect(c)[0] for c in cnts]
-    # check if x coordinates are sorted in increasing order
     is_sorted = np.all(x1 <= x2 for x1, x2 in zip(x_coords, x_coords[1:]))  # type: ignore
     if not is_sorted:
         raise errors.ContourCalculationError(
             "Contour detection not valid: contours are not "
             "properly sorted from left to right."
         )

-    # to store only electrodes contours and nothing redundant
     accepted_boxes: list[t.Box] = []
     filtered_cnts: list[Any] = []

-    # loop over the contours individually
     for c in cnts:
-        # compute the rotated bounding box of the contour
         rbox = cast(t.Box, cv2.minAreaRect(c))
-        # !! should only be newer OpenCV versions
-        # box = cv2.cv.BoxPoints(rbox) if is_cv2() else cv2.boxPoints(rbox)
         box = cv2.boxPoints(rbox)
         box = np.array(box, dtype=np.int32)
-        # order the points in the contour in top-left, top-right, bottom-right, and bottom-left
         box = cast(npt.NDArray[np.float32], perspective.order_points(box))
-        # unpack the bounding box
         (tl, tr, br, bl) = box

-        # compute the midpoints between the top-left and top-right as well as bottom-left and bottom-right coordinates
         (tltrX, tltrY) = midpoint(tl, tr)
         (blbrX, blbrY) = midpoint(bl, br)
-        # compute the midpoints between the top-left and bottom-left as well as the top-right and bottom-right coordinates
         (tlblX, tlblY) = midpoint(tl, bl)
         (trbrX, trbrY) = midpoint(tr, br)

-        # compute the Euclidean distance between the midpoints
         dA = dist.euclidean((tltrX, tltrY), (blbrX, blbrY))
         dB = dist.euclidean((tlblX, tlblY), (trbrX, trbrY))

-        # if the contour is not sufficiently large, ignore it
         if dA < 100 or dB < 100:
             continue

-        # check for redundancy
         is_duplicate = any(
             check_box_redundancy(rbox, existing) for existing in accepted_boxes
         )
         if is_duplicate:
             continue

-        # accept box and contour
         accepted_boxes.append(rbox)
         filtered_cnts.append(c)

-        # compute the size of the electrode object
-        dimA = dA / pixels_per_metric_Y  # y
-        dimB = dB / pixels_per_metric_X  # x
+        dimA = dA / pixels_per_metric_Y
+        dimB = dB / pixels_per_metric_X

         data_csv.extend(
             [
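One unchanged line in the hunk above is worth flagging: is_sorted passes a generator expression to np.all, which wraps it in a zero-dimensional object array and therefore always evaluates to True, so the left-to-right check can never fail (the trailing type: ignore hints at this). A minimal sketch of the intended check using the builtin all; this is a suggestion and not something this commit changes:

    # Hypothetical correction, not part of this commit: the builtin all()
    # consumes the generator pairwise instead of truth-testing the object.
    is_sorted = all(x1 <= x2 for x1, x2 in zip(x_coords, x_coords[1:]))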
@@ -160,7 +133,6 @@ def measure_length(
             "Contour detection not valid: no contours recognized"
         )

-    # if incorrect number of electrodes has been identified
     num_contours = len(filtered_cnts)
     if num_contours != const.NUM_VALID_ELECTRODES:
         raise errors.InvalidElectrodeCount(
@@ -168,7 +140,6 @@ def measure_length(
             f"expected value: count = {num_contours}, expected = {const.NUM_VALID_ELECTRODES}"
         )

-    # identify left and right sensor areas
     x_min = min(np.min(c[:, 0, 0]) for c in filtered_cnts) - 20
     x_max = max(np.max(c[:, 0, 0]) for c in filtered_cnts) + 20
     y_min = min(np.min(c[:, 0, 1]) for c in filtered_cnts) - 20
@@ -178,15 +149,12 @@ def measure_length(
     leftmost_x_fourth = min(filtered_cnts[3][:, 0, 0])
     x_middle = rightmost_x_third + int((leftmost_x_fourth - rightmost_x_third) / 2.0)

-    # perform further cropping and separation of left and right sensor
     cropped_sensor_left = orig[y_min:y_max, x_min:x_middle]
     cropped_sensor_right = orig[y_min:y_max, x_middle:x_max]

     return data_csv, t.SensorImages(left=cropped_sensor_left, right=cropped_sensor_right)


-# helper function
-# anomaly detection
 def infer_image(
     image: npt.NDArray[np.uint8],
     model: Patchcore,
@@ -211,7 +179,6 @@ def infer_image(
     anomaly_label = output.pred_label.item()
     anomaly_map = output.anomaly_map.squeeze().cpu().numpy()

-    # resize heatmap to original image size
     img_np = np.array(pil_image)
     anomaly_map_resized = cv2.resize(anomaly_map, (img_np.shape[1], img_np.shape[0]))
@@ -223,7 +190,6 @@ def infer_image(
     )


-# ** main function
 def anomaly_detection(
     img_path: Path,
     detection_models: t.DetectionModels,
@@ -233,15 +199,12 @@ def anomaly_detection(
     file_stem = img_path.stem
     folder_path = img_path.parent

-    # reconstruct the model and initialize the engine
     model = Patchcore(
         backbone=const.BACKBONE, layers=const.LAYERS, coreset_sampling_ratio=const.RATIO
     )
-    # preparation for plot
     _, axes = plt.subplots(1, 2, figsize=(12, 6))
-    # loop over left and right sensor
     for i, (side, image) in enumerate(sensor_images.items()):
-        # I added the model paths as function parameters
         image = cast(npt.NDArray[np.uint8], image)
         checkpoint = torch.load(detection_models[side])
         model.load_state_dict(checkpoint["model_state_dict"])
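With the hard-coded user parameters removed from module level, the image path and the calibration factors now have to be supplied by the caller. A minimal call sketch, assuming this module is the dopt_sensor_anomalies.detection module that the tests import and that pixels_per_metric_Y is a parameter next to pixels_per_metric_X (neither is confirmed by the visible context); the example values mirror the constants deleted above:

    from pathlib import Path

    import dopt_sensor_anomalies.detection as detect

    # bild2.bmp and 0.251 reproduce the values that were hard-coded before this commit.
    data_csv, sensor_images = detect.measure_length(
        img_path=Path("bild2.bmp"),
        pixels_per_metric_X=0.251,
        pixels_per_metric_Y=0.251,
    )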

View File

@@ -10,28 +10,6 @@ import dopt_sensor_anomalies.detection as detect
 import dopt_sensor_anomalies.types as t
 from dopt_sensor_anomalies import constants, errors

-# TODO remove
-# @pytest.fixture(scope="module")
-# def img_paths() -> tuple[Path, ...]:
-# img_folder = Path(__file__).parent / "_img"
-# if not img_folder.exists():
-# raise FileNotFoundError("Img path not existing")
-# img_paths = tuple(img_folder.glob("*.bmp"))
-# if not img_paths:
-# raise ValueError("No images found")
-# return img_paths
-
-
-# @pytest.fixture(scope="module")
-# def single_img_path() -> Path:
-# img_folder = Path(__file__).parent / "_img"
-# if not img_folder.exists():
-# raise FileNotFoundError("Img path not existing")
-# img_paths = tuple(img_folder.glob("*_12.bmp"))
-# if not img_paths:
-# raise ValueError("No images found")
-# return img_paths[0]
-

 def test_midpoint():
     ptA = np.array([1.0, 2.0])
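The final hunk only shows the first line of the test body. For orientation, a hedged sketch of how such a midpoint test typically continues; the expected result is inferred from how midpoint is used inside measure_length (a coordinate-wise average), not from anything shown in this diff:

    def test_midpoint():
        ptA = np.array([1.0, 2.0])
        ptB = np.array([3.0, 4.0])
        # Assumption: midpoint returns the coordinate-wise average as an (x, y) pair.
        mid_x, mid_y = detect.midpoint(ptA, ptB)
        assert mid_x == 2.0 and mid_y == 3.0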