generated from dopt-python/py311
major code rework
This commit is contained in:
parent 09818f79e6
commit 3963fa8d0b
@@ -92,20 +92,33 @@ def measure_length(
     file_path: Path,
     pixels_per_metric_X: float,
     pixels_per_metric_Y: float,
-) -> tuple[list[str | int], t.SensorImages]:
-    # ----------------------------
-    # To identify the midpoint of a 2D area
-    # Input:
-    # file_path (string): path to file including file name and extension
-    # pixelsPerMetricX (float): scaling parameter, Pixels per micrometer in image
-    # pixelsPerMetricY (float): scaling parameter, Pixels per micrometer in image
-    # Output:
-    # data_csv (list): contains measured lengths and areas of electrodes (i.e., column 1 to 18 of csv file)
-    # image of left sensor
-    # image of right sensor
-    # ----------------------------
-    file_stem = file_path.stem
-    folder_path = file_path.parent
+) -> tuple[t.CsvData, t.SensorImages]:
+    """detect and measure the size of the electrodes
+
+    Parameters
+    ----------
+    file_path : Path
+        path to file to analyse
+    pixels_per_metric_X : float
+        scaling parameter x dimension, Pixels per micrometer in image
+    pixels_per_metric_Y : float
+        scaling parameter y dimension, Pixels per micrometer in image
+
+    Returns
+    -------
+    tuple[t.CsvData, t.SensorImages]
+        t.CsvData: (list) data to save as CSV according to requirements, contains strings and ints
+        t.SensorImages: (TypedDict) contains left and right image corresponding to each sensor
+
+    Raises
+    ------
+    errors.ImageNotReadError
+        image was not read successfully
+    errors.ContourCalculationError
+        during contour detection there were several possible error causes
+    errors.InvalidElectrodeCount
+        an invalid number of electrodes were detected
+    """
     data_csv: list[str | int] = []
     image = cv2.imread(str(file_path))
     if image is None:
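
Note: the new signature and docstring reference t.CsvData and t.SensorImages, whose definitions are not part of this diff. A minimal sketch consistent with the docstring above (names and dtypes are inferred, not taken from the commit) could look like:

from typing import TypedDict

import numpy as np
import numpy.typing as npt

# assumed alias: one flat CSV row of measured values and labels
CsvData = list[str | int]


class SensorImages(TypedDict):
    # dtypes assumed; the docstring only says "left and right image corresponding to each sensor"
    left: npt.NDArray[np.uint8]
    right: npt.NDArray[np.uint8]
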
@@ -113,9 +126,6 @@ def measure_length(
 
     cropped = image[500:1500, 100 : image.shape[1] - 100]
     orig = cropped.copy()
-    # TODO: check removal
-    # height, width = cropped.shape[0], cropped.shape[1]
-
     # change colours in the image to black and white
     gray = cv2.cvtColor(cropped, cv2.COLOR_BGR2GRAY)
     _, binary = cv2.threshold(gray, const.THRESHOLD_BW, 255, cv2.THRESH_BINARY)
@@ -126,20 +136,12 @@ def measure_length(
     # find contours in the edge map
     cnts = cv2.findContours(edged.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
     cnts = imutils.grab_contours(cnts)
-
     if cnts is None:
         raise errors.ContourCalculationError(
             "No contours were found in the provided image. Can not continue analysis."
         )
-
     # sort the contours from left to right (i.e., use x coordinates)
     cnts, _ = contours.sort_contours(cnts)
-    # TODO: remove???
-    # bounding_boxes = list(set([cv2.boundingRect(c) for c in cnts]))
-    # cnts = [c for _, c in sorted(zip(bounding_boxes, cnts), key=lambda b: b[0][0])]
-    # min_area = 1000 # adjust as needed
-    # filtered_cnts = [c for c in cnts if cv2.contourArea(c) > min_area]
-
     # check if this sorting was correct (might not be correct if we have overlaps or misfindings)
     # get x coordinates of bounding boxes
     x_coords = [cv2.boundingRect(c)[0] for c in cnts]
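
Note: the validity check that follows (its error message appears in the next hunk) is based on the bounding-box x coordinates collected above. A sketch of such a check inside measure_length, assuming it is a plain sorted-order comparison and reusing cnts and errors from that scope, would be:

# assumption: the x coordinates are compared against their sorted order
x_coords = [cv2.boundingRect(c)[0] for c in cnts]
if x_coords != sorted(x_coords):
    raise errors.ContourCalculationError(
        "Contour detection not valid: contours are not properly sorted from left to right."
    )
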
@@ -150,14 +152,6 @@ def measure_length(
             "Contour detection not valid: contours are not "
             "properly sorted from left to right."
         )
-
-    ##################################################################
-    # TODO: Remove??
-    # ---------------------------------------- just for internal evaluation ---------------------------------------
-    output_image = gray.copy()
-    # ---------------------------------------- just for internal evaluation ---------------------------------------
-    ##################################################################
-
     # to store only electrodes contours and nothing redundant
     accepted_boxes: list[t.Box] = []
     filtered_cnts: list[Any] = []
@@ -213,60 +207,6 @@ def measure_length(
             ]
         )
 
-        ##################################################################
-        # TODO: Remove??
-        # ---------------------------------------- just for internal evaluation ---------------------------------------
-        count = 1
-        # loop over the original points and draw everything
-        cv2.drawContours(output_image, [box.astype("int")], -1, (0, 255, 0), 2)
-        for x, y in box:
-            cv2.circle(output_image, (int(x), int(y)), 5, (0, 0, 255), -1)
-        # draw the midpoints on the image
-        cv2.circle(output_image, (int(tltrX), int(tltrY)), 5, (255, 0, 0), -1)
-        cv2.circle(output_image, (int(blbrX), int(blbrY)), 5, (255, 0, 0), -1)
-        cv2.circle(output_image, (int(tlblX), int(tlblY)), 5, (255, 0, 0), -1)
-        cv2.circle(output_image, (int(trbrX), int(trbrY)), 5, (255, 0, 0), -1)
-        # draw lines between the midpoints
-        cv2.line(
-            output_image,
-            (int(tltrX), int(tltrY)),
-            (int(blbrX), int(blbrY)),
-            (255, 0, 255),
-            2,
-        )
-        cv2.line(
-            output_image,
-            (int(tlblX), int(tlblY)),
-            (int(trbrX), int(trbrY)),
-            (255, 0, 255),
-            2,
-        )
-        # draw the object sizes on the image
-        cv2.putText(
-            output_image,
-            "{:.2f}".format(dimA),
-            (int(tltrX - 100), int(tltrY + 40)),
-            cv2.FONT_HERSHEY_SIMPLEX,
-            1.75,
-            (0, 255, 0),
-            3,
-        )
-        cv2.putText(
-            output_image,
-            "{:.2f}".format(dimB),
-            (int(trbrX - 100), int(trbrY)),
-            cv2.FONT_HERSHEY_SIMPLEX,
-            1.75,
-            (0, 255, 0),
-            3,
-        )
-        # cv2.imwrite(path.join(folder_path, f'{name}_contour_{count}.png'), output_image)
-        count += 1
-
-    cv2.imwrite(str(folder_path / f"{file_stem}_all_contours.png"), output_image)
-    # ---------------------------------------- just for internal evaluation ---------------------------------------
-    ##################################################################
-
     if not filtered_cnts:
         raise errors.ContourCalculationError(
             "Contour detection not valid: no contours recognized"
@@ -294,39 +234,34 @@ def measure_length(
     cropped_sensor_left = orig[y_min:y_max, x_min:x_middle]
     cropped_sensor_right = orig[y_min:y_max, x_middle:x_max]
 
-    ##################################################################
-    # TODO: Remove??
-    # ---------------------------------------- just for internal evaluation ---------------------------------------
-    # save the cropped images for left and right sensor
-    try:
-        cv2.imwrite(path.join(folder_path, f"{file_stem}_left.png"), cropped_sensor_left)
-        cv2.imwrite(path.join(folder_path, f"{file_stem}_right.png"), cropped_sensor_right)
-    except Exception as err:
-        print(f"not possible: Error: {err}")
-    # ---------------------------------------- just for internal evaluation ---------------------------------------
-    ##################################################################
-
     return data_csv, t.SensorImages(left=cropped_sensor_left, right=cropped_sensor_right)
 
 
 # helper function
 # anomaly detection
 def infer_image(
-    image: npt.NDArray,
+    image: npt.NDArray[np.uint8],
     model: Patchcore,
 ) -> t.InferenceResult:
-    # ----------------------------
-    # To evaluate the image
-    # Input:
-    # image (numpy.ndarray): represents image to be checked for anomalies
-    # model (serialized PyTorch state dictionary): model for anomaly detection
-    # Output:
-    # img_np (numpy.ndarray)
-    # anomaly_map_resized (numpy.ndarray): heatmap to visualize detected anomalies
-    # anomaly_score (float): evaluation metric, \in [0, 1] with close to 0 being no anomaly detected
-    # anomaly_label (bool): anomaly detected (1) or not (0)
-    # ----------------------------
-
+    """evaluate one image
+
+    Parameters
+    ----------
+    image : npt.NDArray[np.uint8]
+        represents image to be checked for anomalies
+    model : Patchcore
+        (loaded PyTorch state dictionary): model for anomaly detection
+
+    Returns
+    -------
+    t.InferenceResult
+        contains:
+        img (numpy.ndarray)
+        anomaly_map_resized (numpy.ndarray): heatmap to visualize detected anomalies
+        anomaly_score (float): evaluation metric, in [0, 1] with close to 0 being no
+        anomaly detected
+        anomaly_label (bool): anomaly detected (1) or not (0)
+    """
     torch_device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
     model.to(torch_device)
 
@@ -355,24 +290,34 @@ def infer_image(
     img_np = np.array(pil_image)
    anomaly_map_resized = cv2.resize(anomaly_map, (img_np.shape[1], img_np.shape[0]))
 
-    return img_np, anomaly_map_resized, anomaly_score, anomaly_label
+    return t.InferenceResult(
+        img=img_np,
+        anomaly_map_resized=anomaly_map_resized,
+        anomaly_score=anomaly_score,
+        anomaly_label=anomaly_label,
+    )
 
 
 # ** main function
 def anomaly_detection(
     file_path: Path,
     detection_models: t.DetectionModels,
-    data_csv: list[str | int],
+    data_csv: t.CsvData,
     sensor_images: t.SensorImages,
 ) -> None:
-    # ----------------------------
-    # To load the model, call function for anomaly detection and store the results
-    # Input:
-    # file_path (string): path to file including file name and extension
-    # data_csv (list of floats): results from measuring the electrodes
-    # Output:
-    # none
-    # ----------------------------
+    """load the model, call function for anomaly detection and store the results
+
+    Parameters
+    ----------
+    file_path : Path
+        path to file to analyse
+    detection_models : t.DetectionModels
+        collection of model paths for the left and right sensor
+    data_csv : t.CsvData
+        (list) data to save as CSV according to requirements, contains strings and ints
+    sensor_images : t.SensorImages
+        _description_
+    """
     file_stem = file_path.stem
     folder_path = file_path.parent
 
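
Note: infer_image now returns t.InferenceResult, and anomaly_detection reads it by attribute (result.anomaly_label, result.anomaly_map_resized below). The definition itself is not shown in this commit; a sketch of a compatible container, assuming a NamedTuple (a frozen dataclass would work just as well), is:

from typing import NamedTuple

import numpy as np
import numpy.typing as npt


class InferenceResult(NamedTuple):
    img: npt.NDArray[np.uint8]  # preprocessed input image
    anomaly_map_resized: npt.NDArray[np.float32]  # heatmap resized to the input image (dtype assumed)
    anomaly_score: float  # in [0, 1], close to 0 meaning no anomaly detected
    anomaly_label: bool  # True (1) if an anomaly was detected, else False (0)
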
@@ -389,25 +334,17 @@ def anomaly_detection(
     # loop over left and right sensor
     for i, (side, image) in enumerate(sensor_images.items()):
         # I added the model paths as function parameters
-        image = cast(npt.NDArray, image)
+        image = cast(npt.NDArray[np.uint8], image)
         checkpoint = torch.load(detection_models[side])
         model.load_state_dict(checkpoint["model_state_dict"])
 
-        _, anomaly_map_resized, score, label = infer_image(image, model)
-
-        ##################################################################
-        # TODO: Remove??
-        # ---------------------------------------- just for internal evaluation ---------------------------------------
-        print(score)
-        # ---------------------------------------- just for internal evaluation ---------------------------------------
-        ##################################################################
-
-        data_csv.extend([int(label)])
+        result = infer_image(image, model)
+        data_csv.extend([int(result.anomaly_label)])
 
         ax = axes[i]
         ax.axis("off")
         ax.imshow(image, alpha=0.8)
-        ax.imshow(anomaly_map_resized, cmap="jet", alpha=0.5)
+        ax.imshow(result.anomaly_map_resized, cmap="jet", alpha=0.5)
 
     plt.subplots_adjust(wspace=0, hspace=0)
     plt.savefig(
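
Note: taken together, the reworked functions compose as measure_length producing the CSV row and the two sensor crops, which anomaly_detection then extends with the per-sensor anomaly labels. A usage sketch with placeholder paths and scaling factors, and an assumed dict-like t.DetectionModels mapping sides to checkpoint paths (none of these values come from the commit):

from pathlib import Path

# measure_length and anomaly_detection imported from the reworked module (module path not shown in this commit)

file_path = Path("scans/sensor_0001.png")  # placeholder
detection_models = {"left": Path("models/left.pt"), "right": Path("models/right.pt")}  # placeholder

data_csv, sensor_images = measure_length(
    file_path,
    pixels_per_metric_X=3.5,  # pixels per micrometer, placeholder
    pixels_per_metric_Y=3.5,  # pixels per micrometer, placeholder
)
anomaly_detection(file_path, detection_models, data_csv, sensor_images)
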