From c7ef220799283a513aa5a7f092bbe9b4a0b3f62a Mon Sep 17 00:00:00 2001 From: Lukas Karras Date: Wed, 16 Oct 2024 08:12:01 +0200 Subject: [PATCH] project structure | added image processing (cropping, background remove) --- .gitignore | 3 +- requirements.txt | 1 + src/concept.py | 10 +++++ src/featue_extraction.py | 8 ++++ src/feature_vector.py | 27 ++++++++++++ src/image_processing.py | 93 ++++++++++++++++++++++++++++++++++++++++ src/learner.py | 17 ++++++++ 7 files changed, 157 insertions(+), 2 deletions(-) create mode 100644 src/concept.py create mode 100644 src/featue_extraction.py create mode 100644 src/feature_vector.py create mode 100644 src/image_processing.py create mode 100644 src/learner.py diff --git a/.gitignore b/.gitignore index cd6cd0c..1901100 100644 --- a/.gitignore +++ b/.gitignore @@ -2,5 +2,4 @@ .vsc .venv -./data/* -!./data/.gitkeep \ No newline at end of file +data \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index e69de29..1db7aea 100644 --- a/requirements.txt +++ b/requirements.txt @@ -0,0 +1 @@ +opencv-python \ No newline at end of file diff --git a/src/concept.py b/src/concept.py new file mode 100644 index 0000000..2569443 --- /dev/null +++ b/src/concept.py @@ -0,0 +1,10 @@ +from enum import Enum, auto + +class Concept(Enum): + UNKNOWN = auto() + VORFAHRT_GEWAEHREN = auto() + VORFAHRT_STRASSE = auto() + STOP = auto() + RECHTS_ABBIEGEN = auto() + LINKS_ABBIEGEN = auto() + # TODO: add remaining signs diff --git a/src/featue_extraction.py b/src/featue_extraction.py new file mode 100644 index 0000000..b78edea --- /dev/null +++ b/src/featue_extraction.py @@ -0,0 +1,8 @@ +class FeatureExtractor: + + def get_overall_color_percentage(): + pass + + def get_color_per_raster_percentage(): + pass + diff --git a/src/feature_vector.py b/src/feature_vector.py new file mode 100644 index 0000000..aff59cc --- /dev/null +++ b/src/feature_vector.py @@ -0,0 +1,27 @@ +from typing import Any + +from concept import Concept + + 
+class FeatureVector: + + def __init__(self, concept: Concept, features: dict = {}) -> None: + self.features: dict = features + self.concept: Concept = concept + + + def add_feature(self, key, value) -> None: + self.features.update({key: value}) + + + def get_concept(self) -> Concept: + return self.concept + + + def get_num_features(self) -> int: + return len(self.features) + + + def get_feature_value(self, key) -> Any: + return self.features[key] + diff --git a/src/image_processing.py b/src/image_processing.py new file mode 100644 index 0000000..39250e0 --- /dev/null +++ b/src/image_processing.py @@ -0,0 +1,93 @@ +import cv2 as cv +import numpy as np + +def crop(image: cv.Mat) -> cv.Mat: + width, height = image.shape[:2] + + x1 = None + for x in range(width): + for y in range(height): + b, g, r = image[x][y] + if(r < 150 and g < 150 and b < 150): + x1 = x + break + if x1 is not None: + break + + y1 = None + for y in range(height): + for x in range(width): + b, g, r = image[x][y] + if(r < 150 and g < 150 and b < 150): + y1 = y + break + if y1 is not None: + break + + x2 = None + for x in range(width-1, 0, -1): + for y in range(height): + b, g, r = image[x][y] + if(r < 150 and g < 150 and b < 150): + x2 = x + break + if x2 is not None: + break + + y2 = None + for y in range(height-1, 0, -1): + for x in range(width): + b, g, r = image[x][y] + if(r < 150 and g < 150 and b < 150): + y2 = y + break + if y2 is not None: + break + + return image[x1-2:x2+2, y1-2:y2+2] + + +def remove_background(image: cv.Mat, lower_rgb: np.ndarray) -> cv.Mat: + width, height = image.shape[:2] + + # left to right + for x in range(width): + for y in range(height): + b, g, r = image[x][y] + if(r >= lower_rgb[0] and g >= lower_rgb[1] and b >= lower_rgb[2]): + image[x][y] = [0, 0, 0] + else: + break + + # top to bottom + for y in range(height): + for x in range(width): + b, g, r = image[x][y] + if(r >= lower_rgb[0] and g >= lower_rgb[1] and b >= lower_rgb[2]): + image[x][y] = [0, 0, 0] + else: 
+ break + + # right to left + for x in range(width): + for y in range(height-1, 0, -1): + b, g, r = image[x][y] + if(r >= lower_rgb[0] and g >= lower_rgb[1] and b >= lower_rgb[2]): + image[x][y] = [0, 0, 0] + else: + break + + # bottom to top + for y in range(height): + for x in range(width-1, 0, -1): + b, g, r = image[x][y] + if(r >= lower_rgb[0] and g >= lower_rgb[1] and b >= lower_rgb[2]): + image[x][y] = [0, 0, 0] + else: + break + + return image + + +def raster_image(image: cv.Mat, num_cols: int = 4, num_rows: int = 4) -> dict[cv.Mat]: + pass \ No newline at end of file diff --git a/src/learner.py b/src/learner.py new file mode 100644 index 0000000..8a7c995 --- /dev/null +++ b/src/learner.py @@ -0,0 +1,17 @@ +class Learner: + # The training method that changes the internal state of the learner such that + # it will classify examples of a similar set (i.e. the testSet) better. + # + # @param trainingSet contains feature vectors and corresponding concepts + # to provide experience to learn from + + def learn(trainingSet): + pass + + # find the concept of the example from the internal knowledge of the learner + # this method must not consider example.getConcept() at all!! + # + # @param example: is a feature vector + # @return the concept of the example as learned by this learner before + def classify(example): + pass