Mirror of https://github.com/3x4byte/StreetsignRecognition.git (synced 2025-12-21 01:15:52 +00:00)

Commit: kNN implemented with analyse
src/classes/feature_vector.py
@@ -3,15 +3,21 @@ import numpy as np
 import os
 import pickle
 
-from concept import Concept
-from feature import Feature
+from classes.concept import Concept
+from classes.feature import Feature
 
 
 class FeatureVector:
 
-    def __init__(self, concept: Concept, features: dict = {}) -> None:
-        self.features: dict = features
-        self.concept: Concept = concept
+    def __init__(self, concept: Concept, features: dict = {}, features_list: list = None, loaded: bool = False) -> None:
+        if loaded == False:
+            self.loaded: bool = False
+            self.features: dict = features
+            self.concept: Concept = concept
+        else:
+            self.loaded: bool = True
+            self.concept: Concept = concept
+            self.features_list: list = features_list
 
 
     def _get_values_of_feature(self, feature: Feature) -> list[int]:
@@ -53,6 +59,9 @@ class FeatureVector:
 
 
     def get_vector(self) -> list:
+        if self.loaded:
+            return self.features_list
+
         ret = []
         for feature in Feature:
             ret = ret + self._get_values_of_feature(feature)
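For orientation, a minimal usage sketch of the two construction paths (assuming the Concept enum shown in learner.py below; the concrete Feature members are not part of this diff):

from classes.concept import Concept
from classes.feature_vector import FeatureVector

# "loaded" path: the vector already exists as a flat list (e.g. read back from CSV),
# so get_vector() returns it unchanged instead of iterating over the Feature enum.
fv = FeatureVector(concept=Concept.STOP, features_list=[0, 27, 0, 9], loaded=True)
assert fv.get_vector() == [0, 27, 0, 9]

# default path: features stay a dict keyed by Feature members and are flattened
# on demand by get_vector(); the concrete Feature keys depend on feature.py.
# fv = FeatureVector(concept=Concept.STOP, features={Feature.SOME_FEATURE: [...]})  # hypothetical key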
src/classes/learner.py (new file, 83 lines)
@@ -0,0 +1,83 @@
+import csv
+import ast
+import math
+
+from classes.concept import Concept
+from classes.feature_vector import FeatureVector
+
+
+class Learner:
+    # The training method: it changes the internal state of the learner such that
+    # it will classify examples of a similar set (i.e. the test set) better.
+    #
+    # @param path_to_training_set path to a CSV file of feature vectors and the
+    # corresponding concepts that provide the experience to learn from
+
+    def learn(self, path_to_training_set: str):
+        training_set = []
+        with open(path_to_training_set, mode='r', newline='') as csv_file:
+            reader = csv.reader(csv_file, delimiter=';')
+            next(reader)  # skip the header row
+            for row in reader:
+                fv = FeatureVector(concept=row[1], features_list=ast.literal_eval(row[2]), loaded=True)
+                training_set.append(fv)
+
+        self.training_set = training_set
+
+        return training_set
+
+    # Find the concept of the example from the internal knowledge of the learner.
+    # This method must not consider example.getConcept() at all!
+    #
+    # @param input_feature_vector: a feature vector (a plain list of feature values)
+    # @return a dict with the vote count per concept among the k nearest neighbours
+    def classify(self, input_feature_vector):
+        distances = []
+        result: dict = {
+            Concept.LINKS_ABBIEGEN: 0,
+            Concept.RECHTS_ABBIEGEN: 0,
+            Concept.RECHTS_VOR_LINKS: 0,
+            Concept.STOP: 0,
+            Concept.VORFAHRT_GEWAEHREN: 0,
+            Concept.VORFAHRT_STRASSE: 0
+        }
+        for single_fv in self.training_set:
+            single_dist = self.euclid_distance(single_fv.get_vector(), input_feature_vector)
+            distances.append((single_fv.get_concept(), single_dist))
+
+        sorted_distances = sorted(distances, key=lambda pair: pair[1])
+        k_nearest = 3
+        interested_distances = sorted_distances[:k_nearest]
+
+        for interested_fv in interested_distances:
+            concept = self.string_to_enum(Concept, interested_fv[0])
+            result[concept] += 1
+
+        return result
+
+
+
+    def euclid_distance(self, list_a, list_b):
+        if len(list_a) != len(list_b):
+            raise Exception("Both lists must be equal in size!")
+
+        total = 0
+        for i in range(0, len(list_a)):
+            total += (list_b[i] - list_a[i]) ** 2
+
+        return math.sqrt(total)
+
+    def string_to_enum(self, enum_class, enum_string):
+        try:
+            # Split the string (e.g. "Concept.STOP") to get the enum member name
+            _, member_name = enum_string.split('.')
+            # Use getattr to get the enum member
+            return getattr(enum_class, member_name)
+        except (AttributeError, ValueError) as e:
+            print(f"Error: {e}")
+            return None
+
+    def analyse(self, result, k):
+        sorted_dict_result = {key: value for key, value in sorted(result.items(), key=lambda item: item[1])}
+        for key, amount in sorted_dict_result.items():
+            probability = (amount / k) * 100
+            print(f"Probability of {key} is {probability}%")
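learn() leaves the CSV layout implicit; from the indexing above (delimiter ';', header row skipped, row[1] treated as the concept string and row[2] parsed with ast.literal_eval), a compatible file would look roughly like the lines below. The first column and the header names are assumptions for illustration, not taken from this commit:

image;concept;features
sign_0001.png;Concept.STOP;[0, 27, 0, 9, 0, 64]
sign_0002.png;Concept.VORFAHRT_GEWAEHREN;[0, 0, 0, 100, 0, 12]

A quick check of the parsing step that learn() relies on:

import ast

row = ['sign_0001.png', 'Concept.STOP', '[0, 27, 0, 9, 0, 64]']
concept_string = row[1]              # later resolved to an enum member by string_to_enum()
features = ast.literal_eval(row[2])  # -> [0, 27, 0, 9, 0, 64] as a real Python list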
@@ -2,10 +2,10 @@ import os
 import csv
 import cv2 as cv
 
-from featue_extraction import get_color_percentage, get_raster_color_percentage, get_edges, get_corners, get_contours
-from feature_vector import FeatureVector
-from concept import Concept
-from feature import Feature
+from util.featue_extraction import get_color_percentage, get_raster_color_percentage, get_edges, get_corners, get_contours
+from classes.feature_vector import FeatureVector
+from classes.concept import Concept
+from classes.feature import Feature
 
 img_path = os.path.abspath(os.path.join(__file__, "..", "..", "data", "processed"))
 vector_path = os.path.abspath(os.path.join(__file__, "..", "..", "data", "vectors"))
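The writer side of that CSV is not part of this hunk; presumably this script serializes each FeatureVector along these lines. A sketch under that assumption only, with the output file name, the header names, and the feature_vectors collection chosen purely for illustration:

import csv
import os

# hypothetical output location; vector_path comes from the script above
csv_path = os.path.join(vector_path, "feature_vectors.csv")

with open(csv_path, mode='w', newline='') as csv_file:
    writer = csv.writer(csv_file, delimiter=';')
    writer.writerow(["image", "concept", "features"])  # header row, skipped later by learn()
    for name, fv in feature_vectors.items():           # hypothetical {image name: FeatureVector} mapping
        # str(fv.get_concept()) yields e.g. "Concept.STOP", which string_to_enum() can resolve again
        writer.writerow([name, fv.get_concept(), str(fv.get_vector())])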
@@ -1,17 +0,0 @@
-class Learner:
-    # The training method, that changes the internal state of the learner such that
-    # it will classify examples of a similar set (i.e. the test set) better.
-    #
-    # @param trainingSet contains feature vectors and corresponding concepts
-    # to provide experience to learn from
-
-    def learn(trainingSet):
-        pass
-
-    # find the concept of the example from the internal knowledge of the learner
-    # this method must not consider example.getConcept() at all!
-    #
-    # @param example: is a feature vector
-    # @return the concept of the example as learned by this learner before
-    def classify(example):
-        pass
src/main.py (new file, 11 lines)
@@ -0,0 +1,11 @@
+from classes.learner import Learner
+
+path_to_training_set = '/Users/denysseredenko/StreetsignRecognition/src/feature_vectors.csv'
+
+learner = Learner()
+learner.learn(path_to_training_set)
+
+# TODO: add feature vector
+distances = learner.classify([0, 27, 0, 9, 0, 64, 0, 0, 0, 0, 0, 100, 0, 0, 0, 0, 0, 100, 0, 26, 0, 8, 0, 66, 0, 54, 0, 24, 0, 22, 0, 0, 0, 0, 0, 100, 0, 34, 0, 12, 0, 54, 0, 59, 0, 40, 0, 1, 0, 23, 0, 10, 0, 67, 0, 25, 0, 9, 0, 66, 0, 89, 0, 9, 0, 2, 0, 36, 0, 9, 0, 55, 0, 0, 0, 0, 0, 100, 0, 59, 0, 19, 0, 22, 0, 25, 0, 8, 0, 67, 0, 0, 0, 0, 0, 100, 0, 0, 0, 0, 0, 100, 6, 18, 104])
+
+learner.analyse(distances, 3)
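classify() returns the vote counts rather than a single label, and analyse() only prints them; a short follow-up sketch (not part of the commit) showing how the caller could reduce the counts to one prediction:

# 'distances' above actually holds the vote-count dict returned by classify();
# the concept with the most of the k = 3 votes is the kNN prediction
# (ties resolve to the first maximum in dict order).
predicted = max(distances, key=distances.get)
print(f"Predicted concept: {predicted}")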
@@ -1,7 +1,7 @@
 import cv2 as cv
 import numpy as np
 
-from image_processing import raster_image
+from util.image_processing import raster_image
 
 blue_lower_hsv = np.array([90, 75, 75])
 blue_upper_hsv = np.array([130, 255, 255])
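These bounds are presumably consumed by get_color_percentage(); that implementation is not shown in this commit, but with OpenCV such HSV thresholds are typically applied roughly like this (an illustrative sketch, not the project's code; the input image path is hypothetical):

import cv2 as cv
import numpy as np

blue_lower_hsv = np.array([90, 75, 75])
blue_upper_hsv = np.array([130, 255, 255])

img = cv.imread("some_sign.png")                           # hypothetical BGR input image
hsv = cv.cvtColor(img, cv.COLOR_BGR2HSV)
mask = cv.inRange(hsv, blue_lower_hsv, blue_upper_hsv)     # 255 where a pixel falls inside the blue range
blue_percentage = 100 * cv.countNonZero(mask) / mask.size  # share of "blue" pixels in the image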