Commit abbacc8b authored by Maria Kleppestø Mcculloch
Browse files

Final delivery

parent d19bbb8b
......@@ -4,3 +4,5 @@ sim_scores.npy
utils/__pycache__/
DET/
__pycache__/
delivery/
depr/
\ No newline at end of file
from re import S
from webbrowser import get
import numpy as np
from DET.DET import DET
import matplotlib.pyplot as mpl
from matplotlib import pyplot as plt
from scipy.special import erfinv
......@@ -18,37 +14,37 @@ from scipy.special import erfinv
Also called ERC (error versus reject)
'''
class EDC:
def __init__(self, lowerbound, higherbound, fixed_fmnr_threshold = 0.3, metricname="", style="-") -> None:
    """
    EDC (error-versus-discard / error-versus-reject) curve plotter.

    lowerbound/higherbound: inclusive range of quality levels to sweep.
    fixed_fmnr_threshold: similarity threshold used when computing the FNMR.
    metricname: appended to the plot title when non-empty.
    style: matplotlib linestyle for the plotted curve.
    """
    self.lowb = lowerbound
    self.highb = higherbound
    self.threshold = fixed_fmnr_threshold
    self.style = style
    # Title falls back to a generic label when no metric name is given.
    if metricname != "": self.title = "EDC-Curve for " + metricname
    else: self.title = "EDC-Curve"
'''
Calculate the fnmr
'''
def _fmnr(self, sims):
    """
    Return the FNMR (false non-match rate) at the fixed threshold:
    the fraction of genuine similarity scores that are <= self.threshold.
    """
    sims_sorted = np.sort(sims, kind='mergesort')
    length = len(sims_sorted)
    if length == 0:
        return 0.0  # no scores: avoid the original's ZeroDivisionError
    # Scores are sorted, so a binary search counts those <= threshold.
    below = np.searchsorted(sims_sorted, self.threshold, side='right')
    # NOTE: the original linear scan returned (length-1)/length when *no*
    # score exceeded the threshold; the correct value in that case is 1.0.
    return below / length
'''
Plot the EDC
'''
def plotEDC(self, gens, qual_gens):
# Init starting values
x = []
y = []
......@@ -64,10 +60,9 @@ class EDC:
x.append(0)
y.append(self._fmnr(gens))
print(" j " , self._fmnr(gens))
indx = 0
# Run for all combinations of (u,v)
# Run for all combinations of (u,v)
for u in range(self.lowb+1, self.highb + 1):
## Calculate for (u, lower_bound)
......@@ -78,17 +73,17 @@ class EDC:
r_0, fnmr, q_inx_spliced = self.discardValues(round_gens, round_quals[:, 0], u)
totRemoved = totRemoved + r_0
totRemoved_v_round = totRemoved
# Set new scores
# new_gens = curr_gen[u-self.lowb-1][new_indices]
#new_quals = curr_quals[u-self.lowb-1][new_indices]
new_quals = round_quals[q_inx_spliced]
new_gens = round_gens[q_inx_spliced]
curr_gen.append([new_gens])
curr_quals.append([new_quals])
# Calculate r
# Calculate rejection rate
r = totRemoved/totnumValues
# Append
x.append(r)
y.append(fnmr)
print("(",u, ", ", 1, ") -> FNMR: ",fnmr, " || ", len(q_inx_spliced))
......@@ -98,20 +93,18 @@ class EDC:
# Remove values
# Calculate r and v on those left
round_gens = curr_gen[u-self.lowb][v-self.lowb-1]
round_quals = curr_quals[u-self.lowb][v-self.lowb-1]
# calculate the rejection rate
r_0, fnmr,q_inx_spliced = self.discardValues(round_gens, round_quals[:, 1], v)
totRemoved_v_round = totRemoved_v_round + r_0
r = totRemoved_v_round/totnumValues
# Append
x.append(r)
y.append(fnmr)
print("(",u, ", ", v, ") -> FNMR: ",fnmr, " || ", len(q_inx_spliced))
#print("(",u, ", ", v, ") -> FNMR: ",fnmr, " || ", len(q_inx_spliced))
new_gens = round_gens[q_inx_spliced]
......@@ -121,25 +114,27 @@ class EDC:
curr_quals[u-self.lowb].append(new_quals)
# Format x,y
x = np.array(x)
y = np.array(y)
print(x)
print(y)
print("\n")
#print(x)
#print(y)
#print("\n")
indx = np.argsort(x, axis=0)
x_sort = x[indx]
y_sort = y[indx]
print(x_sort)
print(y_sort)
# Sort for x
mpl.plot(x_sort, y_sort, label="label", color=(0.3, 0.3, 0.0), linestyle='-', linewidth=1)
#print(x_sort)
#print(y_sort)
mpl.plot(x_sort, y_sort, label="label", color=(0.3, 0.3, 0.0), linestyle=self.style, linewidth=1)
return
'''
Removes values based one one quality value
'''
def discardValues(self, sims, quals, level):
total_scores = len(sims)
......@@ -176,51 +171,18 @@ class EDC:
return total_scores- tot_not_rejected, fnmr, q_inx_spliced
'''
Init figure
'''
def setup(self):
    """
    Create an empty EDC plot figure with axis labels and title.
    """
    # The original called mpl.figure() twice (a diff leftover), creating an
    # orphan empty figure; one figure is enough. Dead commented-out axis
    # setup code has been removed.
    self.figure = mpl.figure()
    mpl.xlabel("Percentage Removed")
    mpl.ylabel("FNMR (False Non Match Rate)")
    mpl.title(self.title)
'''
Show EDC plot
'''
def showEDC(self):
    # Display the current matplotlib figure (blocks until the window closes).
    mpl.show()
......
# Tested with Python 3.8.3
import os # Only used in usage_example
import sys # Only used in cli_main
import cv2 # Tested with opencv-python 4.4.0.44
import dlib # Tested with dlib 19.22.0
import numpy as np # Tested with numpy 1.18.5
def cli_main():
    """CLI entry function for usage_example"""
    # Positional CLI arguments, both optional:
    #   argv[1] -> image directory (default "images")
    #   argv[2] -> dlib landmark model path
    args = sys.argv
    image_dir = args[1] if len(args) > 1 else "images"
    detector_path = (args[2] if len(args) > 2
                     else "shape_predictor_68_face_landmarks.dat")
    usage_example(image_dir=image_dir,
                  dlib_landmark_detector_path=detector_path)
def usage_example(image_dir: str, dlib_landmark_detector_path: str):
    """The dlib_landmark_detector_path should point to the shape_predictor_68_face_landmarks.dat model
    file, which is available here: http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2
    Note that the license of this model's training data excludes commercial use.
    You can find additional information in the general Dlib landmark detector Python example,
    available at: http://dlib.net/face_landmark_detection.py.html
    """
    dlib_landmark_detector = dlib.shape_predictor(dlib_landmark_detector_path)
    # Score every image in the directory (grayscale load, one score each).
    quality_scores = [
        compute_illumination_uniformity(
            cv2.imread(os.path.join(image_dir, image_filename), cv2.IMREAD_GRAYSCALE),
            dlib_landmark_detector,
        )
        for image_filename in os.listdir(image_dir)
    ]
    print(f"Count={len(quality_scores)}"
          f" Min={np.min(quality_scores)} Max={np.max(quality_scores)} Mean={np.mean(quality_scores)}")
def compute_illumination_uniformity(image, dlib_landmark_detector):
    """
    Score how evenly a face is illuminated, in [0, 100].

    The face is split into two halves along the line through the inter-eye
    center perpendicular to the eye-to-eye segment; the histograms of the two
    halves are compared. 100 = identical brightness distributions.
    """
    # Note that this example assumes that the image is already cropped to the face region.
    # Step 1: Find the eye center points and the inter-eye center point.
    # NOTE(review): parts 37/40 and 43/46 appear to be opposite eye-corner
    # landmarks of the dlib 68-point model — confirm the intended indices.
    dlib_landmarks = dlib_landmark_detector(image, dlib.rectangle(0, 0, image.shape[1],
                                                                  image.shape[0]))
    left_eye_center = (dlib_landmarks.part(37) + dlib_landmarks.part(40)) * 0.5
    right_eye_center = (dlib_landmarks.part(43) + dlib_landmarks.part(46)) * 0.5
    inter_eye_center = (left_eye_center + right_eye_center) * 0.5
    # Step 2: Determine the slope and intercept of the dividing line.
    # get_slope returns the slope perpendicular to the eye-to-eye segment;
    # 0 signals a vertical dividing line, handled by the else branches below.
    slope = get_slope(left_eye_center, right_eye_center)
    if slope != 0:
        intercept = inter_eye_center.y - (slope * inter_eye_center.x)
    else:
        # Vertical line: "intercept" holds the x position instead.
        intercept = inter_eye_center.x
    # Step 3: Establish disjunct per-pixel masks for the left and right half.
    y_values = np.zeros(image.shape[:2])
    if slope != 0:
        # Each row carries its own y coordinate; compare against the line
        # y = slope * x + intercept evaluated per column (broadcasting).
        y_values[:, :] = np.arange(image.shape[0]).reshape(image.shape[0], 1)
        right_mask = y_values > (np.arange(image.shape[1]) * slope + intercept)
    else:
        # NOTE(review): here each *row* is filled with column indices, so the
        # comparison is effectively x > intercept — verify this is intended.
        y_values[:, :] = np.arange(image.shape[0])
        right_mask = y_values > intercept
    left_mask = np.logical_not(right_mask)
    # Step 4: Compute the normalized histograms for the halves.
    left_histogram = get_normalized_histogram(image, left_mask)
    right_histogram = get_normalized_histogram(image, right_mask)
    # Step 5: Return the scaled sum of the element-wise minimum of the normalized histograms as the
    # quality score (histogram intersection, scaled to 0..100).
    min_histogram = np.minimum(left_histogram, right_histogram)
    quality_score = int(100 * np.sum(min_histogram))
    return quality_score
def get_slope(point1, point2):
    """
    Return the slope of the line perpendicular to the segment point1-point2
    (the negative reciprocal of the segment's slope), or 0 when both points
    share the same y coordinate.
    """
    dy = point2.y - point1.y
    if dy == 0:
        return 0
    return -(point2.x - point1.x) / dy
def get_normalized_histogram(image, mask):
    """
    Return the 256-bin grayscale histogram of `image` restricted to `mask`,
    normalized so the bins sum to 1 (all-zero when the mask is empty).
    """
    histogram = cv2.calcHist([image], [0], mask.astype(np.uint8), [256], [0, 256])
    histogram = histogram.flatten()
    total = np.sum(histogram)
    # Guard: an empty mask yields an all-zero histogram; dividing by 0 would
    # fill it with NaNs and poison the downstream quality score.
    if total > 0:
        histogram /= total
    return histogram
# Run the CLI example only when executed as a script, not on import.
if __name__ == "__main__":
    cli_main()
\ No newline at end of file
# Evaluation driver: loads precomputed quality and similarity scores and
# plots one DET curve per quality-rejection level.
from qualityEvaluation import QualityTemp
from utils.baseScore import BaseHandler
import numpy as np

# Deals with similarity scores
bh = BaseHandler(51, 5)  # 51 subjects x 5 samples — TODO confirm against dataset
qe = QualityTemp()

# Alternative quality metrics (disabled):
#qual_scores = qe.getBrisqueValues()
#qual_scores_f = qe.getFocusValues()
#qual_scores_s_p = qe.getSharpnessValues("P")
#qual_scores_s_g = qe.getSharpnessValues("gp")
#qual_scores_s_psp = qe.getSharpnessValues("psp")
qual_scores = np.load('data/quality_focus_norm_0.npy')
sim_scores = np.load('data/sim_scores_0_noise.npy')
print("Length: ", len(qual_scores))
'''
sim_scores_p = np.load('data/sim_scores.npy')
sim_scores_psp = np.load('data/sim_scores_psp_noise.npy')
gens_p = qe.getAllGenuine(bh, sim_scores_p)
imps_p = qe.getAllImposters(bh, sim_scores_p)
'''
# Split the similarity scores into genuine and impostor comparisons.
gens_g = qe.getAllGenuine(bh, sim_scores)
imps_g = qe.getAllImposters(bh, sim_scores)
print("Number of imps: ", len(imps_g))
'''
gens_psp = qe.getAllGenuine(bh, sim_scores_psp)
imps_psp = qe.getAllImposters(bh, sim_scores_psp)
gen_score_aggr = [gens_p,gens_g,gens_psp]
imp_score_aggr = [imps_p,imps_g, imps_psp]
gens = []
imps = []
for i in range(len(gen_score_aggr)):
    gg = gen_score_aggr[i]
    ii = imp_score_aggr[i]
    for j in range(len(gg)):
        gens.append(gg[j])
    for j in range(len(ii)):
        imps.append(ii[j])
'''
# Black Purple Blue DarkGreen Red LightGreen BlackDots
colour = [ "Kitty", "Disco", "Rabbit","Dog" , "Card", "Mickey", "Mask"]
#print(qual_scores.shape, " ", max(qual_scores), " ", min(qual_scores))
# Plot a DET curve for each quality-rejection level 1..5.
for i in range(1, 6):
    #gq,iq = qe.removeLowQualValues(sim_scores, qual_scores, bh, i)
    g_p,i_p = qe.removeLowQualValues(sim_scores, qual_scores, bh, i)
    if len(i_p) != 0 and len(g_p) != 0:
        print(i, ":")
        print("Have gorund: " ,max(i_p) > min(i_p))
        print(len(g_p), " ", len(i_p))
        qe.plotDET(g_p, i_p, colour[i] )
    else:
        # Nothing left at this level; skip plotting.
        print(i, ": Empty: ", len(g_p), " || ", len(i_p))
'''
gens, imps = gens_g, imps_g
print(len(gens), " ", len(imps))
print(max(imps), " " , min(gens))
qe.plotDET(gens, imps,"Kitty" )
'''
qe.showDET()
# Sim-score computation driver: reads the CASIA-derived image sets through
# QualityEvaluator and (below) computes pairwise ArcFace similarity scores.
import imp  # NOTE(review): unused and deprecated — candidate for removal
import cv2
import numpy as np
import insightface
from insightface.app import FaceAnalysis
from insightface.data import get_image as ins_get_image
from qualityEvaluation import QualityEvaluator

# Vars
#path = '/Users/krunal/Desktop/code/database'
projectpath = 'C:/Users/maria/Desktop/Biometrics/TermPaper/imt4126-biometricqualitymetrics/'
imgsdir = 'img/Casia_Converted_Noise/Gaussian_Salted/'
imgsdir_other = 'img/Casia_Converted/'
imgsdir_other_other = 'img/Casia_Converted_Noise/ScrambledEggs_l/'
params = [51, 5]  # 51 subjects x 5 samples per subject

# Init Evaluator
QualEval = QualityEvaluator("ArcFaceCasiaV2")
QualEval.ArcFace_init()

# Read in dataset
QualEval.readInDataset(params, projectpath, imgsdir, "subjectnr/subjectnr_samplenr", "ArcFaceCasia", True ) # V5 or V2
# TODO change names
imgsnr = 51
change_me = 5
img_obs = QualEval.getImgObs()
class img_o:
    """Container for one sample: image pixels, detected face, feature vector, and name."""
    def __init__(self, img, face, name ):
        self.img = img    # image array as loaded
        self.face = face  # detector output (callers index face[0])
        self.feat = None  # feature embedding, filled in later by the recognizer
        self.name = name  # "<subject>_<sample>" identifier
'''
# Set up detector and recognizer
app = FaceAnalysis(providers=['CUDAExecutionProvider', 'CPUExecutionProvider'])
app.prepare(ctx_id=0, det_size=(640, 640))
#gc.collect()
handler = insightface.model_zoo.get_model('buffalo_l')
handler.prepare(ctx_id=0)
# Read in images, faces and features
for i in range (0, imgsnr):
    subjectnr = '00' + str(i) if i<10 else '0' + str(i)
    subjectdir = subjectnr + '/'
    print("Path" + imgsdir + subjectdir)
    img_obs.append([])
    print("Read in for: " + str(i) + "-th subject")
    for j in range(0, change_me):
        if j == 0: fullpath = projectpath + imgsdir_other + subjectdir
        elif j ==1: fullpath = projectpath + imgsdir_other_other + subjectdir
        else: fullpath = projectpath + imgsdir + subjectdir
        #fullpath = projectpath + imgsdir + subjectdir
        imgname = subjectnr + '_' + str(j)
        #print(subjectdir + name)
        #convert(projectpath + 'Casia/' + subjectdir + imgname + ".bmp", imgname + ".jpg", fullpath )
        image = ins_get_image(fullpath + imgname)
        face = app.get(image)
        imgob = img_o(image, face, imgname)
        imgob.feat = handler.get(imgob.img, imgob.face[0])
        img_obs[i].append(imgob)
'''
# Reuse the detector/recognizer that QualityEvaluator already initialized.
handler = QualEval.handler
app = QualEval.app

# Calculate sim scores for every unordered pair of samples: for k == i the
# inner loop starts at sample j (via pin) so pairs are not compared twice.
sim_scores = []
for i in range(0, len(img_obs)):
    print("Compute sim scores for: " + str(i) + "-th subject")
    for j in range(0, len(img_obs[i])):
        pin = j
        for k in range(i, len(img_obs)):
            for l in range (pin, len(img_obs[k])):
                sim_scores.append([])
                # NOTE(review): pin does double duty — start index above,
                # "current entry count" here. Works because range() was
                # already evaluated, but verify intent before refactoring.
                pin = len(sim_scores)
                sim_scores[pin-1].append(handler.compute_sim(img_obs[i][j].feat, img_obs[k][l].feat ))
                sim_scores[pin-1].append(img_obs[i][j].name + " x " + img_obs[k][l].name)
            pin = 0  # later subjects are compared from their first sample
#bh = BaseHandler(imgsnr, change_me)
# Save to file
new_array = np.array(sim_scores)
spath = "data/sim_scores_0_noise.npy"
print("Saving to: ", spath)
#np.save(spath, new_array)
% Compute BRISQUE no-reference quality scores for all 51x5 CASIA samples,
% bin them with Normalize_Q, and write the result to CSV.
function computeBrisque()
    fullpath = "img/Casia_Converted";  % NOTE(review): unused — candidate for removal
    imgsdir = "img/Casia_Converted_Noise/Gaussian_Salted/"
    imgsdir_other = "img/Casia_Converted/"
    imgsdir_other_other = "img/Casia_Converted_Noise/ScrambledEggs_l/"
    % For all subjects (zero-padded ids 000..050)
    for i =1:51
        if (i-1) <10
            nr = "0" + (i-1);
        else
            nr = "" + (i-1);
        end
        sub_nr = "0" + nr;
        % Sample 1 comes from the clean set, sample 2 from the scrambled set,
        % the remaining samples from the Gaussian/salt-noise set.
        for j=1:5
            if (j == 1)
                high_path = imgsdir_other;
            elseif(j ==2)
                high_path = imgsdir_other_other ;
            else
                high_path = imgsdir;
            end
            path = high_path + '/' + sub_nr + '/' + sub_nr + '_' + (j-1) + '.jpg';
            img = imread(path);
            quality_scores(i,j) = brisque(img);
        end
    end
    norm_scores = Normalize_Q(quality_scores);
    %disp(max(max(quality_scores)))
    disp(sum(norm_scores))
    disp(sum(norm_scores')')
    writematrix( norm_scores, "data/brisque/quality_values.csv")
end
% Map raw BRISQUE scores into integer quality bins 1..6 and tally bin counts.
function q= Normalize_Q(qualityscores)
    m = 1;
    % Bin counters. NOTE(review): 7 slots but only bins 1..6 are ever produced.
    v = [0,0,0,0,0,0,0];
    for i=1:51
        for j=1:5
            % NOTE(review): a score < 34 matches no branch, so m silently
            % keeps its value from the previous sample — confirm whether
            % such scores should map to bin 1 instead.
            if qualityscores(i,j)< 38 && qualityscores(i,j) >= 34
                m = 1;
            elseif qualityscores(i,j)< 42 && qualityscores(i,j) >= 38
                m = 2;
            elseif qualityscores(i,j)< 44 && qualityscores(i,j) >= 42
                m = 3;
            elseif qualityscores(i,j)< 46 && qualityscores(i,j) >= 44
                m = 4;
            elseif qualityscores(i,j)< 48 && qualityscores(i,j) >= 46
                m = 5;
            elseif qualityscores(i,j) >= 48
                m = 6;
                disp("Hello")  % NOTE(review): leftover debug output
            end
            v(m) = v(m) +1;
            q(i,j) = m;
        end
    end
    disp(v)
end
\ No newline at end of file
import cv2
import numpy as np
from insightface.app import FaceAnalysis
from insightface.data import get_image as ins_get_image
from qualityEvaluation import QualityAssesmenet, Image_Preparator
#projectpath = 'C:/Users/maria/Desktop/Biometrics/TermPaper/imt4126-biometricqualitymetrics/'
#imgsdir = 'img/Casia_Converted_Noise/Gaussian_Poisson/'
#imgsdir = 'img/Casia_Converted/'
#fullpath = projectpath + "img1" #imgsdir + "001/001_1"
#fullpath = projectpath + imgsdir + "001/001_1"
def calculate(projectpath, subnr, samplenr, flavor, app):
# Variables
q = QualityAssesmenet()
d = Image_Preparator(app)
f_list = []
s_list = []
# For each subject
for i in range (0, subnr):
subjectnr = '00' + str(i) if i<10 else '0' + str(i)
subjectdir = subjectnr + '/'
# Print("Path" + imgsdir + subjectdir)
print("Read in for: " + str(i) + "-th subject")
# Focus values
f_list.append([])
# Sharpness values
s_list.append([])
for j in range(0, samplenr):