add OCRBench v2
0 OCRBench_v2/eval_scripts/spotting_eval/__init__.py Normal file
BIN OCRBench_v2/eval_scripts/spotting_eval/gt.zip Normal file
Binary file not shown.
6 OCRBench_v2/eval_scripts/spotting_eval/gt/gt_img_0.txt Normal file
@@ -0,0 +1,6 @@
442,380,507,380,507,399,442,399,CHEROKEE
506,380,547,380,547,397,506,397,STREET
481,399,536,399,536,417,481,417,BIKES
443,425,469,425,469,438,443,438,###
471,425,505,425,505,438,471,438,###
513,425,543,425,543,439,513,439,###
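Each ground-truth line gives the four corner points of a word quadrilateral in clockwise order, followed by the transcription; "###" marks an unreadable, don't-care region. A minimal parsing sketch (parse_gt_line is an illustrative helper, not part of these scripts):

# Sketch: parse one gt_img_*.txt line into points and transcription.
def parse_gt_line(line):
    parts = line.split(',')
    points = [float(v) for v in parts[:8]]   # x1,y1,...,x4,y4, clockwise
    transcription = ','.join(parts[8:])      # transcription may itself contain commas
    dont_care = transcription == "###"       # "###" marks a don't-care region
    return points, transcription, dont_care

points, text, ignore = parse_gt_line("442,380,507,380,507,399,442,399,CHEROKEE")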
26 OCRBench_v2/eval_scripts/spotting_eval/readme.txt Normal file
@@ -0,0 +1,26 @@
INSTRUCTIONS FOR THE STANDALONE SCRIPTS

Requirements:
- Python version 3.
- Each task requires different Python modules. When running the script, if some module is not installed you will see a notification and installation instructions.

Procedure:
Download the ZIP file for the requested script and unzip it to a directory.

Open a terminal in the directory and run the command:
python script.py -g=gt.zip -s=submit.zip

If you have already installed all the required modules, you will see the method's results, or an error message if the submitted file is not correct.

If a module is not present, install it with PIP: pip install 'module'

For the Polygon module, use: pip install Polygon3

Parameters:
-g: Path of the Ground Truth file. In most cases, the Ground Truth will be included in the same ZIP file, named 'gt.zip', 'gt.txt' or 'gt.json'. If not, you will be able to get it on the Downloads page of the Task.
-s: Path of your method's results file.

Optional parameters:
-o: Path to a directory where the file 'results.zip' that contains per-sample results will be copied.
-p: JSON string with parameters to override the script defaults. The parameters that can be overridden are inside the function 'default_evaluation_params' located at the beginning of the evaluation script.

Example: python script.py -g=gt.zip -s=submit.zip -o=./ -p={\"IOU_CONSTRAINT\":0.8}
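The same evaluation can also be driven from Python. A sketch, assuming the spotting_eval directory is importable as a package (matching the import used inside script.py) and that gt.zip and submit.zip sit in the working directory:

# Sketch: calling the evaluation from Python instead of the command line.
# main_evaluation accepts a dict with the same keys as the CLI flags (g, s, o, p).
import spotting_eval.script as script
import spotting_eval.rrc_evaluation_funcs_1_1 as rrc_evaluation_funcs

res = rrc_evaluation_funcs.main_evaluation(
    {'g': 'gt.zip', 's': 'submit.zip', 'p': {'IOU_CONSTRAINT': 0.8}},
    script.default_evaluation_params,
    script.validate_data,
    script.evaluate_method)
print(res['method'])  # {'precision': ..., 'recall': ..., 'hmean': ..., 'AP': ...}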
BIN OCRBench_v2/eval_scripts/spotting_eval/results.zip Normal file
Binary file not shown.
456 OCRBench_v2/eval_scripts/spotting_eval/rrc_evaluation_funcs_1_1.py Normal file
@@ -0,0 +1,456 @@
#!/usr/bin/env python3
#encoding: UTF-8

#File: rrc_evaluation_funcs_1_1.py
#Version: 1.1
#Version info: changes for Python 3
#Date: 2019-12-29
#Description: Helper functions used by the evaluation scripts of the RRC website.

import json
import sys
sys.path.append('./')
import zipfile
import re
import os
import importlib

def print_help():
    sys.stdout.write('Usage: python %s.py -g=<gtFile> -s=<submFile> [-o=<outputFolder> -p=<jsonParams>]' % sys.argv[0])
    sys.exit(2)

def load_zip_file_keys(file, fileNameRegExp=''):
    """
    Returns an array with the entries of the ZIP file that match the regular expression.
    The keys are the file names or the capturing group defined in fileNameRegExp.
    """
    try:
        archive = zipfile.ZipFile(file, mode='r', allowZip64=True)
    except Exception:
        raise Exception('Error loading the ZIP archive.')

    pairs = []

    for name in archive.namelist():
        addFile = True
        keyName = name
        if fileNameRegExp != "":
            m = re.match(fileNameRegExp, name)
            if m is None:
                addFile = False
            elif len(m.groups()) > 0:
                keyName = m.group(1)

        if addFile:
            pairs.append(keyName)

    return pairs


def load_zip_file(file, fileNameRegExp='', allEntries=False):
    """
    Returns a dictionary with the contents (filtered by fileNameRegExp) of a ZIP file.
    The keys are the file names or the capturing group defined in fileNameRegExp.
    allEntries validates that all entries in the ZIP file pass the fileNameRegExp.
    """
    try:
        archive = zipfile.ZipFile(file, mode='r', allowZip64=True)
    except Exception:
        raise Exception('Error loading the ZIP archive')

    pairs = []
    for name in archive.namelist():
        addFile = True
        keyName = name
        if fileNameRegExp != "":
            m = re.match(fileNameRegExp, name)
            if m is None:
                addFile = False
            elif len(m.groups()) > 0:
                keyName = m.group(1)

        if addFile:
            pairs.append([keyName, archive.read(name)])
        elif allEntries:
            raise Exception('ZIP entry not valid: %s' % name)

    return dict(pairs)

def decode_utf8(raw):
    """
    Returns a Unicode object on success, or None on failure
    """
    try:
        return raw.decode('utf-8-sig', errors='replace')
    except Exception:
        return None

def validate_lines_in_file(fileName, file_contents, CRLF=True, LTRB=True, withTranscription=False, withConfidence=False, imWidth=0, imHeight=0):
    """
    Validates all lines of the file, calling the line validation function for each line.
    """
    utf8File = decode_utf8(file_contents)
    if utf8File is None:
        raise Exception("The file %s is not UTF-8" % fileName)

    lines = utf8File.split("\r\n" if CRLF else "\n")
    for line in lines:
        line = line.replace("\r", "").replace("\n", "")
        if line != "":
            try:
                validate_tl_line(line, LTRB, withTranscription, withConfidence, imWidth, imHeight)
            except Exception as e:
                raise Exception(("Line in sample not valid. Sample: %s Line: %s Error: %s" % (fileName, line, str(e))).encode('utf-8', 'replace'))


def validate_tl_line(line, LTRB=True, withTranscription=True, withConfidence=True, imWidth=0, imHeight=0):
    """
    Validate the format of the line. If the line is not valid an exception will be raised.
    If maxWidth and maxHeight are specified, all points must be inside the image bounds.
    Possible values are:
    LTRB=True: xmin,ymin,xmax,ymax[,confidence][,transcription]
    LTRB=False: x1,y1,x2,y2,x3,y3,x4,y4[,confidence][,transcription]
    """
    get_tl_line_values(line, LTRB, withTranscription, withConfidence, imWidth, imHeight)


def get_tl_line_values(line, LTRB=True, withTranscription=False, withConfidence=False, imWidth=0, imHeight=0):
    """
    Validate the format of the line. If the line is not valid an exception will be raised.
    If maxWidth and maxHeight are specified, all points must be inside the image bounds.
    Possible values are:
    LTRB=True: xmin,ymin,xmax,ymax[,confidence][,transcription]
    LTRB=False: x1,y1,x2,y2,x3,y3,x4,y4[,confidence][,transcription]
    Returns values from a textline. Points, [Confidences], [Transcriptions]
    """
    confidence = 0.0
    transcription = ""
    points = []

    numPoints = 4

    if LTRB:
        numPoints = 4

        if withTranscription and withConfidence:
            m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-1].?[0-9]*)\s*,(.*)$', line)
            if m is None:
                raise Exception("Format incorrect. Should be: xmin,ymin,xmax,ymax,confidence,transcription")
        elif withConfidence:
            m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-1].?[0-9]*)\s*$', line)
            if m is None:
                raise Exception("Format incorrect. Should be: xmin,ymin,xmax,ymax,confidence")
        elif withTranscription:
            m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*,(.*)$', line)
            if m is None:
                raise Exception("Format incorrect. Should be: xmin,ymin,xmax,ymax,transcription")
        else:
            m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*,?\s*$', line)
            if m is None:
                raise Exception("Format incorrect. Should be: xmin,ymin,xmax,ymax")

        xmin = int(m.group(1))
        ymin = int(m.group(2))
        xmax = int(m.group(3))
        ymax = int(m.group(4))
        if xmax < xmin:
            raise Exception("Xmax value (%s) not valid (Xmax < Xmin)." % xmax)
        if ymax < ymin:
            raise Exception("Ymax value (%s) not valid (Ymax < Ymin)." % ymax)

        points = [float(m.group(i)) for i in range(1, numPoints + 1)]

        if imWidth > 0 and imHeight > 0:
            validate_point_inside_bounds(xmin, ymin, imWidth, imHeight)
            validate_point_inside_bounds(xmax, ymax, imWidth, imHeight)

    else:
        numPoints = 8

        if withTranscription and withConfidence:
            m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-1].?[0-9]*)\s*,(.*)$', line)
            if m is None:
                raise Exception("Format incorrect. Should be: x1,y1,x2,y2,x3,y3,x4,y4,confidence,transcription")
        elif withConfidence:
            m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-1].?[0-9]*)\s*$', line)
            if m is None:
                raise Exception("Format incorrect. Should be: x1,y1,x2,y2,x3,y3,x4,y4,confidence")
        elif withTranscription:
            m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,(.*)$', line)
            if m is None:
                raise Exception("Format incorrect. Should be: x1,y1,x2,y2,x3,y3,x4,y4,transcription")
        else:
            m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*$', line)
            if m is None:
                raise Exception("Format incorrect. Should be: x1,y1,x2,y2,x3,y3,x4,y4")

        points = [float(m.group(i)) for i in range(1, numPoints + 1)]

        validate_clockwise_points(points)

        if imWidth > 0 and imHeight > 0:
            validate_point_inside_bounds(points[0], points[1], imWidth, imHeight)
            validate_point_inside_bounds(points[2], points[3], imWidth, imHeight)
            validate_point_inside_bounds(points[4], points[5], imWidth, imHeight)
            validate_point_inside_bounds(points[6], points[7], imWidth, imHeight)

    if withConfidence:
        try:
            confidence = float(m.group(numPoints + 1))
        except ValueError:
            raise Exception("Confidence value must be a float")

    if withTranscription:
        posTranscription = numPoints + (2 if withConfidence else 1)
        transcription = m.group(posTranscription)
        m2 = re.match(r'^\s*\"(.*)\"\s*$', transcription)
        if m2 is not None:  # Transcription with double quotes; extract the value and replace escaped characters
            transcription = m2.group(1).replace("\\\\", "\\").replace("\\\"", "\"")

    return points, confidence, transcription

def get_tl_dict_values(detection, withTranscription=False, withConfidence=False, imWidth=0, imHeight=0, validNumPoints=[], validate_cw=True):
    """
    Validate the format of the dictionary. If the dictionary is not valid an exception will be raised.
    If maxWidth and maxHeight are specified, all points must be inside the image bounds.
    Possible values:
    {"points":[[x1,y1],[x2,y2],[x3,y3],..,[xn,yn]]}
    {"points":[[x1,y1],[x2,y2],[x3,y3],..,[xn,yn]],"transcription":"###","confidence":0.4,"illegibility":false}
    {"points":[[x1,y1],[x2,y2],[x3,y3],..,[xn,yn]],"transcription":"###","confidence":0.4,"dontCare":false}
    Returns values from the dictionary. Points, [Confidences], [Transcriptions]
    """
    confidence = 0.0
    transcription = ""
    points = []

    if not isinstance(detection, dict):
        raise Exception("Incorrect format. Object has to be a dictionary")

    if 'points' not in detection:
        raise Exception("Incorrect format. Object has no points key")

    if not isinstance(detection['points'], list):
        raise Exception("Incorrect format. Object points key has to be an array")

    num_points = len(detection['points'])

    if num_points < 3:
        raise Exception("Incorrect format. Incorrect number of points. At least 3 points are necessary. Found: " + str(num_points))

    if len(validNumPoints) > 0 and num_points not in validNumPoints:
        raise Exception("Incorrect format. Incorrect number of points. Only allowed 4, 8 or 12 points")

    for i in range(num_points):
        if not isinstance(detection['points'][i], list):
            raise Exception("Incorrect format. Point #" + str(i + 1) + " has to be an array")

        if len(detection['points'][i]) != 2:
            raise Exception("Incorrect format. Point #" + str(i + 1) + " has to be an array with 2 objects (x,y)")

        if not isinstance(detection['points'][i][0], (int, float)) or not isinstance(detection['points'][i][1], (int, float)):
            raise Exception("Incorrect format. Point #" + str(i + 1) + " coordinates have to be numbers")

        if imWidth > 0 and imHeight > 0:
            validate_point_inside_bounds(detection['points'][i][0], detection['points'][i][1], imWidth, imHeight)

        points.append(float(detection['points'][i][0]))
        points.append(float(detection['points'][i][1]))

    if validate_cw:
        validate_clockwise_points(points)

    if withConfidence:
        if 'confidence' not in detection:
            raise Exception("Incorrect format. No confidence key")

        if not isinstance(detection['confidence'], (int, float)):
            raise Exception("Incorrect format. Confidence key has to be a float")

        if detection['confidence'] < 0 or detection['confidence'] > 1:
            raise Exception("Incorrect format. Confidence key has to be a float between 0.0 and 1.0")

        confidence = detection['confidence']

    if withTranscription:
        if 'transcription' not in detection:
            raise Exception("Incorrect format. No transcription key")

        if not isinstance(detection['transcription'], str):
            raise Exception("Incorrect format. Transcription has to be a string. Detected: " + type(detection['transcription']).__name__)

        transcription = detection['transcription']

        if 'illegibility' in detection:  # If the illegibility attribute is present and True, the transcription is set to ### (don't care)
            if detection['illegibility'] == True:
                transcription = "###"

        if 'dontCare' in detection:  # If the dontCare attribute is present and True, the transcription is set to ### (don't care)
            if detection['dontCare'] == True:
                transcription = "###"

    return points, confidence, transcription


def validate_point_inside_bounds(x, y, imWidth, imHeight):
    if x < 0 or x > imWidth:
        raise Exception("X value (%s) not valid. Image dimensions: (%s,%s)" % (x, imWidth, imHeight))
    if y < 0 or y > imHeight:
        raise Exception("Y value (%s) not valid. Image dimensions: (%s,%s)" % (y, imWidth, imHeight))

def validate_clockwise_points(points):
    """
    Validates that the points are in clockwise order.
    """
    edge = []
    for i in range(len(points) // 2):
        edge.append((int(points[(i + 1) * 2 % len(points)]) - int(points[i * 2])) * (int(points[((i + 1) * 2 + 1) % len(points)]) + int(points[i * 2 + 1])))
    if sum(edge) > 0:
        raise Exception("Points are not clockwise. The coordinates of bounding points have to be given in clockwise order. Regarding the correct interpretation of 'clockwise' remember that the image coordinate system used is the standard one, with the image origin at the upper left, the X axis extending to the right and Y axis extending downwards.")

def get_tl_line_values_from_file_contents(content, CRLF=True, LTRB=True, withTranscription=False, withConfidence=False, imWidth=0, imHeight=0, sort_by_confidences=True):
    """
    Returns all points, confidences and transcriptions of a file in lists. Valid line formats:
    xmin,ymin,xmax,ymax,[confidence],[transcription]
    x1,y1,x2,y2,x3,y3,x4,y4,[confidence],[transcription]
    """
    pointsList = []
    transcriptionsList = []
    confidencesList = []

    lines = content.split("\r\n" if CRLF else "\n")
    for line in lines:
        line = line.replace("\r", "").replace("\n", "")
        if line != "":
            points, confidence, transcription = get_tl_line_values(line, LTRB, withTranscription, withConfidence, imWidth, imHeight)
            pointsList.append(points)
            transcriptionsList.append(transcription)
            confidencesList.append(confidence)

    if withConfidence and len(confidencesList) > 0 and sort_by_confidences:
        import numpy as np
        sorted_ind = np.argsort(-np.array(confidencesList))
        confidencesList = [confidencesList[i] for i in sorted_ind]
        pointsList = [pointsList[i] for i in sorted_ind]
        transcriptionsList = [transcriptionsList[i] for i in sorted_ind]

    return pointsList, confidencesList, transcriptionsList


def get_tl_dict_values_from_array(array, withTranscription=False, withConfidence=False, imWidth=0, imHeight=0, sort_by_confidences=True, validNumPoints=[], validate_cw=True):
    """
    Returns all points, confidences and transcriptions of a file in lists. Valid dict formats:
    {"points":[[x1,y1],[x2,y2],[x3,y3],..,[xn,yn]],"transcription":"###","confidence":0.4}
    """
    pointsList = []
    transcriptionsList = []
    confidencesList = []

    for n in range(len(array)):
        objectDict = array[n]
        points, confidence, transcription = get_tl_dict_values(objectDict, withTranscription, withConfidence, imWidth, imHeight, validNumPoints, validate_cw)
        pointsList.append(points)
        transcriptionsList.append(transcription)
        confidencesList.append(confidence)

    if withConfidence and len(confidencesList) > 0 and sort_by_confidences:
        import numpy as np
        sorted_ind = np.argsort(-np.array(confidencesList))
        confidencesList = [confidencesList[i] for i in sorted_ind]
        pointsList = [pointsList[i] for i in sorted_ind]
        transcriptionsList = [transcriptionsList[i] for i in sorted_ind]

    return pointsList, confidencesList, transcriptionsList

def main_evaluation(p, default_evaluation_params_fn, validate_data_fn, evaluate_method_fn, show_result=True, per_sample=True):
    """
    This process validates a method, evaluates it and, if it succeeds, generates a ZIP file with a JSON entry for each sample.
    Params:
    p: Dictionary of parameters with the GT/submission locations. If None is passed, the parameters sent by the system are used.
    default_evaluation_params_fn: points to a function that returns a dictionary with the default parameters used for the evaluation
    validate_data_fn: points to a method that validates the correct format of the submission
    evaluate_method_fn: points to a function that evaluates the submission and returns a dictionary with the results
    """

    if p is None:
        p = dict([s[1:].split('=') for s in sys.argv[1:]])
        if len(sys.argv) < 3:
            print_help()

    evalParams = default_evaluation_params_fn()
    if 'p' in p.keys():
        evalParams.update(p['p'] if isinstance(p['p'], dict) else json.loads(p['p']))

    resDict = {'calculated': True, 'Message': '', 'method': '{}', 'per_sample': '{}'}
    try:
        validate_data_fn(p['g'], p['s'], evalParams)
        evalData = evaluate_method_fn(p['g'], p['s'], evalParams)
        resDict.update(evalData)

    except Exception as e:
        resDict['Message'] = str(e)
        resDict['calculated'] = False

    if 'o' in p:
        if not os.path.exists(p['o']):
            os.makedirs(p['o'])

        resultsOutputname = p['o'] + '/results.zip'
        outZip = zipfile.ZipFile(resultsOutputname, mode='w', allowZip64=True)

        del resDict['per_sample']
        if 'output_items' in resDict.keys():
            del resDict['output_items']

        outZip.writestr('method.json', json.dumps(resDict))

    if not resDict['calculated']:
        if show_result:
            sys.stderr.write('Error!\n' + resDict['Message'] + '\n\n')
        if 'o' in p:
            outZip.close()
        return resDict

    if 'o' in p:
        if per_sample == True:
            for k, v in evalData['per_sample'].items():
                outZip.writestr(k + '.json', json.dumps(v))

        if 'output_items' in evalData.keys():
            for k, v in evalData['output_items'].items():
                outZip.writestr(k, v)

        outZip.close()

    if show_result:
        sys.stdout.write("Calculated!")
        sys.stdout.write(json.dumps(resDict['method']))

    return resDict


def main_validation(default_evaluation_params_fn, validate_data_fn):
    """
    This process validates a method.
    Params:
    default_evaluation_params_fn: points to a function that returns a dictionary with the default parameters used for the evaluation
    validate_data_fn: points to a method that validates the correct format of the submission
    """
    try:
        p = dict([s[1:].split('=') for s in sys.argv[1:]])
        evalParams = default_evaluation_params_fn()
        if 'p' in p.keys():
            evalParams.update(p['p'] if isinstance(p['p'], dict) else json.loads(p['p']))

        validate_data_fn(p['g'], p['s'], evalParams)
        print('SUCCESS')
        sys.exit(0)
    except Exception as e:
        print(str(e))
        sys.exit(101)
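For intuition, validate_clockwise_points above implements a shoelace-style signed-area test in image coordinates (origin at the top left, Y pointing down). A quick sketch of its behavior, assuming the module is importable as a package:

# Sketch: a clockwise quadrilateral yields sum(edge) <= 0 and passes silently.
from spotting_eval.rrc_evaluation_funcs_1_1 import validate_clockwise_points

validate_clockwise_points([0, 0, 10, 0, 10, 10, 0, 10])    # clockwise square: OK
# validate_clockwise_points([0, 0, 0, 10, 10, 10, 10, 0])  # counter-clockwise: raises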
451 OCRBench_v2/eval_scripts/spotting_eval/script.py Normal file
@@ -0,0 +1,451 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# encoding=utf8
#File: E2E_iou_1_1.py
#Version: 1.1
#Version info: changes for Python 3
#Date: 2019-12-29
#Description: Evaluation script that computes End-to-End Recognition. For Text Localization, the Intersection over Union criterion is used.
#Average Precision is also calculated when the 'CONFIDENCES' parameter is True
#There are 2 modes to determine whether a detection is correct or not:
#with Word Spotting: the detected word must coincide (ignoring case) with a filtered Ground Truth containing only dictionary words (see include_in_dictionary and include_in_dictionary_transcription functions)
#without Word Spotting: words must be equal excluding a set of special characters

from collections import namedtuple
import spotting_eval.rrc_evaluation_funcs_1_1 as rrc_evaluation_funcs
import importlib

def evaluation_imports():
    """
    evaluation_imports: Dictionary ( key = module name , value = alias ) with python modules used in the evaluation.
    """
    return {
        'Polygon': 'plg',
        'numpy': 'np'
    }

def default_evaluation_params():
    """
    default_evaluation_params: Default parameters to use for the validation and evaluation.
    """
    return {
        'IOU_CONSTRAINT': 0.5,
        'AREA_PRECISION_CONSTRAINT': 0.5,
        'WORD_SPOTTING': False,
        'MIN_LENGTH_CARE_WORD': 3,
        'GT_SAMPLE_NAME_2_ID': 'gt_img_([0-9]+).txt',
        'DET_SAMPLE_NAME_2_ID': 'res_img_([0-9]+).txt',
        'LTRB': False,  # LTRB: 2 points (left,top,right,bottom) or 4 points (x1,y1,x2,y2,x3,y3,x4,y4)
        'CRLF': False,  # Lines are delimited by Windows CRLF format
        'CONFIDENCES': False,  # Detections must include confidence value. AP will be calculated
        'SPECIAL_CHARACTERS': '!?.:,*"()·[]/\'',
        'ONLY_REMOVE_FIRST_LAST_CHARACTER': True
    }

def validate_data(gtFilePath, submFilePath, evaluationParams):
    """
    Method validate_data: validates that all files in the results folder are correct (have the correct name and contents).
    Validates also that there are no missing files in the folder.
    If some error is detected, the method raises it.
    """
    gt = rrc_evaluation_funcs.load_zip_file(gtFilePath, evaluationParams['GT_SAMPLE_NAME_2_ID'])

    subm = rrc_evaluation_funcs.load_zip_file(submFilePath, evaluationParams['DET_SAMPLE_NAME_2_ID'], True)

    #Validate format of GroundTruth
    for k in gt:
        rrc_evaluation_funcs.validate_lines_in_file(k, gt[k], evaluationParams['CRLF'], evaluationParams['LTRB'], True)

    #Validate format of results
    for k in subm:
        if (k in gt) == False:
            raise Exception("The sample %s not present in GT" % k)

        rrc_evaluation_funcs.validate_lines_in_file(k, subm[k], evaluationParams['CRLF'], evaluationParams['LTRB'], True, evaluationParams['CONFIDENCES'])


def evaluate_method(gtFilePath, submFilePath, evaluationParams):
    """
    Method evaluate_method: evaluates the method and returns the results
    Results. Dictionary with the following values:
    - method (required) Global method metrics. Ex: { 'Precision':0.8,'Recall':0.9 }
    - samples (optional) Per sample metrics. Ex: {'sample1' : { 'Precision':0.8,'Recall':0.9 } , 'sample2' : { 'Precision':0.8,'Recall':0.9 } }
    """
    for module, alias in evaluation_imports().items():
        globals()[alias] = importlib.import_module(module)

    def polygon_from_points(points, correctOffset=False):
        """
        Returns a Polygon object to use with the Polygon2 class from a list of 8 points: x1,y1,x2,y2,x3,y3,x4,y4
        """
        if correctOffset:  # this will subtract 1 from the coordinates that correspond to the xmax and ymax
            points[2] -= 1
            points[4] -= 1
            points[5] -= 1
            points[7] -= 1

        resBoxes = np.empty([1, 8], dtype='int32')
        resBoxes[0, 0] = int(points[0])
        resBoxes[0, 4] = int(points[1])
        resBoxes[0, 1] = int(points[2])
        resBoxes[0, 5] = int(points[3])
        resBoxes[0, 2] = int(points[4])
        resBoxes[0, 6] = int(points[5])
        resBoxes[0, 3] = int(points[6])
        resBoxes[0, 7] = int(points[7])
        pointMat = resBoxes[0].reshape([2, 4]).T
        return plg.Polygon(pointMat)

    def rectangle_to_polygon(rect):
        resBoxes = np.empty([1, 8], dtype='int32')
        resBoxes[0, 0] = int(rect.xmin)
        resBoxes[0, 4] = int(rect.ymax)
        resBoxes[0, 1] = int(rect.xmin)
        resBoxes[0, 5] = int(rect.ymin)
        resBoxes[0, 2] = int(rect.xmax)
        resBoxes[0, 6] = int(rect.ymin)
        resBoxes[0, 3] = int(rect.xmax)
        resBoxes[0, 7] = int(rect.ymax)

        pointMat = resBoxes[0].reshape([2, 4]).T

        return plg.Polygon(pointMat)

    def rectangle_to_points(rect):
        points = [int(rect.xmin), int(rect.ymax), int(rect.xmax), int(rect.ymax), int(rect.xmax), int(rect.ymin), int(rect.xmin), int(rect.ymin)]
        return points

    def get_union(pD, pG):
        areaA = pD.area()
        areaB = pG.area()
        return areaA + areaB - get_intersection(pD, pG)

    def get_intersection_over_union(pD, pG):
        try:
            return get_intersection(pD, pG) / get_union(pD, pG)
        except Exception:
            return 0

    def get_intersection(pD, pG):
        pInt = pD & pG
        if len(pInt) == 0:
            return 0
        return pInt.area()

    def compute_ap(confList, matchList, numGtCare):
        correct = 0
        AP = 0
        if len(confList) > 0:
            confList = np.array(confList)
            matchList = np.array(matchList)
            sorted_ind = np.argsort(-confList)
            confList = confList[sorted_ind]
            matchList = matchList[sorted_ind]
            for n in range(len(confList)):
                match = matchList[n]
                if match:
                    correct += 1
                    AP += float(correct) / (n + 1)

            if numGtCare > 0:
                AP /= numGtCare

        return AP

    def transcription_match(transGt, transDet, specialCharacters='!?.:,*"()·[]/\'', onlyRemoveFirstLastCharacterGT=True):

        if onlyRemoveFirstLastCharacterGT:
            #special characters in GT are allowed only at initial or final position
            if transGt == transDet:
                return True

            if specialCharacters.find(transGt[0]) > -1:
                if transGt[1:] == transDet:
                    return True

            if specialCharacters.find(transGt[-1]) > -1:
                if transGt[0:len(transGt) - 1] == transDet:
                    return True

            if specialCharacters.find(transGt[0]) > -1 and specialCharacters.find(transGt[-1]) > -1:
                if transGt[1:len(transGt) - 1] == transDet:
                    return True
            return False
        else:
            #Special characters are removed from the beginning and the end of both Detection and GroundTruth
            while len(transGt) > 0 and specialCharacters.find(transGt[0]) > -1:
                transGt = transGt[1:]

            while len(transDet) > 0 and specialCharacters.find(transDet[0]) > -1:
                transDet = transDet[1:]

            while len(transGt) > 0 and specialCharacters.find(transGt[-1]) > -1:
                transGt = transGt[0:len(transGt) - 1]

            while len(transDet) > 0 and specialCharacters.find(transDet[-1]) > -1:
                transDet = transDet[0:len(transDet) - 1]

            return transGt == transDet

    def include_in_dictionary(transcription):
        """
        Function used in Word Spotting that finds if the Ground Truth transcription meets the rules to enter into the dictionary. If not, the transcription will be treated as don't care.
        """
        #special case: 's at the end
        if transcription[len(transcription) - 2:] == "'s" or transcription[len(transcription) - 2:] == "'S":
            transcription = transcription[0:len(transcription) - 2]

        #hyphens at the beginning or end of the word
        transcription = transcription.strip('-')

        specialCharacters = "'!?.:,*\"()·[]/"
        for character in specialCharacters:
            transcription = transcription.replace(character, ' ')

        transcription = transcription.strip()

        if len(transcription) != len(transcription.replace(" ", "")):
            return False

        if len(transcription) < evaluationParams['MIN_LENGTH_CARE_WORD']:
            return False

        notAllowed = "×÷·"

        range1 = [ord(u'a'), ord(u'z')]
        range2 = [ord(u'A'), ord(u'Z')]
        range3 = [ord(u'À'), ord(u'ƿ')]
        range4 = [ord(u'DŽ'), ord(u'ɿ')]
        range5 = [ord(u'Ά'), ord(u'Ͽ')]
        range6 = [ord(u'-'), ord(u'-')]

        for char in transcription:
            charCode = ord(char)
            if notAllowed.find(char) != -1:
                return False

            valid = (charCode >= range1[0] and charCode <= range1[1]) or (charCode >= range2[0] and charCode <= range2[1]) or (charCode >= range3[0] and charCode <= range3[1]) or (charCode >= range4[0] and charCode <= range4[1]) or (charCode >= range5[0] and charCode <= range5[1]) or (charCode >= range6[0] and charCode <= range6[1])
            if valid == False:
                return False

        return True

    def include_in_dictionary_transcription(transcription):
        """
        Function applied to the Ground Truth transcriptions used in Word Spotting. It removes special characters and terminations.
        """
        #special case: 's at the end
        if transcription[len(transcription) - 2:] == "'s" or transcription[len(transcription) - 2:] == "'S":
            transcription = transcription[0:len(transcription) - 2]

        #hyphens at the beginning or end of the word
        transcription = transcription.strip('-')

        specialCharacters = "'!?.:,*\"()·[]/"
        for character in specialCharacters:
            transcription = transcription.replace(character, ' ')

        transcription = transcription.strip()

        return transcription

    perSampleMetrics = {}

    matchedSum = 0

    Rectangle = namedtuple('Rectangle', 'xmin ymin xmax ymax')

    gt = rrc_evaluation_funcs.load_zip_file(gtFilePath, evaluationParams['GT_SAMPLE_NAME_2_ID'])
    subm = rrc_evaluation_funcs.load_zip_file(submFilePath, evaluationParams['DET_SAMPLE_NAME_2_ID'], True)

    numGlobalCareGt = 0
    numGlobalCareDet = 0

    arrGlobalConfidences = []
    arrGlobalMatches = []

    for resFile in gt:

        gtFile = rrc_evaluation_funcs.decode_utf8(gt[resFile])
        if gtFile is None:
            raise Exception("The file %s is not UTF-8" % resFile)

        recall = 0
        precision = 0
        hmean = 0
        detCorrect = 0
        iouMat = np.empty([1, 1])
        gtPols = []
        detPols = []
        gtTrans = []
        detTrans = []
        gtPolPoints = []
        detPolPoints = []
        gtDontCarePolsNum = []  # Array of Ground Truth polygons' keys marked as don't care
        detDontCarePolsNum = []  # Array of detected polygons matched with a don't care GT
        detMatchedNums = []
        pairs = []

        arrSampleConfidences = []
        arrSampleMatch = []
        sampleAP = 0

        evaluationLog = ""

        pointsList, _, transcriptionsList = rrc_evaluation_funcs.get_tl_line_values_from_file_contents(gtFile, evaluationParams['CRLF'], evaluationParams['LTRB'], True, False)
        for n in range(len(pointsList)):
            points = pointsList[n]
            transcription = transcriptionsList[n]
            dontCare = transcription == "###"
            if evaluationParams['LTRB']:
                gtRect = Rectangle(*points)
                gtPol = rectangle_to_polygon(gtRect)
            else:
                gtPol = polygon_from_points(points)
            gtPols.append(gtPol)
            gtPolPoints.append(points)

            #On word spotting we will filter some transcriptions with special characters
            if evaluationParams['WORD_SPOTTING']:
                if dontCare == False:
                    if include_in_dictionary(transcription) == False:
                        dontCare = True
                    else:
                        transcription = include_in_dictionary_transcription(transcription)

            gtTrans.append(transcription)
            if dontCare:
                gtDontCarePolsNum.append(len(gtPols) - 1)

        evaluationLog += "GT polygons: " + str(len(gtPols)) + (" (" + str(len(gtDontCarePolsNum)) + " don't care)\n" if len(gtDontCarePolsNum) > 0 else "\n")

        if resFile in subm:

            detFile = rrc_evaluation_funcs.decode_utf8(subm[resFile])

            pointsList, confidencesList, transcriptionsList = rrc_evaluation_funcs.get_tl_line_values_from_file_contents(detFile, evaluationParams['CRLF'], evaluationParams['LTRB'], True, evaluationParams['CONFIDENCES'])

            for n in range(len(pointsList)):
                points = pointsList[n]
                transcription = transcriptionsList[n]

                if evaluationParams['LTRB']:
                    detRect = Rectangle(*points)
                    detPol = rectangle_to_polygon(detRect)
                else:
                    detPol = polygon_from_points(points)
                detPols.append(detPol)
                detPolPoints.append(points)
                detTrans.append(transcription)

                if len(gtDontCarePolsNum) > 0:
                    for dontCarePol in gtDontCarePolsNum:
                        dontCarePol = gtPols[dontCarePol]
                        intersected_area = get_intersection(dontCarePol, detPol)
                        pdDimensions = detPol.area()
                        precision = 0 if pdDimensions == 0 else intersected_area / pdDimensions
                        if precision > evaluationParams['AREA_PRECISION_CONSTRAINT']:
                            detDontCarePolsNum.append(len(detPols) - 1)
                            break

            evaluationLog += "DET polygons: " + str(len(detPols)) + (" (" + str(len(detDontCarePolsNum)) + " don't care)\n" if len(detDontCarePolsNum) > 0 else "\n")

            if len(gtPols) > 0 and len(detPols) > 0:
                #Calculate IoU and precision matrices
                outputShape = [len(gtPols), len(detPols)]
                iouMat = np.empty(outputShape)
                gtRectMat = np.zeros(len(gtPols), np.int8)
                detRectMat = np.zeros(len(detPols), np.int8)
                for gtNum in range(len(gtPols)):
                    for detNum in range(len(detPols)):
                        pG = gtPols[gtNum]
                        pD = detPols[detNum]
                        iouMat[gtNum, detNum] = get_intersection_over_union(pD, pG)

                for gtNum in range(len(gtPols)):
                    for detNum in range(len(detPols)):
                        if gtRectMat[gtNum] == 0 and detRectMat[detNum] == 0 and gtNum not in gtDontCarePolsNum and detNum not in detDontCarePolsNum:
                            if iouMat[gtNum, detNum] > evaluationParams['IOU_CONSTRAINT']:
                                gtRectMat[gtNum] = 1
                                detRectMat[detNum] = 1
                                #detection matched only if transcription is equal
                                if evaluationParams['WORD_SPOTTING']:
                                    correct = gtTrans[gtNum].upper() == detTrans[detNum].upper()
                                else:
                                    correct = transcription_match(gtTrans[gtNum].upper(), detTrans[detNum].upper(), evaluationParams['SPECIAL_CHARACTERS'], evaluationParams['ONLY_REMOVE_FIRST_LAST_CHARACTER']) == True
                                detCorrect += (1 if correct else 0)
                                if correct:
                                    detMatchedNums.append(detNum)
                                pairs.append({'gt': gtNum, 'det': detNum, 'correct': correct})
                                evaluationLog += "Match GT #" + str(gtNum) + " with Det #" + str(detNum) + " trans. correct: " + str(correct) + "\n"

            if evaluationParams['CONFIDENCES']:
                for detNum in range(len(detPols)):
                    if detNum not in detDontCarePolsNum:
                        #we exclude the don't care detections
                        match = detNum in detMatchedNums

                        arrSampleConfidences.append(confidencesList[detNum])
                        arrSampleMatch.append(match)

                        arrGlobalConfidences.append(confidencesList[detNum])
                        arrGlobalMatches.append(match)

        numGtCare = (len(gtPols) - len(gtDontCarePolsNum))
        numDetCare = (len(detPols) - len(detDontCarePolsNum))
        if numGtCare == 0:
            recall = float(1)
            precision = float(0) if numDetCare > 0 else float(1)
            sampleAP = precision
        else:
            recall = float(detCorrect) / numGtCare
            precision = 0 if numDetCare == 0 else float(detCorrect) / numDetCare
            if evaluationParams['CONFIDENCES']:
                sampleAP = compute_ap(arrSampleConfidences, arrSampleMatch, numGtCare)

        hmean = 0 if (precision + recall) == 0 else 2.0 * precision * recall / (precision + recall)

        matchedSum += detCorrect
        numGlobalCareGt += numGtCare
        numGlobalCareDet += numDetCare

        perSampleMetrics[resFile] = {
            'precision': precision,
            'recall': recall,
            'hmean': hmean,
            'pairs': pairs,
            'AP': sampleAP,
            'iouMat': [] if len(detPols) > 100 else iouMat.tolist(),
            'gtPolPoints': gtPolPoints,
            'detPolPoints': detPolPoints,
            'gtTrans': gtTrans,
            'detTrans': detTrans,
            'gtDontCare': gtDontCarePolsNum,
            'detDontCare': detDontCarePolsNum,
            'evaluationParams': evaluationParams,
            'evaluationLog': evaluationLog
        }

    # Compute AP
    AP = 0
    if evaluationParams['CONFIDENCES']:
        AP = compute_ap(arrGlobalConfidences, arrGlobalMatches, numGlobalCareGt)

    methodRecall = 0 if numGlobalCareGt == 0 else float(matchedSum) / numGlobalCareGt
    methodPrecision = 0 if numGlobalCareDet == 0 else float(matchedSum) / numGlobalCareDet
    methodHmean = 0 if methodRecall + methodPrecision == 0 else 2 * methodRecall * methodPrecision / (methodRecall + methodPrecision)

    methodMetrics = {'precision': methodPrecision, 'recall': methodRecall, 'hmean': methodHmean, 'AP': AP}

    resDict = {'calculated': True, 'Message': '', 'method': methodMetrics, 'per_sample': perSampleMetrics}

    return resDict


if __name__ == '__main__':

    rrc_evaluation_funcs.main_evaluation(None, default_evaluation_params, validate_data, evaluate_method)
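A sketch of packaging a detection file in the layout the evaluator expects: entry names must match DET_SAMPLE_NAME_2_ID (res_img_<id>.txt), and with the default LTRB=False and CONFIDENCES=False each line is eight clockwise point coordinates followed by the transcription. The zip name submit.zip follows the readme:

# Sketch: build a submission zip for the bundled single-image example.
import zipfile

with zipfile.ZipFile('submit.zip', 'w') as z:
    z.writestr('res_img_0.txt', '0,0,1000,0,1000,1000,0,1000,CHEROKEE STREET BIKES\n')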
BIN OCRBench_v2/eval_scripts/spotting_eval/submit.zip Normal file
Binary file not shown.
1 OCRBench_v2/eval_scripts/spotting_eval/submit/res_img_0.txt Normal file
@@ -0,0 +1 @@
0,0,1000,0,1000,1000,0,1000,CHEROKEE STREET BIKES
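For intuition, a quick check (approximating the quadrilaterals as axis-aligned boxes) of why this whole-image detection cannot match any ground-truth word:

# Sketch: IoU of the bundled whole-image detection with the "CHEROKEE" GT box.
word_area = (507 - 442) * (399 - 380)  # 1235 px^2; the word box lies inside the detection
iou = word_area / (1000 * 1000)        # ~0.0012, far below IOU_CONSTRAINT = 0.5
# So no pair passes the IoU test: the sample scores recall 0/3 and precision 0/1.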