echo840
2024-01-16 17:26:24 +08:00
parent 9f0ffdf13c
commit 15218c3a03
500 changed files with 479753 additions and 4082266 deletions

150
scripts/GPT4V.py Normal file

@@ -0,0 +1,150 @@
import base64
import json
import os
import time
from argparse import ArgumentParser

import requests
from tqdm import tqdm
def encode_image(image_path):
with open(image_path, "rb") as image_file:
return base64.b64encode(image_file.read()).decode('utf-8')
def save_json(json_list,save_path):
with open(save_path, 'w') as file:
json.dump(json_list, file,indent=4)
def _get_args():
parser = ArgumentParser()
parser.add_argument("--image_folder", type=str, default="./data")
parser.add_argument("--output_path", type=str, default="./results")
parser.add_argument("--OCRBench_file", type=str, default="./OCRBench/OCRBench.json")
parser.add_argument("--OPENAI_API_KEY", type=str, default="")
parser.add_argument("--API_BASE", type=str, default="")
parser.add_argument("--model", type=str, default="gpt-4-vision-preview")
args = parser.parse_args()
return args
OCRBench_score = {"Regular Text Recognition":0,"Irregular Text Recognition":0,"Artistic Text Recognition":0,"Handwriting Recognition":0,
"Digit String Recognition":0,"Non-Semantic Text Recognition":0,"Scene Text-centric VQA":0,"Doc-oriented VQA":0,"Doc-oriented VQA":0,
"Key Information Extraction":0,"Handwritten Mathematical Expression Recognition":0}
AllDataset_score = {"IIIT5K":0,"svt":0,"IC13_857":0,"IC15_1811":0,"svtp":0,"ct80":0,"cocotext":0,"ctw":0,"totaltext":0,"HOST":0,"WOST":0,"WordArt":0,"IAM":0,"ReCTS":0,"ORAND":0,"NonSemanticText":0,"SemanticText":0,
"STVQA":0,"textVQA":0,"ocrVQA":0,"ESTVQA":0,"ESTVQA_cn":0,"docVQA":0,"infographicVQA":0,"ChartQA":0,"ChartQA_Human":0,"FUNSD":0,"SROIE":0,"POIE":0,"HME100k":0}
num_all = {"IIIT5K":0,"svt":0,"IC13_857":0,"IC15_1811":0,"svtp":0,"ct80":0,"cocotext":0,"ctw":0,"totaltext":0,"HOST":0,"WOST":0,"WordArt":0,"IAM":0,"ReCTS":0,"ORAND":0,"NonSemanticText":0,"SemanticText":0,
"STVQA":0,"textVQA":0,"ocrVQA":0,"ESTVQA":0,"ESTVQA_cn":0,"docVQA":0,"infographicVQA":0,"ChartQA":0,"ChartQA_Human":0,"FUNSD":0,"SROIE":0,"POIE":0,"HME100k":0}
if __name__ == "__main__":
args = _get_args()
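# Resume support: if a results file for this model already exists, reload it so samples with saved predictions are skipped below.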
if os.path.exists(os.path.join(args.output_path,f"{args.model}.json")):
data_path = os.path.join(args.output_path,f"{args.model}.json")
else:
data_path = args.OCRBench_file
with open(data_path,"r") as f:
data = json.load(f)
for i in tqdm(range(len(data))):
img_path = os.path.join(args.image_folder, data[i]['image_path'])
question = data[i]['question']
if data[i].get("predict", 0)!=0:
print(f"{img_path} predict exist, continue.")
continue
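# Build an OpenAI-compatible chat completions payload: the question as text plus the image inlined as a base64 data URL, then POST it to API_BASE.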
base64_image = encode_image(img_path)
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {args.OPENAI_API_KEY}"
}
payload = {
"model": args.model,
"messages": [
{
"role": "user",
"content": [
{
"type": "text",
"text": f"{question}"
},
{
"type": "image_url",
"image_url": {
"url": f"data:image/jpeg;base64,{base64_image}"
}
}
]
}
],
"max_tokens": 500
}
try:
response = requests.post(args.API_BASE, headers=headers, json=payload)
print(response.json())
answer = response.json()['choices'][0]['message']['content']
data[i]['predict'] = answer
save_json(data, os.path.join(args.output_path,f"{args.model}.json"))
except Exception as e:
time.sleep(100)
print(f"{img_path} error: {e}")
for i in range(len(data)):
data_type = data[i]["type"]
dataset_name = data[i]["dataset_name"]
answers = data[i]["answers"]
if data[i].get('predict',0)==0:
continue
predict = data[i]['predict']
data[i]['result'] = 0
if dataset_name == "HME100k":
if type(answers)==list:
for j in range(len(answers)):
answer = answers[j].strip().replace("\n"," ").replace(" ","")
predict = predict.strip().replace("\n"," ").replace(" ","")
if answer in predict:
data[i]['result'] = 1
else:
answers = answers.strip().replace("\n"," ").replace(" ","")
predict = predict.strip().replace("\n"," ").replace(" ","")
if answers in predict:
data[i]['result'] = 1
else:
if type(answers)==list:
for j in range(len(answers)):
answer = answers[j].lower().strip().replace("\n"," ")
predict = predict.lower().strip().replace("\n"," ")
if answer in predict:
data[i]['result'] = 1
else:
answers = answers.lower().strip().replace("\n"," ")
predict = predict.lower().strip().replace("\n"," ")
if answers in predict:
data[i]['result'] = 1
save_json(data, os.path.join(args.output_path,f"{args.model}.json"))
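# Tally per-task OCRBench scores, skipping samples that never received a score.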
for i in range(len(data)):
if data[i].get("result",100)==100:
continue
OCRBench_score[data[i]['type']] += data[i]['result']
recognition_score = OCRBench_score['Regular Text Recognition']+OCRBench_score['Irregular Text Recognition']+OCRBench_score['Artistic Text Recognition']+OCRBench_score['Handwriting Recognition']+OCRBench_score['Digit String Recognition']+OCRBench_score['Non-Semantic Text Recognition']
Final_score = recognition_score+OCRBench_score['Scene Text-centric VQA']+OCRBench_score['Doc-oriented VQA']+OCRBench_score['Key Information Extraction']+OCRBench_score['Handwritten Mathematical Expression Recognition']
print("###########################OCRBench##############################")
print(f"Text Recognition(Total 300):{recognition_score}")
print("------------------Details of Recognition Score-------------------")
print(f"Regular Text Recognition(Total 50): {OCRBench_score['Regular Text Recognition']}")
print(f"Irregular Text Recognition(Total 50): {OCRBench_score['Irregular Text Recognition']}")
print(f"Artistic Text Recognition(Total 50): {OCRBench_score['Artistic Text Recognition']}")
print(f"Handwriting Recognition(Total 50): {OCRBench_score['Handwriting Recognition']}")
print(f"Digit String Recognition(Total 50): {OCRBench_score['Digit String Recognition']}")
print(f"Non-Semantic Text Recognition(Total 50): {OCRBench_score['Non-Semantic Text Recognition']}")
print("----------------------------------------------------------------")
print(f"Scene Text-centric VQA(Total 200): {OCRBench_score['Scene Text-centric VQA']}")
print("----------------------------------------------------------------")
print(f"Doc-oriented VQA(Total 200): {OCRBench_score['Doc-oriented VQA']}")
print("----------------------------------------------------------------")
print(f"Key Information Extraction(Total 200): {OCRBench_score['Key Information Extraction']}")
print("----------------------------------------------------------------")
print(f"Handwritten Mathematical Expression Recognition(Total 100): {OCRBench_score['Handwritten Mathematical Expression Recognition']}")
print("----------------------Final Score-------------------------------")
print(f"Final Score(Total 1000): {Final_score}")

115
scripts/Genimi.py Normal file

@@ -0,0 +1,115 @@
import json
import os
from argparse import ArgumentParser

import google.generativeai as genai
from PIL import Image
from tqdm import tqdm
OCRBench_score = {"Regular Text Recognition":0,"Irregular Text Recognition":0,"Artistic Text Recognition":0,"Handwriting Recognition":0,
"Digit String Recognition":0,"Non-Semantic Text Recognition":0,"Scene Text-centric VQA":0,"Doc-oriented VQA":0,"Doc-oriented VQA":0,
"Key Information Extraction":0,"Handwritten Mathematical Expression Recognition":0}
AllDataset_score = {"IIIT5K":0,"svt":0,"IC13_857":0,"IC15_1811":0,"svtp":0,"ct80":0,"cocotext":0,"ctw":0,"totaltext":0,"HOST":0,"WOST":0,"WordArt":0,"IAM":0,"ReCTS":0,"ORAND":0,"NonSemanticText":0,"SemanticText":0,
"STVQA":0,"textVQA":0,"ocrVQA":0,"ESTVQA":0,"ESTVQA_cn":0,"docVQA":0,"infographicVQA":0,"ChartQA":0,"ChartQA_Human":0,"FUNSD":0,"SROIE":0,"POIE":0,"HME100k":0}
num_all = {"IIIT5K":0,"svt":0,"IC13_857":0,"IC15_1811":0,"svtp":0,"ct80":0,"cocotext":0,"ctw":0,"totaltext":0,"HOST":0,"WOST":0,"WordArt":0,"IAM":0,"ReCTS":0,"ORAND":0,"NonSemanticText":0,"SemanticText":0,
"STVQA":0,"textVQA":0,"ocrVQA":0,"ESTVQA":0,"ESTVQA_cn":0,"docVQA":0,"infographicVQA":0,"ChartQA":0,"ChartQA_Human":0,"FUNSD":0,"SROIE":0,"POIE":0,"HME100k":0}
def save_json(json_list,save_path):
with open(save_path, 'w') as file:
json.dump(json_list, file,indent=4)
def _get_args():
parser = ArgumentParser()
parser.add_argument("--image_folder", type=str, default="./data")
parser.add_argument("--output_path", type=str, default="./results")
parser.add_argument("--OCRBench_file", type=str, default="./OCRBench/OCRBench.json")
parser.add_argument("--GOOGLE_API_KEY", type=str, default="")
parser.add_argument("--model", type=str, default="gemini-pro-vision")
args = parser.parse_args()
return args
if __name__ == "__main__":
args = _get_args()
genai.configure(api_key=args.GOOGLE_API_KEY)
model = genai.GenerativeModel(args.model)
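# Resume support: reuse an existing results file if present, otherwise start from the benchmark file.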
if os.path.exists(os.path.join(args.output_path,f"{args.model}.json")):
data_path = os.path.join(args.output_path,f"{args.model}.json")
else:
data_path = args.OCRBench_file
with open(data_path, "r") as f:
data = json.load(f)
for i in tqdm(range(len(data))):
img_path = os.path.join(args.image_folder, data[i]['image_path'])
question = data[i]['question']
if data[i].get("predict", 0)!=0:
print(f"{img_path} predict exist, continue.")
continue
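# Query Gemini with the question and the image, saving after every prediction so an interrupted run can be resumed.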
try:
img = Image.open(img_path).convert("RGB")
response = model.generate_content([question, img])
data[i]['predict'] = response.text
save_json(data, os.path.join(args.output_path,f"{args.model}.json"))
except Exception as e:
print(f"{img_path}: API call failed: {e}")
for i in range(len(data)):
data_type = data[i]["type"]
dataset_name = data[i]["dataset_name"]
answers = data[i]["answers"]
if data[i].get('predict',0)==0:
continue
predict = data[i]['predict']
data[i]['result'] = 0
if dataset_name == "HME100k":
if type(answers)==list:
for j in range(len(answers)):
answer = answers[j].strip().replace("\n"," ").replace(" ","")
predict = predict.strip().replace("\n"," ").replace(" ","")
if answer in predict:
data[i]['result'] = 1
else:
answers = answers.strip().replace("\n"," ").replace(" ","")
predict = predict.strip().replace("\n"," ").replace(" ","")
if answers in predict:
data[i]['result'] = 1
else:
if type(answers)==list:
for j in range(len(answers)):
answer = answers[j].lower().strip().replace("\n"," ")
predict = predict.lower().strip().replace("\n"," ")
if answer in predict:
data[i]['result'] = 1
else:
answers = answers.lower().strip().replace("\n"," ")
predict = predict.lower().strip().replace("\n"," ")
if answers in predict:
data[i]['result'] = 1
save_json(data, os.path.join(args.output_path,f"{args.model}.json"))
for i in range(len(data)):
if data[i].get("result",100)==100:
continue
OCRBench_score[data[i]['type']] += data[i]['result']
recognition_score = OCRBench_score['Regular Text Recognition']+OCRBench_score['Irregular Text Recognition']+OCRBench_score['Artistic Text Recognition']+OCRBench_score['Handwriting Recognition']+OCRBench_score['Digit String Recognition']+OCRBench_score['Non-Semantic Text Recognition']
Final_score = recognition_score+OCRBench_score['Scene Text-centric VQA']+OCRBench_score['Doc-oriented VQA']+OCRBench_score['Key Information Extraction']+OCRBench_score['Handwritten Mathematical Expression Recognition']
print("###########################OCRBench##############################")
print(f"Text Recognition(Total 300):{recognition_score}")
print("------------------Details of Recognition Score-------------------")
print(f"Regular Text Recognition(Total 50): {OCRBench_score['Regular Text Recognition']}")
print(f"Irregular Text Recognition(Total 50): {OCRBench_score['Irregular Text Recognition']}")
print(f"Artistic Text Recognition(Total 50): {OCRBench_score['Artistic Text Recognition']}")
print(f"Handwriting Recognition(Total 50): {OCRBench_score['Handwriting Recognition']}")
print(f"Digit String Recognition(Total 50): {OCRBench_score['Digit String Recognition']}")
print(f"Non-Semantic Text Recognition(Total 50): {OCRBench_score['Non-Semantic Text Recognition']}")
print("----------------------------------------------------------------")
print(f"Scene Text-centric VQA(Total 200): {OCRBench_score['Scene Text-centric VQA']}")
print("----------------------------------------------------------------")
print(f"Doc-oriented VQA(Total 200): {OCRBench_score['Doc-oriented VQA']}")
print("----------------------------------------------------------------")
print(f"Key Information Extraction(Total 200): {OCRBench_score['Key Information Extraction']}")
print("----------------------------------------------------------------")
print(f"Handwritten Mathematical Expression Recognition(Total 100): {OCRBench_score['Handwritten Mathematical Expression Recognition']}")
print("----------------------Final Score-------------------------------")
print(f"Final Score(Total 1000): {Final_score}")

210
scripts/LLaVA1_5.py Normal file

@@ -0,0 +1,210 @@
import json
import multiprocessing
import os
from argparse import ArgumentParser
from multiprocessing import Pool, Manager

import torch
from PIL import Image
from tqdm import tqdm

from llava.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
from llava.conversation import conv_templates, SeparatorStyle
from llava.model.builder import load_pretrained_model
from llava.utils import disable_torch_init
from llava.mm_utils import tokenizer_image_token, process_images, get_model_name_from_path
# https://github.com/haotian-liu/LLaVA/blob/main/llava/eval/model_vqa_loader.py
def split_list(lst, n):
length = len(lst)
avg = length // n # size of each chunk
result = [] # holds the split sublists
for i in range(n - 1):
result.append(lst[i*avg:(i+1)*avg])
result.append(lst[(n-1)*avg:])
return result
def save_json(json_list,save_path):
with open(save_path, 'w') as file:
json.dump(json_list, file,indent=4)
def _get_args():
parser = ArgumentParser()
parser.add_argument("--image_folder", type=str, default="./data")
parser.add_argument("--output_folder", type=str, default="./results")
parser.add_argument("--OCRBench_file", type=str, default="./OCRBench/OCRBench.json")
parser.add_argument("--model_path", type=str, default="liuhaotian/llava-v1.5-7b")
parser.add_argument("--model_base", type=str, default=None)
parser.add_argument("--save_name", type=str, default="llava1_5_7b")
parser.add_argument("--conv_mode", type=str, default="vicuna_v1")
parser.add_argument("--num_workers", type=int, default=8)
parser.add_argument("--temperature", type=float, default=0.0)
parser.add_argument("--top_p", type=float, default=None)
parser.add_argument("--num_beams", type=int, default=1)
args = parser.parse_args()
return args
OCRBench_score = {"Regular Text Recognition":0,"Irregular Text Recognition":0,"Artistic Text Recognition":0,"Handwriting Recognition":0,
"Digit String Recognition":0,"Non-Semantic Text Recognition":0,"Scene Text-centric VQA":0,"Doc-oriented VQA":0,"Doc-oriented VQA":0,
"Key Information Extraction":0,"Handwritten Mathematical Expression Recognition":0}
AllDataset_score = {"IIIT5K":0,"svt":0,"IC13_857":0,"IC15_1811":0,"svtp":0,"ct80":0,"cocotext":0,"ctw":0,"totaltext":0,"HOST":0,"WOST":0,"WordArt":0,"IAM":0,"ReCTS":0,"ORAND":0,"NonSemanticText":0,"SemanticText":0,
"STVQA":0,"textVQA":0,"ocrVQA":0,"ESTVQA":0,"ESTVQA_cn":0,"docVQA":0,"infographicVQA":0,"ChartQA":0,"ChartQA_Human":0,"FUNSD":0,"SROIE":0,"POIE":0,"HME100k":0}
num_all = {"IIIT5K":0,"svt":0,"IC13_857":0,"IC15_1811":0,"svtp":0,"ct80":0,"cocotext":0,"ctw":0,"totaltext":0,"HOST":0,"WOST":0,"WordArt":0,"IAM":0,"ReCTS":0,"ORAND":0,"NonSemanticText":0,"SemanticText":0,
"STVQA":0,"textVQA":0,"ocrVQA":0,"ESTVQA":0,"ESTVQA_cn":0,"docVQA":0,"infographicVQA":0,"ChartQA":0,"ChartQA_Human":0,"FUNSD":0,"SROIE":0,"POIE":0,"HME100k":0}
def eval_worker(args, data, eval_id, output_queue):
print(f"Process {eval_id} start.")
device = f"cuda:{eval_id}"
disable_torch_init()
model_path = os.path.expanduser(args.model_path)
model_name = get_model_name_from_path(model_path)
tokenizer, model, image_processor, context_len = load_pretrained_model( model_path = model_path, model_base = args.model_base, model_name = model_name,device = device)
if 'plain' in model_name and 'finetune' not in model_name.lower() and 'mmtag' not in args.conv_mode:
args.conv_mode = args.conv_mode + '_mmtag'
print(f'It seems that this is a plain model, but it is not using an mmtag prompt; auto-switching to {args.conv_mode}.')
for i in tqdm(range(len(data))):
img_path = os.path.join(args.image_folder, data[i]['image_path'])
qs = data[i]['question']
qs = qs+"\nAnswer the question using a single word or phrase."
if model.config.mm_use_im_start_end:
qs = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN + '\n' + qs
else:
qs = DEFAULT_IMAGE_TOKEN + '\n' + qs
conv = conv_templates[args.conv_mode].copy()
conv.append_message(conv.roles[0], qs)
conv.append_message(conv.roles[1], None)
prompt = conv.get_prompt()
image = Image.open(img_path).convert('RGB')
image_tensor = process_images([image], image_processor, model.config)
input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0)
if data[i].get("predict", 0)!=0:
print(f"{img_path} predict exist, continue.")
continue
stop_str = conv_templates[args.conv_mode].sep if conv_templates[args.conv_mode].sep_style != SeparatorStyle.TWO else conv_templates[args.conv_mode].sep2
input_ids = input_ids.to(device=device, non_blocking=True)
with torch.inference_mode():
output_ids = model.generate(
input_ids,
images=image_tensor.to(dtype=torch.float16, device=device, non_blocking=True),
do_sample=True if args.temperature > 0 else False,
temperature=args.temperature,
top_p=args.top_p,
num_beams=args.num_beams,
max_new_tokens=128,
use_cache=True)
input_token_len = input_ids.shape[1]
n_diff_input_output = (input_ids != output_ids[:, :input_token_len]).sum().item()
if n_diff_input_output > 0:
print(f'[Warning] {n_diff_input_output} output_ids are not the same as the input_ids')
outputs = tokenizer.batch_decode(output_ids[:, input_token_len:], skip_special_tokens=True)[0]
outputs = outputs.strip()
if outputs.endswith(stop_str):
outputs = outputs[:-len(stop_str)]
outputs = outputs.strip()
data[i]['predict'] = outputs
output_queue.put({eval_id: data})
print(f"Process {eval_id} has completed.")
if __name__=="__main__":
multiprocessing.set_start_method('spawn')
args = _get_args()
if os.path.exists(os.path.join(args.output_folder,f"{args.save_name}.json")):
data_path = os.path.join(args.output_folder,f"{args.save_name}.json")
print(f"output_path:{data_path} exist! Only generate the results that were not generated in {data_path}.")
else:
data_path = args.OCRBench_file
with open(data_path, "r") as f:
data = json.load(f)
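# Split the samples into num_workers shards, run one worker per shard/GPU, then merge the results back from the queue.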
data_list = split_list(data, args.num_workers)
output_queue = Manager().Queue()
pool = Pool(processes=args.num_workers)
for i in range(len(data_list)):
pool.apply_async(eval_worker, args=(args, data_list[i], i, output_queue))
pool.close()
pool.join()
results = {}
while not output_queue.empty():
result = output_queue.get()
results.update(result)
data = []
for i in range(len(data_list)):
data.extend(results[i])
for i in range(len(data)):
data_type = data[i]["type"]
dataset_name = data[i]["dataset_name"]
answers = data[i]["answers"]
if data[i].get('predict',0)==0:
continue
predict = data[i]['predict']
data[i]['result'] = 0
if dataset_name == "HME100k":
if type(answers)==list:
for j in range(len(answers)):
answer = answers[j].strip().replace("\n"," ").replace(" ","")
predict = predict.strip().replace("\n"," ").replace(" ","")
if answer in predict:
data[i]['result'] = 1
else:
answers = answers.strip().replace("\n"," ").replace(" ","")
predict = predict.strip().replace("\n"," ").replace(" ","")
if answers in predict:
data[i]['result'] = 1
else:
if type(answers)==list:
for j in range(len(answers)):
answer = answers[j].lower().strip().replace("\n"," ")
predict = predict.lower().strip().replace("\n"," ")
if answer in predict:
data[i]['result'] = 1
else:
answers = answers.lower().strip().replace("\n"," ")
predict = predict.lower().strip().replace("\n"," ")
if answers in predict:
data[i]['result'] = 1
save_json(data, os.path.join(args.output_folder,f"{args.save_name}.json"))
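# The full OCRBench split has 1000 samples: report per-task scores in that case, otherwise report per-dataset accuracy.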
if len(data)==1000:
for i in range(len(data)):
if data[i].get("result",100)==100:
continue
OCRBench_score[data[i]['type']] += data[i]['result']
recognition_score = OCRBench_score['Regular Text Recognition']+OCRBench_score['Irregular Text Recognition']+OCRBench_score['Artistic Text Recognition']+OCRBench_score['Handwriting Recognition']+OCRBench_score['Digit String Recognition']+OCRBench_score['Non-Semantic Text Recognition']
Final_score = recognition_score+OCRBench_score['Scene Text-centric VQA']+OCRBench_score['Doc-oriented VQA']+OCRBench_score['Key Information Extraction']+OCRBench_score['Handwritten Mathematical Expression Recognition']
print("###########################OCRBench##############################")
print(f"Text Recognition(Total 300):{recognition_score}")
print("------------------Details of Recognition Score-------------------")
print(f"Regular Text Recognition(Total 50): {OCRBench_score['Regular Text Recognition']}")
print(f"Irregular Text Recognition(Total 50): {OCRBench_score['Irregular Text Recognition']}")
print(f"Artistic Text Recognition(Total 50): {OCRBench_score['Artistic Text Recognition']}")
print(f"Handwriting Recognition(Total 50): {OCRBench_score['Handwriting Recognition']}")
print(f"Digit String Recognition(Total 50): {OCRBench_score['Digit String Recognition']}")
print(f"Non-Semantic Text Recognition(Total 50): {OCRBench_score['Non-Semantic Text Recognition']}")
print("----------------------------------------------------------------")
print(f"Scene Text-centric VQA(Total 200): {OCRBench_score['Scene Text-centric VQA']}")
print("----------------------------------------------------------------")
print(f"Doc-oriented VQA(Total 200): {OCRBench_score['Doc-oriented VQA']}")
print("----------------------------------------------------------------")
print(f"Key Information Extraction(Total 200): {OCRBench_score['Key Information Extraction']}")
print("----------------------------------------------------------------")
print(f"Handwritten Mathematical Expression Recognition(Total 100): {OCRBench_score['Handwritten Mathematical Expression Recognition']}")
print("----------------------Final Score-------------------------------")
print(f"Final Score(Total 1000): {Final_score}")
else:
for i in range(len(data)):
num_all[data[i]['dataset_name']] += 1
if data[i].get("result",100)==100:
continue
AllDataset_score[data[i]['dataset_name']] += data[i]['result']
for key in AllDataset_score.keys():
print(f"{key}: {AllDataset_score[key]/float(num_all[key])}")

163
scripts/blip2.py Normal file

@@ -0,0 +1,163 @@
import json
import multiprocessing
import os
from argparse import ArgumentParser
from multiprocessing import Pool, Manager

import torch
from PIL import Image
from tqdm import tqdm
from transformers import Blip2Processor, Blip2ForConditionalGeneration
# https://huggingface.co/Salesforce/blip2-opt-6.7b
def split_list(lst, n):
length = len(lst)
avg = length // n # size of each chunk
result = [] # holds the split sublists
for i in range(n - 1):
result.append(lst[i*avg:(i+1)*avg])
result.append(lst[(n-1)*avg:])
return result
def save_json(json_list,save_path):
with open(save_path, 'w') as file:
json.dump(json_list, file,indent=4)
def _get_args():
parser = ArgumentParser()
parser.add_argument("--image_folder", type=str, default="./data")
parser.add_argument("--output_folder", type=str, default="./results")
parser.add_argument("--OCRBench_file", type=str, default="./OCRBench/OCRBench.json")
parser.add_argument("--model_path", type=str, default="./model_weights/blip2-opt-6.7b")
parser.add_argument("--save_name", type=str, default="blip2_opt_6_7b")
parser.add_argument("--num_workers", type=int, default=8)
args = parser.parse_args()
return args
OCRBench_score = {"Regular Text Recognition":0,"Irregular Text Recognition":0,"Artistic Text Recognition":0,"Handwriting Recognition":0,
"Digit String Recognition":0,"Non-Semantic Text Recognition":0,"Scene Text-centric VQA":0,"Doc-oriented VQA":0,"Doc-oriented VQA":0,
"Key Information Extraction":0,"Handwritten Mathematical Expression Recognition":0}
AllDataset_score = {"IIIT5K":0,"svt":0,"IC13_857":0,"IC15_1811":0,"svtp":0,"ct80":0,"cocotext":0,"ctw":0,"totaltext":0,"HOST":0,"WOST":0,"WordArt":0,"IAM":0,"ReCTS":0,"ORAND":0,"NonSemanticText":0,"SemanticText":0,
"STVQA":0,"textVQA":0,"ocrVQA":0,"ESTVQA":0,"ESTVQA_cn":0,"docVQA":0,"infographicVQA":0,"ChartQA":0,"ChartQA_Human":0,"FUNSD":0,"SROIE":0,"POIE":0,"HME100k":0}
num_all = {"IIIT5K":0,"svt":0,"IC13_857":0,"IC15_1811":0,"svtp":0,"ct80":0,"cocotext":0,"ctw":0,"totaltext":0,"HOST":0,"WOST":0,"WordArt":0,"IAM":0,"ReCTS":0,"ORAND":0,"NonSemanticText":0,"SemanticText":0,
"STVQA":0,"textVQA":0,"ocrVQA":0,"ESTVQA":0,"ESTVQA_cn":0,"docVQA":0,"infographicVQA":0,"ChartQA":0,"ChartQA_Human":0,"FUNSD":0,"SROIE":0,"POIE":0,"HME100k":0}
def eval_worker(args, data, eval_id, output_queue):
print(f"Process {eval_id} start.")
processor = Blip2Processor.from_pretrained(args.model_path)
model = Blip2ForConditionalGeneration.from_pretrained(args.model_path, load_in_8bit=False, device_map={"": eval_id}, torch_dtype=torch.float16)
for i in tqdm(range(len(data))):
img_path = os.path.join(args.image_folder, data[i]['image_path'])
qs = data[i]['question']
if data[i].get("predict", 0)!=0:
print(f"{img_path} predict exist, continue.")
continue
image = Image.open(img_path).convert("RGB")
prompt = f"Question: {qs} Answer:"
inputs = processor(images=image, text=prompt, return_tensors="pt").to(device=f"cuda:{eval_id}", dtype=torch.float16)
generated_ids = model.generate(**inputs)
generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
data[i]['predict'] = generated_text
output_queue.put({eval_id: data})
print(f"Process {eval_id} has completed.")
if __name__=="__main__":
multiprocessing.set_start_method('spawn')
args = _get_args()
if os.path.exists(os.path.join(args.output_folder,f"{args.save_name}.json")):
data_path = os.path.join(args.output_folder,f"{args.save_name}.json")
print(f"output_path:{data_path} exist! Only generate the results that were not generated in {data_path}.")
else:
data_path = args.OCRBench_file
with open(data_path, "r") as f:
data = json.load(f)
data_list = split_list(data, args.num_workers)
output_queue = Manager().Queue()
pool = Pool(processes=args.num_workers)
for i in range(len(data_list)):
pool.apply_async(eval_worker, args=(args, data_list[i], i, output_queue))
pool.close()
pool.join()
results = {}
while not output_queue.empty():
result = output_queue.get()
results.update(result)
data = []
for i in range(len(data_list)):
data.extend(results[i])
for i in range(len(data)):
data_type = data[i]["type"]
dataset_name = data[i]["dataset_name"]
answers = data[i]["answers"]
if data[i].get('predict',0)==0:
continue
predict = data[i]['predict']
data[i]['result'] = 0
if dataset_name == "HME100k":
if type(answers)==list:
for j in range(len(answers)):
answer = answers[j].strip().replace("\n"," ").replace(" ","")
predict = predict.strip().replace("\n"," ").replace(" ","")
if answer in predict:
data[i]['result'] = 1
else:
answers = answers.strip().replace("\n"," ").replace(" ","")
predict = predict.strip().replace("\n"," ").replace(" ","")
if answers in predict:
data[i]['result'] = 1
else:
if type(answers)==list:
for j in range(len(answers)):
answer = answers[j].lower().strip().replace("\n"," ")
predict = predict.lower().strip().replace("\n"," ")
if answer in predict:
data[i]['result'] = 1
else:
answers = answers.lower().strip().replace("\n"," ")
predict = predict.lower().strip().replace("\n"," ")
if answers in predict:
data[i]['result'] = 1
save_json(data, os.path.join(args.output_folder,f"{args.save_name}.json"))
if len(data)==1000:
for i in range(len(data)):
if data[i].get("result",100)==100:
continue
OCRBench_score[data[i]['type']] += data[i]['result']
recognition_score = OCRBench_score['Regular Text Recognition']+OCRBench_score['Irregular Text Recognition']+OCRBench_score['Artistic Text Recognition']+OCRBench_score['Handwriting Recognition']+OCRBench_score['Digit String Recognition']+OCRBench_score['Non-Semantic Text Recognition']
Final_score = recognition_score+OCRBench_score['Scene Text-centric VQA']+OCRBench_score['Doc-oriented VQA']+OCRBench_score['Key Information Extraction']+OCRBench_score['Handwritten Mathematical Expression Recognition']
print("###########################OCRBench##############################")
print(f"Text Recognition(Total 300):{recognition_score}")
print("------------------Details of Recognition Score-------------------")
print(f"Regular Text Recognition(Total 50): {OCRBench_score['Regular Text Recognition']}")
print(f"Irregular Text Recognition(Total 50): {OCRBench_score['Irregular Text Recognition']}")
print(f"Artistic Text Recognition(Total 50): {OCRBench_score['Artistic Text Recognition']}")
print(f"Handwriting Recognition(Total 50): {OCRBench_score['Handwriting Recognition']}")
print(f"Digit String Recognition(Total 50): {OCRBench_score['Digit String Recognition']}")
print(f"Non-Semantic Text Recognition(Total 50): {OCRBench_score['Non-Semantic Text Recognition']}")
print("----------------------------------------------------------------")
print(f"Scene Text-centric VQA(Total 200): {OCRBench_score['Scene Text-centric VQA']}")
print("----------------------------------------------------------------")
print(f"Doc-oriented VQA(Total 200): {OCRBench_score['Doc-oriented VQA']}")
print("----------------------------------------------------------------")
print(f"Key Information Extraction(Total 200): {OCRBench_score['Key Information Extraction']}")
print("----------------------------------------------------------------")
print(f"Handwritten Mathematical Expression Recognition(Total 100): {OCRBench_score['Handwritten Mathematical Expression Recognition']}")
print("----------------------Final Score-------------------------------")
print(f"Final Score(Total 1000): {Final_score}")
else:
for i in range(len(data)):
num_all[data[i]['dataset_name']] += 1
if data[i].get("result",100)==100:
continue
AllDataset_score[data[i]['dataset_name']] += data[i]['result']
for key in AllDataset_score.keys():
print(f"{key}: {AllDataset_score[key]/float(num_all[key])}")


@@ -0,0 +1,175 @@
import json
import multiprocessing
import os
from argparse import ArgumentParser
from multiprocessing import Pool, Manager

import torch
from PIL import Image
from tqdm import tqdm
from transformers import InstructBlipProcessor, InstructBlipForConditionalGeneration
# https://huggingface.co/Salesforce/instructblip-vicuna-7b
def split_list(lst, n):
length = len(lst)
avg = length // n # size of each chunk
result = [] # holds the split sublists
for i in range(n - 1):
result.append(lst[i*avg:(i+1)*avg])
result.append(lst[(n-1)*avg:])
return result
def save_json(json_list,save_path):
with open(save_path, 'w') as file:
json.dump(json_list, file,indent=4)
def _get_args():
parser = ArgumentParser()
parser.add_argument("--image_folder", type=str, default="./data")
parser.add_argument("--output_folder", type=str, default="./results")
parser.add_argument("--OCRBench_file", type=str, default="./OCRBench/OCRBench.json")
parser.add_argument("--model_path", type=str, default="./model_weights/instructblip-vicuna-7b")
parser.add_argument("--save_name", type=str, default="instructblip_vicuna_7b")
parser.add_argument("--num_workers", type=int, default=8)
parser.add_argument("--temperature", type=float, default=0.0)
args = parser.parse_args()
return args
OCRBench_score = {"Regular Text Recognition":0,"Irregular Text Recognition":0,"Artistic Text Recognition":0,"Handwriting Recognition":0,
"Digit String Recognition":0,"Non-Semantic Text Recognition":0,"Scene Text-centric VQA":0,"Doc-oriented VQA":0,"Doc-oriented VQA":0,
"Key Information Extraction":0,"Handwritten Mathematical Expression Recognition":0}
AllDataset_score = {"IIIT5K":0,"svt":0,"IC13_857":0,"IC15_1811":0,"svtp":0,"ct80":0,"cocotext":0,"ctw":0,"totaltext":0,"HOST":0,"WOST":0,"WordArt":0,"IAM":0,"ReCTS":0,"ORAND":0,"NonSemanticText":0,"SemanticText":0,
"STVQA":0,"textVQA":0,"ocrVQA":0,"ESTVQA":0,"ESTVQA_cn":0,"docVQA":0,"infographicVQA":0,"ChartQA":0,"ChartQA_Human":0,"FUNSD":0,"SROIE":0,"POIE":0,"HME100k":0}
num_all = {"IIIT5K":0,"svt":0,"IC13_857":0,"IC15_1811":0,"svtp":0,"ct80":0,"cocotext":0,"ctw":0,"totaltext":0,"HOST":0,"WOST":0,"WordArt":0,"IAM":0,"ReCTS":0,"ORAND":0,"NonSemanticText":0,"SemanticText":0,
"STVQA":0,"textVQA":0,"ocrVQA":0,"ESTVQA":0,"ESTVQA_cn":0,"docVQA":0,"infographicVQA":0,"ChartQA":0,"ChartQA_Human":0,"FUNSD":0,"SROIE":0,"POIE":0,"HME100k":0}
def eval_worker(args, data, eval_id, output_queue):
print(f"Process {eval_id} start.")
device = f"cuda:{eval_id}"
model = InstructBlipForConditionalGeneration.from_pretrained(args.model_path)
processor = InstructBlipProcessor.from_pretrained(args.model_path)
model.to(device)
for i in tqdm(range(len(data))):
img_path = os.path.join(args.image_folder, data[i]['image_path'])
qs = data[i]['question']
if data[i].get("predict", 0)!=0:
print(f"{img_path} predict exist, continue.")
continue
image = Image.open(img_path).convert('RGB')
inputs = processor(images=image, text=qs, return_tensors="pt").to(device)
outputs = model.generate(
**inputs,
do_sample=False,
num_beams=5,
max_length=100,
min_length=1,
top_p=0.9,
repetition_penalty=1.5,
length_penalty=1.0,
temperature=0,
)
generated_text = processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
data[i]['predict'] = generated_text
output_queue.put({eval_id: data})
print(f"Process {eval_id} has completed.")
if __name__=="__main__":
multiprocessing.set_start_method('spawn')
args = _get_args()
if os.path.exists(os.path.join(args.output_folder,f"{args.save_name}.json")):
data_path = os.path.join(args.output_folder,f"{args.save_name}.json")
print(f"output_path:{data_path} exist! Only generate the results that were not generated in {data_path}.")
else:
data_path = args.OCRBench_file
with open(data_path, "r") as f:
data = json.load(f)
data_list = split_list(data, args.num_workers)
output_queue = Manager().Queue()
pool = Pool(processes=args.num_workers)
for i in range(len(data_list)):
pool.apply_async(eval_worker, args=(args, data_list[i], i, output_queue))
pool.close()
pool.join()
results = {}
while not output_queue.empty():
result = output_queue.get()
results.update(result)
data = []
for i in range(len(data_list)):
data.extend(results[i])
for i in range(len(data)):
data_type = data[i]["type"]
dataset_name = data[i]["dataset_name"]
answers = data[i]["answers"]
if data[i].get('predict',0)==0:
continue
predict = data[i]['predict']
data[i]['result'] = 0
if dataset_name == "HME100k":
if type(answers)==list:
for j in range(len(answers)):
answer = answers[j].strip().replace("\n"," ").replace(" ","")
predict = predict.strip().replace("\n"," ").replace(" ","")
if answer in predict:
data[i]['result'] = 1
else:
answers = answers.strip().replace("\n"," ").replace(" ","")
predict = predict.strip().replace("\n"," ").replace(" ","")
if answers in predict:
data[i]['result'] = 1
else:
if type(answers)==list:
for j in range(len(answers)):
answer = answers[j].lower().strip().replace("\n"," ")
predict = predict.lower().strip().replace("\n"," ")
if answer in predict:
data[i]['result'] = 1
else:
answers = answers.lower().strip().replace("\n"," ")
predict = predict.lower().strip().replace("\n"," ")
if answers in predict:
data[i]['result'] = 1
save_json(data, os.path.join(args.output_folder,f"{args.save_name}.json"))
if len(data)==1000:
for i in range(len(data)):
if data[i].get("result",100)==100:
continue
OCRBench_score[data[i]['type']] += data[i]['result']
recognition_score = OCRBench_score['Regular Text Recognition']+OCRBench_score['Irregular Text Recognition']+OCRBench_score['Artistic Text Recognition']+OCRBench_score['Handwriting Recognition']+OCRBench_score['Digit String Recognition']+OCRBench_score['Non-Semantic Text Recognition']
Final_score = recognition_score+OCRBench_score['Scene Text-centric VQA']+OCRBench_score['Doc-oriented VQA']+OCRBench_score['Key Information Extraction']+OCRBench_score['Handwritten Mathematical Expression Recognition']
print("###########################OCRBench##############################")
print(f"Text Recognition(Total 300):{recognition_score}")
print("------------------Details of Recognition Score-------------------")
print(f"Regular Text Recognition(Total 50): {OCRBench_score['Regular Text Recognition']}")
print(f"Irregular Text Recognition(Total 50): {OCRBench_score['Irregular Text Recognition']}")
print(f"Artistic Text Recognition(Total 50): {OCRBench_score['Artistic Text Recognition']}")
print(f"Handwriting Recognition(Total 50): {OCRBench_score['Handwriting Recognition']}")
print(f"Digit String Recognition(Total 50): {OCRBench_score['Digit String Recognition']}")
print(f"Non-Semantic Text Recognition(Total 50): {OCRBench_score['Non-Semantic Text Recognition']}")
print("----------------------------------------------------------------")
print(f"Scene Text-centric VQA(Total 200): {OCRBench_score['Scene Text-centric VQA']}")
print("----------------------------------------------------------------")
print(f"Doc-oriented VQA(Total 200): {OCRBench_score['Doc-oriented VQA']}")
print("----------------------------------------------------------------")
print(f"Key Information Extraction(Total 200): {OCRBench_score['Key Information Extraction']}")
print("----------------------------------------------------------------")
print(f"Handwritten Mathematical Expression Recognition(Total 100): {OCRBench_score['Handwritten Mathematical Expression Recognition']}")
print("----------------------Final Score-------------------------------")
print(f"Final Score(Total 1000): {Final_score}")
else:
for i in range(len(data)):
num_all[data[i]['dataset_name']] += 1
if data[i].get("result",100)==100:
continue
AllDataset_score[data[i]['dataset_name']] += data[i]['result']
for key in AllDataset_score.keys():
print(f"{key}: {AllDataset_score[key]/float(num_all[key])}")

179
scripts/bliva.py Normal file

@@ -0,0 +1,179 @@
import json
import multiprocessing
import os
from argparse import ArgumentParser
from multiprocessing import Pool, Manager

import numpy as np
import torch
from PIL import Image
from tqdm import tqdm

from bliva.models import load_model_and_preprocess
# https://github.com/mlpc-ucsd/BLIVA/blob/main/evaluate.py
def disable_torch_init():
"""
Disable the redundant torch default initialization to accelerate model creation.
"""
import torch
setattr(torch.nn.Linear, "reset_parameters", lambda self: None)
setattr(torch.nn.LayerNorm, "reset_parameters", lambda self: None)
def split_list(lst, n):
length = len(lst)
avg = length // n # size of each chunk
result = [] # holds the split sublists
for i in range(n - 1):
result.append(lst[i*avg:(i+1)*avg])
result.append(lst[(n-1)*avg:])
return result
def save_json(json_list,save_path):
with open(save_path, 'w') as file:
json.dump(json_list, file,indent=4)
def _get_args():
parser = ArgumentParser()
parser.add_argument("--image_folder", type=str, default="./data")
parser.add_argument("--output_folder", type=str, default="./results")
parser.add_argument("--OCRBench_file", type=str, default="./OCRBench/OCRBench.json")
parser.add_argument("--model_path", type=str, default="bliva_vicuna")
parser.add_argument("--save_name", type=str, default="bliva")
parser.add_argument("--num_workers", type=int, default=8)
args = parser.parse_args()
return args
OCRBench_score = {"Regular Text Recognition":0,"Irregular Text Recognition":0,"Artistic Text Recognition":0,"Handwriting Recognition":0,
"Digit String Recognition":0,"Non-Semantic Text Recognition":0,"Scene Text-centric VQA":0,"Doc-oriented VQA":0,"Doc-oriented VQA":0,
"Key Information Extraction":0,"Handwritten Mathematical Expression Recognition":0}
AllDataset_score = {"IIIT5K":0,"svt":0,"IC13_857":0,"IC15_1811":0,"svtp":0,"ct80":0,"cocotext":0,"ctw":0,"totaltext":0,"HOST":0,"WOST":0,"WordArt":0,"IAM":0,"ReCTS":0,"ORAND":0,"NonSemanticText":0,"SemanticText":0,
"STVQA":0,"textVQA":0,"ocrVQA":0,"ESTVQA":0,"ESTVQA_cn":0,"docVQA":0,"infographicVQA":0,"ChartQA":0,"ChartQA_Human":0,"FUNSD":0,"SROIE":0,"POIE":0,"HME100k":0}
num_all = {"IIIT5K":0,"svt":0,"IC13_857":0,"IC15_1811":0,"svtp":0,"ct80":0,"cocotext":0,"ctw":0,"totaltext":0,"HOST":0,"WOST":0,"WordArt":0,"IAM":0,"ReCTS":0,"ORAND":0,"NonSemanticText":0,"SemanticText":0,
"STVQA":0,"textVQA":0,"ocrVQA":0,"ESTVQA":0,"ESTVQA_cn":0,"docVQA":0,"infographicVQA":0,"ChartQA":0,"ChartQA_Human":0,"FUNSD":0,"SROIE":0,"POIE":0,"HME100k":0}
def eval_worker(args, data, eval_id, output_queue):
print(f"Process {eval_id} start.")
device = f"cuda:{eval_id}"
np.random.seed(0)
disable_torch_init()
if "vicuna" in args.model_path.lower():
print("load bliva-vicuna")
model, vis_processors, _ = load_model_and_preprocess(name=args.model_path, model_type="vicuna7b", is_eval=True, device=device)
if "flant5xxl" in args.model_path.lower():
print("load bliva-flant5xxl")
model, vis_processors, _ = load_model_and_preprocess(name=args.model_path, model_type="flant5xxl", is_eval=True, device=device)
vis_processor = vis_processors["eval"]
for i in tqdm(range(len(data))):
img_path = os.path.join(args.image_folder, data[i]['image_path'])
qs = data[i]['question']
if data[i].get("predict", 0)!=0:
print(f"{img_path} predict exist, continue.")
continue
image = Image.open(img_path).convert('RGB')
question = [qs]
image = vis_processor(image).unsqueeze(0).to(device)
outputs = model.generate({"image": image, "prompt": qs}, max_length=150)
data[i]['predict'] = outputs[0].split('### Assistant:')[0]
output_queue.put({eval_id: data})
print(f"Process {eval_id} has completed.")
if __name__=="__main__":
multiprocessing.set_start_method('spawn')
args = _get_args()
if os.path.exists(os.path.join(args.output_folder,f"{args.save_name}.json")):
data_path = os.path.join(args.output_folder,f"{args.save_name}.json")
print(f"output_path:{data_path} exist! Only generate the results that were not generated in {data_path}.")
else:
data_path = args.OCRBench_file
with open(data_path, "r") as f:
data = json.load(f)
data_list = split_list(data, args.num_workers)
output_queue = Manager().Queue()
pool = Pool(processes=args.num_workers)
for i in range(len(data_list)):
pool.apply_async(eval_worker, args=(args, data_list[i], i, output_queue))
pool.close()
pool.join()
results = {}
while not output_queue.empty():
result = output_queue.get()
results.update(result)
data = []
for i in range(len(data_list)):
data.extend(results[i])
for i in range(len(data)):
data_type = data[i]["type"]
dataset_name = data[i]["dataset_name"]
answers = data[i]["answers"]
if data[i].get('predict',0)==0:
continue
predict = data[i]['predict']
data[i]['result'] = 0
if dataset_name == "HME100k":
if type(answers)==list:
for j in range(len(answers)):
answer = answers[j].strip().replace("\n"," ").replace(" ","")
predict = predict.strip().replace("\n"," ").replace(" ","")
if answer in predict:
data[i]['result'] = 1
else:
answers = answers.strip().replace("\n"," ").replace(" ","")
predict = predict.strip().replace("\n"," ").replace(" ","")
if answers in predict:
data[i]['result'] = 1
else:
if type(answers)==list:
for j in range(len(answers)):
answer = answers[j].lower().strip().replace("\n"," ")
predict = predict.lower().strip().replace("\n"," ")
if answer in predict:
data[i]['result'] = 1
else:
answers = answers.lower().strip().replace("\n"," ")
predict = predict.lower().strip().replace("\n"," ")
if answers in predict:
data[i]['result'] = 1
save_json(data, os.path.join(args.output_folder,f"{args.save_name}.json"))
if len(data)==1000:
for i in range(len(data)):
if data[i].get("result",100)==100:
continue
OCRBench_score[data[i]['type']] += data[i]['result']
recognition_score = OCRBench_score['Regular Text Recognition']+OCRBench_score['Irregular Text Recognition']+OCRBench_score['Artistic Text Recognition']+OCRBench_score['Handwriting Recognition']+OCRBench_score['Digit String Recognition']+OCRBench_score['Non-Semantic Text Recognition']
Final_score = recognition_score+OCRBench_score['Scene Text-centric VQA']+OCRBench_score['Doc-oriented VQA']+OCRBench_score['Key Information Extraction']+OCRBench_score['Handwritten Mathematical Expression Recognition']
print("###########################OCRBench##############################")
print(f"Text Recognition(Total 300):{recognition_score}")
print("------------------Details of Recognition Score-------------------")
print(f"Regular Text Recognition(Total 50): {OCRBench_score['Regular Text Recognition']}")
print(f"Irregular Text Recognition(Total 50): {OCRBench_score['Irregular Text Recognition']}")
print(f"Artistic Text Recognition(Total 50): {OCRBench_score['Artistic Text Recognition']}")
print(f"Handwriting Recognition(Total 50): {OCRBench_score['Handwriting Recognition']}")
print(f"Digit String Recognition(Total 50): {OCRBench_score['Digit String Recognition']}")
print(f"Non-Semantic Text Recognition(Total 50): {OCRBench_score['Non-Semantic Text Recognition']}")
print("----------------------------------------------------------------")
print(f"Scene Text-centric VQA(Total 200): {OCRBench_score['Scene Text-centric VQA']}")
print("----------------------------------------------------------------")
print(f"Doc-oriented VQA(Total 200): {OCRBench_score['Doc-oriented VQA']}")
print("----------------------------------------------------------------")
print(f"Key Information Extraction(Total 200): {OCRBench_score['Key Information Extraction']}")
print("----------------------------------------------------------------")
print(f"Handwritten Mathematical Expression Recognition(Total 100): {OCRBench_score['Handwritten Mathematical Expression Recognition']}")
print("----------------------Final Score-------------------------------")
print(f"Final Score(Total 1000): {Final_score}")
else:
for i in range(len(data)):
num_all[data[i]['dataset_name']] += 1
if data[i].get("result",100)==100:
continue
AllDataset_score[data[i]['dataset_name']] += data[i]['result']
for key in AllDataset_score.keys():
print(f"{key}: {AllDataset_score[key]/float(num_all[key])}")

330
scripts/llavar.py Normal file

@@ -0,0 +1,330 @@
import json
import multiprocessing
import os
from argparse import ArgumentParser
from multiprocessing import Pool, Manager

import torch
from PIL import Image, ImageOps
from tqdm import tqdm
from transformers import AutoTokenizer, AutoConfig, CLIPVisionModel, CLIPImageProcessor, StoppingCriteria

from llava import LlavaLlamaForCausalLM
from llava import conversation as conversation_lib
from llava.conversation import conv_templates
from llava.utils import disable_torch_init
# https://github.com/SALT-NLP/LLaVAR/blob/main/LLaVA/llava/eval/model_vqa.py
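# Resize while preserving the aspect ratio, then pad the right and bottom with black to reach the target size.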
def resize_image(image, target_size):
width, height = image.size
aspect_ratio = width / height
if aspect_ratio > 1:
new_width = target_size[0]
new_height = int(new_width / aspect_ratio)
else:
new_height = target_size[1]
new_width = int(new_height * aspect_ratio)
image = image.resize((new_width, new_height))
width_diff = target_size[0] - image.size[0]
height_diff = target_size[1] - image.size[1]
left_padding = 0
top_padding = 0
right_padding = width_diff - left_padding
bottom_padding = height_diff - top_padding
padded_image = ImageOps.expand(image, border=(left_padding, top_padding, right_padding, bottom_padding), fill=0)
return padded_image
DEFAULT_IMAGE_TOKEN = "<image>"
DEFAULT_IMAGE_PATCH_TOKEN = "<im_patch>"
DEFAULT_IM_START_TOKEN = "<im_start>"
DEFAULT_IM_END_TOKEN = "<im_end>"
def patch_config(config):
patch_dict = {
"use_mm_proj": True,
"mm_vision_tower": "openai/clip-vit-large-patch14",
"mm_hidden_size": 1024
}
cfg = AutoConfig.from_pretrained(config)
if not hasattr(cfg, "mm_vision_tower"):
print(f'`mm_vision_tower` not found in `{config}`, applying patch and save to disk.')
for k, v in patch_dict.items():
setattr(cfg, k, v)
cfg.save_pretrained(config)
def split_list(lst, n):
length = len(lst)
avg = length // n # size of each chunk
result = [] # holds the split sublists
for i in range(n - 1):
result.append(lst[i*avg:(i+1)*avg])
result.append(lst[(n-1)*avg:])
return result
def save_json(json_list,save_path):
with open(save_path, 'w') as file:
json.dump(json_list, file,indent=4)
def _get_args():
parser = ArgumentParser()
parser.add_argument("--image_folder", type=str, default="./data")
parser.add_argument("--output_folder", type=str, default="./results")
parser.add_argument("--OCRBench_file", type=str, default="./OCRBench/OCRBench.json")
parser.add_argument("--model_path", type=str, default="./model_weights/LLaVar")
parser.add_argument("--save_name", type=str, default="llavar")
parser.add_argument("--conv-mode", type=str, default="llava_v1")
parser.add_argument("--mm-projector", type=str, default=None)
parser.add_argument("--vision-tower", type=str, default=None)
parser.add_argument("--num_workers", type=int, default=8)
args = parser.parse_args()
return args
OCRBench_score = {"Regular Text Recognition":0,"Irregular Text Recognition":0,"Artistic Text Recognition":0,"Handwriting Recognition":0,
"Digit String Recognition":0,"Non-Semantic Text Recognition":0,"Scene Text-centric VQA":0,"Doc-oriented VQA":0,"Doc-oriented VQA":0,
"Key Information Extraction":0,"Handwritten Mathematical Expression Recognition":0}
AllDataset_score = {"IIIT5K":0,"svt":0,"IC13_857":0,"IC15_1811":0,"svtp":0,"ct80":0,"cocotext":0,"ctw":0,"totaltext":0,"HOST":0,"WOST":0,"WordArt":0,"IAM":0,"ReCTS":0,"ORAND":0,"NonSemanticText":0,"SemanticText":0,
"STVQA":0,"textVQA":0,"ocrVQA":0,"ESTVQA":0,"ESTVQA_cn":0,"docVQA":0,"infographicVQA":0,"ChartQA":0,"ChartQA_Human":0,"FUNSD":0,"SROIE":0,"POIE":0,"HME100k":0}
num_all = {"IIIT5K":0,"svt":0,"IC13_857":0,"IC15_1811":0,"svtp":0,"ct80":0,"cocotext":0,"ctw":0,"totaltext":0,"HOST":0,"WOST":0,"WordArt":0,"IAM":0,"ReCTS":0,"ORAND":0,"NonSemanticText":0,"SemanticText":0,
"STVQA":0,"textVQA":0,"ocrVQA":0,"ESTVQA":0,"ESTVQA_cn":0,"docVQA":0,"infographicVQA":0,"ChartQA":0,"ChartQA_Human":0,"FUNSD":0,"SROIE":0,"POIE":0,"HME100k":0}
def eval_worker(args, data, eval_id, output_queue):
print(f"Process {eval_id} start.")
device = f"cuda:{eval_id}"
disable_torch_init()
model_name = os.path.expanduser(args.model_path)
tokenizer = AutoTokenizer.from_pretrained(model_name)
if args.mm_projector is None:
patch_config(model_name)
model = LlavaLlamaForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16).to(device)
image_processor = CLIPImageProcessor.from_pretrained(model.config.mm_vision_tower, torch_dtype=torch.float16)
mm_use_im_start_end = getattr(model.config, "mm_use_im_start_end", False)
tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)
if mm_use_im_start_end:
tokenizer.add_tokens([DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN], special_tokens=True)
vision_tower = model.model.vision_tower[0]
vision_tower.to(device=device, dtype=torch.float16)
vision_config = vision_tower.config
vision_config.im_patch_token = tokenizer.convert_tokens_to_ids([DEFAULT_IMAGE_PATCH_TOKEN])[0]
vision_config.use_im_start_end = mm_use_im_start_end
if mm_use_im_start_end:
vision_config.im_start_token, vision_config.im_end_token = tokenizer.convert_tokens_to_ids([DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN])
image_token_len = (vision_config.image_size // vision_config.patch_size) ** 2
else:
# used when loading a base model together with separately saved MLP projector weights
model = LlavaLlamaForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16).to(device)
vision_tower = CLIPVisionModel.from_pretrained(args.vision_tower, torch_dtype=torch.float16).to(device)
image_processor = CLIPImageProcessor.from_pretrained(args.vision_tower, torch_dtype=torch.float16)
mm_use_im_start_end = getattr(model.config, "mm_use_im_start_end", False)
tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)
if mm_use_im_start_end:
tokenizer.add_tokens([DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN], special_tokens=True)
vision_config = vision_tower.config
vision_config.im_patch_token = tokenizer.convert_tokens_to_ids([DEFAULT_IMAGE_PATCH_TOKEN])[0]
vision_config.use_im_start_end = mm_use_im_start_end
if mm_use_im_start_end:
vision_config.im_start_token, vision_config.im_end_token = tokenizer.convert_tokens_to_ids([DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN])
image_token_len = (vision_config.image_size // vision_config.patch_size) ** 2
mm_projector = torch.nn.Linear(vision_config.hidden_size, model.config.hidden_size)
mm_projector_weights = torch.load(args.mm_projector, map_location='cpu')
mm_projector.load_state_dict({k.split('.')[-1]: v for k, v in mm_projector_weights.items()})
model.model.mm_projector = mm_projector.to(device).half()
model.model.vision_tower = [vision_tower]
for i in tqdm(range(len(data))):
img_path = os.path.join(args.image_folder, data[i]['image_path'])
qs = data[i]['question']
# qs = qs+"\nAnswer the question using a single word or phrase."
if data[i].get("predict", 0)!=0:
print(f"{img_path} predict exist, continue.")
continue
if mm_use_im_start_end:
qs = qs + '\n' + DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_PATCH_TOKEN * image_token_len + DEFAULT_IM_END_TOKEN
else:
qs = qs + '\n' + DEFAULT_IMAGE_PATCH_TOKEN * image_token_len
if args.conv_mode == 'simple_legacy':
qs += '\n\n### Response:'
# conv = default_conversation.copy()
conv = conv_templates[args.conv_mode].copy()
conv.append_message(conv.roles[0], qs)
# modified
conv.append_message(conv.roles[1], None)
prompt = conv.get_prompt()
inputs = tokenizer([prompt])
image = Image.open(img_path)
# if "REval" in args.image_folder:
image = resize_image(image, (336, 336))
image_tensor = image_processor.preprocess(image, return_tensors='pt')['pixel_values'][0]
input_ids = torch.as_tensor(inputs.input_ids).to(device)
# new stopping implementation
class KeywordsStoppingCriteria(StoppingCriteria):
def __init__(self, keywords, tokenizer, input_ids):
self.keywords = keywords
self.tokenizer = tokenizer
self.start_len = None
self.input_ids = input_ids
def __call__(self, output_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
if self.start_len is None:
self.start_len = self.input_ids.shape[1]
else:
outputs = self.tokenizer.batch_decode(output_ids[:, self.start_len:], skip_special_tokens=True)[0]
for keyword in self.keywords:
if keyword in outputs:
return True
return False
# keywords = ['###']
# modified
keywords = ['</s>']
stopping_criteria = KeywordsStoppingCriteria(keywords, tokenizer, input_ids)
with torch.inference_mode():
output_ids = model.generate(
input_ids,
images=image_tensor.unsqueeze(0).half().to(device),
do_sample=False,
temperature=0,
max_new_tokens=200,
stopping_criteria=[stopping_criteria])
input_token_len = input_ids.shape[1]
n_diff_input_output = (input_ids != output_ids[:, :input_token_len]).sum().item()
if n_diff_input_output > 0:
print(f'[Warning] Sample {i}: {n_diff_input_output} output_ids are not the same as the input_ids')
outputs = tokenizer.batch_decode(output_ids[:, input_token_len:], skip_special_tokens=True)[0]
# modified
if args.conv_mode == 'simple_legacy' or args.conv_mode == 'simple':
while True:
cur_len = len(outputs)
outputs = outputs.strip()
for pattern in ['###', 'Assistant:', 'Response:']:
if outputs.startswith(pattern):
outputs = outputs[len(pattern):].strip()
if len(outputs) == cur_len:
break
if conv.sep_style == conversation_lib.SeparatorStyle.TWO:
sep = conv.sep2
else:
sep = conv.sep
try:
index = outputs.index(sep)
except ValueError:
outputs += sep
index = outputs.index(sep)
outputs = outputs[:index].strip()
data[i]['predict'] = outputs
output_queue.put({eval_id: data})
print(f"Process {eval_id} has completed.")
if __name__=="__main__":
multiprocessing.set_start_method('spawn')
args = _get_args()
if os.path.exists(os.path.join(args.output_folder,f"{args.save_name}.json")):
data_path = os.path.join(args.output_folder,f"{args.save_name}.json")
print(f"output_path:{data_path} exist! Only generate the results that were not generated in {data_path}.")
else:
data_path = args.OCRBench_file
with open(data_path, "r") as f:
data = json.load(f)
data_list = split_list(data, args.num_workers)
output_queue = Manager().Queue()
pool = Pool(processes=args.num_workers)
for i in range(len(data_list)):
pool.apply_async(eval_worker, args=(args, data_list[i], i, output_queue))
pool.close()
pool.join()
results = {}
while not output_queue.empty():
result = output_queue.get()
results.update(result)
data = []
for i in range(len(data_list)):
data.extend(results[i])
for i in range(len(data)):
data_type = data[i]["type"]
dataset_name = data[i]["dataset_name"]
answers = data[i]["answers"]
if data[i].get('predict',0)==0:
continue
predict = data[i]['predict']
data[i]['result'] = 0
if dataset_name == "HME100k":
if type(answers)==list:
for j in range(len(answers)):
answer = answers[j].strip().replace("\n"," ").replace(" ","")
predict = predict.strip().replace("\n"," ").replace(" ","")
if answer in predict:
data[i]['result'] = 1
else:
answers = answers.strip().replace("\n"," ").replace(" ","")
predict = predict.strip().replace("\n"," ").replace(" ","")
if answers in predict:
data[i]['result'] = 1
else:
if type(answers)==list:
for j in range(len(answers)):
answer = answers[j].lower().strip().replace("\n"," ")
predict = predict.lower().strip().replace("\n"," ")
if answer in predict:
data[i]['result'] = 1
else:
answers = answers.lower().strip().replace("\n"," ")
predict = predict.lower().strip().replace("\n"," ")
if answers in predict:
data[i]['result'] = 1
save_json(data, os.path.join(args.output_folder,f"{args.save_name}.json"))
if len(data)==1000:
for i in range(len(data)):
if data[i].get("result",100)==100:
continue
OCRBench_score[data[i]['type']] += data[i]['result']
recognition_score = OCRBench_score['Regular Text Recognition']+OCRBench_score['Irregular Text Recognition']+OCRBench_score['Artistic Text Recognition']+OCRBench_score['Handwriting Recognition']+OCRBench_score['Digit String Recognition']+OCRBench_score['Non-Semantic Text Recognition']
Final_score = recognition_score+OCRBench_score['Scene Text-centric VQA']+OCRBench_score['Doc-oriented VQA']+OCRBench_score['Key Information Extraction']+OCRBench_score['Handwritten Mathematical Expression Recognition']
print("###########################OCRBench##############################")
print(f"Text Recognition(Total 300):{recognition_score}")
print("------------------Details of Recognition Score-------------------")
print(f"Regular Text Recognition(Total 50): {OCRBench_score['Regular Text Recognition']}")
print(f"Irregular Text Recognition(Total 50): {OCRBench_score['Irregular Text Recognition']}")
print(f"Artistic Text Recognition(Total 50): {OCRBench_score['Artistic Text Recognition']}")
print(f"Handwriting Recognition(Total 50): {OCRBench_score['Handwriting Recognition']}")
print(f"Digit String Recognition(Total 50): {OCRBench_score['Digit String Recognition']}")
print(f"Non-Semantic Text Recognition(Total 50): {OCRBench_score['Non-Semantic Text Recognition']}")
print("----------------------------------------------------------------")
print(f"Scene Text-centric VQA(Total 200): {OCRBench_score['Scene Text-centric VQA']}")
print("----------------------------------------------------------------")
print(f"Doc-oriented VQA(Total 200): {OCRBench_score['Doc-oriented VQA']}")
print("----------------------------------------------------------------")
print(f"Key Information Extraction(Total 200): {OCRBench_score['Key Information Extraction']}")
print("----------------------------------------------------------------")
print(f"Handwritten Mathematical Expression Recognition(Total 100): {OCRBench_score['Handwritten Mathematical Expression Recognition']}")
print("----------------------Final Score-------------------------------")
print(f"Final Score(Total 1000): {Final_score}")
else:
for i in range(len(data)):
num_all[data[i]['dataset_name']] += 1
if data[i].get("result",100)==100:
continue
AllDataset_score[data[i]['dataset_name']] += data[i]['result']
for key in AllDataset_score.keys():
print(f"{key}: {AllDataset_score[key]/float(num_all[key])}")

185
scripts/mPLUG-owl.py Normal file
View File

@@ -0,0 +1,185 @@
import json
from argparse import ArgumentParser
import torch
import os
import json
from tqdm import tqdm
from PIL import Image
import math
import multiprocessing
from multiprocessing import Pool, Queue, Manager
import sys
sys.path.append("./scripts/mPLUG-Owl/mPLUG-Owl/")
from mplug_owl.modeling_mplug_owl import MplugOwlForConditionalGeneration
from mplug_owl.tokenization_mplug_owl import MplugOwlTokenizer
from mplug_owl.processing_mplug_owl import MplugOwlImageProcessor, MplugOwlProcessor
# https://github.com/X-PLUG/mPLUG-Owl/tree/main/mPLUG-Owl
def split_list(lst, n):
length = len(lst)
avg = length // n # size of each chunk
result = [] # holds the split sublists
for i in range(n - 1):
result.append(lst[i*avg:(i+1)*avg])
result.append(lst[(n-1)*avg:])
return result
def save_json(json_list,save_path):
with open(save_path, 'w') as file:
json.dump(json_list, file,indent=4)
def _get_args():
parser = ArgumentParser()
parser.add_argument("--image_folder", type=str, default="./data")
parser.add_argument("--output_folder", type=str, default="./results")
parser.add_argument("--OCRBench_file", type=str, default="./OCRBench/OCRBench.json")
parser.add_argument("--model_path", type=str, default="./model_weights/mplug-owl")
parser.add_argument("--save_name", type=str, default="mplug-owl")
parser.add_argument("--num_workers", type=int, default=8)
args = parser.parse_args()
return args
OCRBench_score = {"Regular Text Recognition":0,"Irregular Text Recognition":0,"Artistic Text Recognition":0,"Handwriting Recognition":0,
"Digit String Recognition":0,"Non-Semantic Text Recognition":0,"Scene Text-centric VQA":0,"Doc-oriented VQA":0,"Doc-oriented VQA":0,
"Key Information Extraction":0,"Handwritten Mathematical Expression Recognition":0}
AllDataset_score = {"IIIT5K":0,"svt":0,"IC13_857":0,"IC15_1811":0,"svtp":0,"ct80":0,"cocotext":0,"ctw":0,"totaltext":0,"HOST":0,"WOST":0,"WordArt":0,"IAM":0,"ReCTS":0,"ORAND":0,"NonSemanticText":0,"SemanticText":0,
"STVQA":0,"textVQA":0,"ocrVQA":0,"ESTVQA":0,"ESTVQA_cn":0,"docVQA":0,"infographicVQA":0,"ChartQA":0,"ChartQA_Human":0,"FUNSD":0,"SROIE":0,"POIE":0,"HME100k":0}
num_all = {"IIIT5K":0,"svt":0,"IC13_857":0,"IC15_1811":0,"svtp":0,"ct80":0,"cocotext":0,"ctw":0,"totaltext":0,"HOST":0,"WOST":0,"WordArt":0,"IAM":0,"ReCTS":0,"ORAND":0,"NonSemanticText":0,"SemanticText":0,
"STVQA":0,"textVQA":0,"ocrVQA":0,"ESTVQA":0,"ESTVQA_cn":0,"docVQA":0,"infographicVQA":0,"ChartQA":0,"ChartQA_Human":0,"FUNSD":0,"SROIE":0,"POIE":0,"HME100k":0}
def eval_worker(args, data, eval_id, output_queue):
print(f"Process {eval_id} start.")
pretrained_ckpt = args.model_path
model = MplugOwlForConditionalGeneration.from_pretrained(
pretrained_ckpt,
torch_dtype=torch.bfloat16,
)
model.to(f"cuda:{eval_id}")
image_processor = MplugOwlImageProcessor.from_pretrained(pretrained_ckpt)
tokenizer = MplugOwlTokenizer.from_pretrained(pretrained_ckpt)
processor = MplugOwlProcessor(image_processor, tokenizer)
for i in tqdm(range(len(data))):
img_path = os.path.join(args.image_folder, data[i]['image_path'])
qs = data[i]['question']
prompts = [
f'''The following is a conversation between a curious human and AI assistant. The assistant gives helpful, detailed, and polite answers to the user's questions.
Human: <image>
Human: {qs}
AI: ''']
if data[i].get("predict", 0)!=0:
print(f"{img_path} predict exist, continue.")
continue
generate_kwargs = {
'do_sample': False,
'top_k': 1,
'max_length': 100
}
images = [Image.open(img_path)]
inputs = processor(text=prompts, images=images, return_tensors='pt')
inputs = {k: v.bfloat16() if v.dtype == torch.float else v for k, v in inputs.items()}
inputs = {k: v.to(model.device) for k, v in inputs.items()}
with torch.no_grad():
res = model.generate(**inputs, **generate_kwargs)
sentence = tokenizer.decode(res.tolist()[0], skip_special_tokens=True)
data[i]['predict'] = sentence
output_queue.put({eval_id: data})
print(f"Process {eval_id} has completed.")
if __name__=="__main__":
multiprocessing.set_start_method('spawn')
args = _get_args()
if os.path.exists(os.path.join(args.output_folder,f"{args.save_name}.json")):
data_path = os.path.join(args.output_folder,f"{args.save_name}.json")
print(f"output_path:{data_path} exist! Only generate the results that were not generated in {data_path}.")
else:
data_path = args.OCRBench_file
with open(data_path, "r") as f:
data = json.load(f)
data_list = split_list(data, args.num_workers)
output_queue = Manager().Queue()
pool = Pool(processes=args.num_workers)
for i in range(len(data_list)):
pool.apply_async(eval_worker, args=(args, data_list[i], i, output_queue))
pool.close()
pool.join()
results = {}
while not output_queue.empty():
result = output_queue.get()
results.update(result)
data = []
for i in range(len(data_list)):
data.extend(results[i])
for i in range(len(data)):
data_type = data[i]["type"]
dataset_name = data[i]["dataset_name"]
answers = data[i]["answers"]
if data[i].get('predict',0)==0:
continue
predict = data[i]['predict']
data[i]['result'] = 0
if dataset_name == "HME100k":
if type(answers)==list:
for j in range(len(answers)):
answer = answers[j].strip().replace("\n"," ").replace(" ","")
predict = predict.strip().replace("\n"," ").replace(" ","")
if answer in predict:
data[i]['result'] = 1
else:
answers = answers.strip().replace("\n"," ").replace(" ","")
predict = predict.strip().replace("\n"," ").replace(" ","")
if answers in predict:
data[i]['result'] = 1
else:
if type(answers)==list:
for j in range(len(answers)):
answer = answers[j].lower().strip().replace("\n"," ")
predict = predict.lower().strip().replace("\n"," ")
if answer in predict:
data[i]['result'] = 1
else:
answers = answers.lower().strip().replace("\n"," ")
predict = predict.lower().strip().replace("\n"," ")
if answers in predict:
data[i]['result'] = 1
save_json(data, os.path.join(args.output_folder,f"{args.save_name}.json"))
if len(data)==1000:
for i in range(len(data)):
if data[i].get("result",100)==100:
continue
OCRBench_score[data[i]['type']] += data[i]['result']
recognition_score = OCRBench_score['Regular Text Recognition']+OCRBench_score['Irregular Text Recognition']+OCRBench_score['Artistic Text Recognition']+OCRBench_score['Handwriting Recognition']+OCRBench_score['Digit String Recognition']+OCRBench_score['Non-Semantic Text Recognition']
Final_score = recognition_score+OCRBench_score['Scene Text-centric VQA']+OCRBench_score['Doc-oriented VQA']+OCRBench_score['Key Information Extraction']+OCRBench_score['Handwritten Mathematical Expression Recognition']
print("###########################OCRBench##############################")
print(f"Text Recognition(Total 300):{recognition_score}")
print("------------------Details of Recognition Score-------------------")
print(f"Regular Text Recognition(Total 50): {OCRBench_score['Regular Text Recognition']}")
print(f"Irregular Text Recognition(Total 50): {OCRBench_score['Irregular Text Recognition']}")
print(f"Artistic Text Recognition(Total 50): {OCRBench_score['Artistic Text Recognition']}")
print(f"Handwriting Recognition(Total 50): {OCRBench_score['Handwriting Recognition']}")
print(f"Digit String Recognition(Total 50): {OCRBench_score['Digit String Recognition']}")
print(f"Non-Semantic Text Recognition(Total 50): {OCRBench_score['Non-Semantic Text Recognition']}")
print("----------------------------------------------------------------")
print(f"Scene Text-centric VQA(Total 200): {OCRBench_score['Scene Text-centric VQA']}")
print("----------------------------------------------------------------")
print(f"Doc-oriented VQA(Total 200): {OCRBench_score['Doc-oriented VQA']}")
print("----------------------------------------------------------------")
print(f"Key Information Extraction(Total 200): {OCRBench_score['Key Information Extraction']}")
print("----------------------------------------------------------------")
print(f"Handwritten Mathematical Expression Recognition(Total 100): {OCRBench_score['Handwritten Mathematical Expression Recognition']}")
print("----------------------Final Score-------------------------------")
print(f"Final Score(Total 1000): {Final_score}")
else:
for i in range(len(data)):
num_all[data[i]['dataset_name']] += 1
if data[i].get("result",100)==100:
continue
AllDataset_score[data[i]['dataset_name']] += data[i]['result']
for key in AllDataset_score.keys():
print(f"{key}: {AllDataset_score[key]/float(num_all[key])}")

191
scripts/mPLUG-owl2.py Normal file
View File

@@ -0,0 +1,191 @@
import json
from argparse import ArgumentParser
import torch
import os
import json
from tqdm import tqdm
from PIL import Image
import math
import multiprocessing
from multiprocessing import Pool, Queue, Manager
from transformers import TextStreamer
from mplug_owl2.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN
from mplug_owl2.conversation import conv_templates, SeparatorStyle
from mplug_owl2.model.builder import load_pretrained_model
from mplug_owl2.mm_utils import process_images, tokenizer_image_token, get_model_name_from_path, KeywordsStoppingCriteria
# https://github.com/X-PLUG/mPLUG-Owl/tree/main/mPLUG-Owl2
def split_list(lst, n):
length = len(lst)
avg = length // n # size of each chunk
result = [] # holds the split sublists
for i in range(n - 1):
result.append(lst[i*avg:(i+1)*avg])
result.append(lst[(n-1)*avg:])
return result
def save_json(json_list,save_path):
with open(save_path, 'w') as file:
json.dump(json_list, file,indent=4)
def _get_args():
parser = ArgumentParser()
parser.add_argument("--image_folder", type=str, default="./data")
parser.add_argument("--output_folder", type=str, default="./results")
parser.add_argument("--OCRBench_file", type=str, default="./OCRBench/OCRBench.json")
parser.add_argument("--model_path", type=str, default="./model_weights/mplug-owl2")
parser.add_argument("--save_name", type=str, default="mplug-owl2")
parser.add_argument("--conv_mode", type=str, default="mplug_owl2")
parser.add_argument("--num_workers", type=int, default=8)
parser.add_argument("--temperature", type=float, default=0.0)
args = parser.parse_args()
return args
OCRBench_score = {"Regular Text Recognition":0,"Irregular Text Recognition":0,"Artistic Text Recognition":0,"Handwriting Recognition":0,
"Digit String Recognition":0,"Non-Semantic Text Recognition":0,"Scene Text-centric VQA":0,"Doc-oriented VQA":0,"Doc-oriented VQA":0,
"Key Information Extraction":0,"Handwritten Mathematical Expression Recognition":0}
AllDataset_score = {"IIIT5K":0,"svt":0,"IC13_857":0,"IC15_1811":0,"svtp":0,"ct80":0,"cocotext":0,"ctw":0,"totaltext":0,"HOST":0,"WOST":0,"WordArt":0,"IAM":0,"ReCTS":0,"ORAND":0,"NonSemanticText":0,"SemanticText":0,
"STVQA":0,"textVQA":0,"ocrVQA":0,"ESTVQA":0,"ESTVQA_cn":0,"docVQA":0,"infographicVQA":0,"ChartQA":0,"ChartQA_Human":0,"FUNSD":0,"SROIE":0,"POIE":0,"HME100k":0}
num_all = {"IIIT5K":0,"svt":0,"IC13_857":0,"IC15_1811":0,"svtp":0,"ct80":0,"cocotext":0,"ctw":0,"totaltext":0,"HOST":0,"WOST":0,"WordArt":0,"IAM":0,"ReCTS":0,"ORAND":0,"NonSemanticText":0,"SemanticText":0,
"STVQA":0,"textVQA":0,"ocrVQA":0,"ESTVQA":0,"ESTVQA_cn":0,"docVQA":0,"infographicVQA":0,"ChartQA":0,"ChartQA_Human":0,"FUNSD":0,"SROIE":0,"POIE":0,"HME100k":0}
def eval_worker(args, data, eval_id, output_queue):
print(f"Process {eval_id} start.")
model_name = get_model_name_from_path(args.model_path)
tokenizer, model, image_processor, context_len = load_pretrained_model(args.model_path, None, model_name, load_8bit=False, load_4bit=False, device=f"cuda:{eval_id}")
for i in tqdm(range(len(data))):
img_path = os.path.join(args.image_folder, data[i]['image_path'])
qs = data[i]['question']
if data[i].get("predict", 0)!=0:
print(f"{img_path} predict exist, continue.")
continue
conv = conv_templates[args.conv_mode].copy()
roles = conv.roles
image = Image.open(img_path).convert('RGB')
max_edge = max(image.size) # We recommend resizing to a square image for best performance.
image = image.resize((max_edge, max_edge))
image_tensor = process_images([image], image_processor)
image_tensor = image_tensor.to(model.device, dtype=torch.float16)
inp = DEFAULT_IMAGE_TOKEN + qs
conv.append_message(conv.roles[0], inp)
conv.append_message(conv.roles[1], None)
prompt = conv.get_prompt()
input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).to(model.device)
stop_str = conv.sep2
keywords = [stop_str]
stopping_criteria = KeywordsStoppingCriteria(keywords, tokenizer, input_ids)
streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
with torch.inference_mode():
output_ids = model.generate(
input_ids,
images=image_tensor,
do_sample=False,
temperature=args.temperature,
max_new_tokens=100,
streamer=streamer,
use_cache=True,
stopping_criteria=[stopping_criteria])
outputs = tokenizer.decode(output_ids[0, input_ids.shape[1]:]).strip()
data[i]['predict'] = outputs
output_queue.put({eval_id: data})
print(f"Process {eval_id} has completed.")
if __name__=="__main__":
multiprocessing.set_start_method('spawn')
args = _get_args()
if os.path.exists(os.path.join(args.output_folder,f"{args.save_name}.json")):
data_path = os.path.join(args.output_folder,f"{args.save_name}.json")
print(f"output_path:{data_path} exist! Only generate the results that were not generated in {data_path}.")
else:
data_path = args.OCRBench_file
with open(data_path, "r") as f:
data = json.load(f)
data_list = split_list(data, args.num_workers)
output_queue = Manager().Queue()
pool = Pool(processes=args.num_workers)
for i in range(len(data_list)):
pool.apply_async(eval_worker, args=(args, data_list[i], i, output_queue))
pool.close()
pool.join()
results = {}
while not output_queue.empty():
result = output_queue.get()
results.update(result)
data = []
for i in range(len(data_list)):
data.extend(results[i])
for i in range(len(data)):
data_type = data[i]["type"]
dataset_name = data[i]["dataset_name"]
answers = data[i]["answers"]
if data[i].get('predict',0)==0:
continue
predict = data[i]['predict']
data[i]['result'] = 0
if dataset_name == "HME100k":
if type(answers)==list:
for j in range(len(answers)):
answer = answers[j].strip().replace("\n"," ").replace(" ","")
predict = predict.strip().replace("\n"," ").replace(" ","")
if answer in predict:
data[i]['result'] = 1
else:
answers = answers.strip().replace("\n"," ").replace(" ","")
predict = predict.strip().replace("\n"," ").replace(" ","")
if answers in predict:
data[i]['result'] = 1
else:
if type(answers)==list:
for j in range(len(answers)):
answer = answers[j].lower().strip().replace("\n"," ")
predict = predict.lower().strip().replace("\n"," ")
if answer in predict:
data[i]['result'] = 1
else:
answers = answers.lower().strip().replace("\n"," ")
predict = predict.lower().strip().replace("\n"," ")
if answers in predict:
data[i]['result'] = 1
save_json(data, os.path.join(args.output_folder,f"{args.save_name}.json"))
if len(data)==1000:
for i in range(len(data)):
if data[i].get("result",100)==100:
continue
OCRBench_score[data[i]['type']] += data[i]['result']
recognition_score = OCRBench_score['Regular Text Recognition']+OCRBench_score['Irregular Text Recognition']+OCRBench_score['Artistic Text Recognition']+OCRBench_score['Handwriting Recognition']+OCRBench_score['Digit String Recognition']+OCRBench_score['Non-Semantic Text Recognition']
Final_score = recognition_score+OCRBench_score['Scene Text-centric VQA']+OCRBench_score['Doc-oriented VQA']+OCRBench_score['Key Information Extraction']+OCRBench_score['Handwritten Mathematical Expression Recognition']
print("###########################OCRBench##############################")
print(f"Text Recognition(Total 300):{recognition_score}")
print("------------------Details of Recognition Score-------------------")
print(f"Regular Text Recognition(Total 50): {OCRBench_score['Regular Text Recognition']}")
print(f"Irregular Text Recognition(Total 50): {OCRBench_score['Irregular Text Recognition']}")
print(f"Artistic Text Recognition(Total 50): {OCRBench_score['Artistic Text Recognition']}")
print(f"Handwriting Recognition(Total 50): {OCRBench_score['Handwriting Recognition']}")
print(f"Digit String Recognition(Total 50): {OCRBench_score['Digit String Recognition']}")
print(f"Non-Semantic Text Recognition(Total 50): {OCRBench_score['Non-Semantic Text Recognition']}")
print("----------------------------------------------------------------")
print(f"Scene Text-centric VQA(Total 200): {OCRBench_score['Scene Text-centric VQA']}")
print("----------------------------------------------------------------")
print(f"Doc-oriented VQA(Total 200): {OCRBench_score['Doc-oriented VQA']}")
print("----------------------------------------------------------------")
print(f"Key Information Extraction(Total 200): {OCRBench_score['Key Information Extraction']}")
print("----------------------------------------------------------------")
print(f"Handwritten Mathematical Expression Recognition(Total 100): {OCRBench_score['Handwritten Mathematical Expression Recognition']}")
print("----------------------Final Score-------------------------------")
print(f"Final Score(Total 1000): {Final_score}")
else:
for i in range(len(data)):
num_all[data[i]['dataset_name']] += 1
if data[i].get("result",100)==100:
continue
AllDataset_score[data[i]['dataset_name']] += data[i]['result']
for key in AllDataset_score.keys():
print(f"{key}: {AllDataset_score[key]/float(num_all[key])}")

187
scripts/minigpt4v2.py Normal file
View File

@@ -0,0 +1,187 @@
import json
from argparse import ArgumentParser
import torch
import os
import json
from tqdm import tqdm
from PIL import Image
import math
import multiprocessing
from multiprocessing import Pool, Queue, Manager
import sys
sys.path.append("./scripts/MiniGPT-4/")
from minigpt4.common.eval_utils import prepare_texts, init_model, eval_parser
from minigpt4.conversation.conversation import CONV_VISION_minigptv2
from minigpt4.common.config import Config
import random
# https://github.com/Vision-CAIR/MiniGPT-4/blob/main/eval_scripts/eval_vqa.py
def split_list(lst, n):
length = len(lst)
avg = length // n # size of each chunk
result = [] # holds the split sublists
for i in range(n - 1):
result.append(lst[i*avg:(i+1)*avg])
result.append(lst[(n-1)*avg:])
return result
def save_json(json_list,save_path):
with open(save_path, 'w') as file:
json.dump(json_list, file,indent=4)
def _get_args():
parser = ArgumentParser()
parser.add_argument("--image_folder", type=str, default="./data")
parser.add_argument("--output_folder", type=str, default="./results")
parser.add_argument("--OCRBench_file", type=str, default="./OCRBench/OCRBench.json")
parser.add_argument("--cfg-path", default='./scripts/MiniGPT-4/eval_configs/minigptv2_eval.yaml')
parser.add_argument("--save_name", type=str, default="minigptv2")
parser.add_argument("--num_workers", type=int, default=1)
parser.add_argument("--temperature", type=float, default=0.0)
parser.add_argument(
"--options",
nargs="+",
help="override some settings in the used config, the key-value pair "
"in xxx=yyy format will be merged into config file (deprecate), "
"change to --cfg-options instead.",
)
args = parser.parse_args()
return args
OCRBench_score = {"Regular Text Recognition":0,"Irregular Text Recognition":0,"Artistic Text Recognition":0,"Handwriting Recognition":0,
"Digit String Recognition":0,"Non-Semantic Text Recognition":0,"Scene Text-centric VQA":0,"Doc-oriented VQA":0,"Doc-oriented VQA":0,
"Key Information Extraction":0,"Handwritten Mathematical Expression Recognition":0}
AllDataset_score = {"IIIT5K":0,"svt":0,"IC13_857":0,"IC15_1811":0,"svtp":0,"ct80":0,"cocotext":0,"ctw":0,"totaltext":0,"HOST":0,"WOST":0,"WordArt":0,"IAM":0,"ReCTS":0,"ORAND":0,"NonSemanticText":0,"SemanticText":0,
"STVQA":0,"textVQA":0,"ocrVQA":0,"ESTVQA":0,"ESTVQA_cn":0,"docVQA":0,"infographicVQA":0,"ChartQA":0,"ChartQA_Human":0,"FUNSD":0,"SROIE":0,"POIE":0,"HME100k":0}
num_all = {"IIIT5K":0,"svt":0,"IC13_857":0,"IC15_1811":0,"svtp":0,"ct80":0,"cocotext":0,"ctw":0,"totaltext":0,"HOST":0,"WOST":0,"WordArt":0,"IAM":0,"ReCTS":0,"ORAND":0,"NonSemanticText":0,"SemanticText":0,
"STVQA":0,"textVQA":0,"ocrVQA":0,"ESTVQA":0,"ESTVQA_cn":0,"docVQA":0,"infographicVQA":0,"ChartQA":0,"ChartQA_Human":0,"FUNSD":0,"SROIE":0,"POIE":0,"HME100k":0}
def eval_worker(args, data, eval_id, output_queue):
print(f"Process {eval_id} start.")
device = f'cuda:{eval_id}'
cfg = Config(args)
model, vis_processor = init_model(args, device)
conv_temp = CONV_VISION_minigptv2.copy()
conv_temp.system = ""
model.eval()
instruction_pool = [
"[vqa] {}"
]
for i in tqdm(range(len(data))):
img_path = os.path.join(args.image_folder, data[i]['image_path'])
qs = data[i]['question']
if data[i].get("predict", 0)!=0:
print(f"{img_path} predict exist, continue.")
continue
image = Image.open(img_path).convert("RGB")
image = vis_processor(image)
image = image.unsqueeze(0).to(device)
# question = self.text_processor(qs)
instruction = random.choice(instruction_pool).format(qs)
instruction = "<Img><ImageHere></Img> {} ".format(instruction)
texts = prepare_texts(instruction, conv_temp) # wrap the texts with the conversation template
answers = model.generate(image, texts, max_new_tokens=100, do_sample=False)
data[i]['predict'] = answers[0]
output_queue.put({eval_id: data})
print(f"Process {eval_id} has completed.")
if __name__=="__main__":
multiprocessing.set_start_method('spawn')
args = _get_args()
if os.path.exists(os.path.join(args.output_folder,f"{args.save_name}.json")):
data_path = os.path.join(args.output_folder,f"{args.save_name}.json")
print(f"output_path:{data_path} exist! Only generate the results that were not generated in {data_path}.")
else:
data_path = args.OCRBench_file
with open(data_path, "r") as f:
data = json.load(f)
data_list = split_list(data, args.num_workers)
output_queue = Manager().Queue()
pool = Pool(processes=args.num_workers)
for i in range(len(data_list)):
pool.apply_async(eval_worker, args=(args, data_list[i], i, output_queue))
pool.close()
pool.join()
results = {}
while not output_queue.empty():
result = output_queue.get()
results.update(result)
data = []
for i in range(len(data_list)):
data.extend(results[i])
for i in range(len(data)):
data_type = data[i]["type"]
dataset_name = data[i]["dataset_name"]
answers = data[i]["answers"]
if data[i].get('predict',0)==0:
continue
predict = data[i]['predict']
data[i]['result'] = 0
if dataset_name == "HME100k":
if type(answers)==list:
for j in range(len(answers)):
answer = answers[j].strip().replace("\n"," ").replace(" ","")
predict = predict.strip().replace("\n"," ").replace(" ","")
if answer in predict:
data[i]['result'] = 1
else:
answers = answers.strip().replace("\n"," ").replace(" ","")
predict = predict.strip().replace("\n"," ").replace(" ","")
if answers in predict:
data[i]['result'] = 1
else:
if type(answers)==list:
for j in range(len(answers)):
answer = answers[j].lower().strip().replace("\n"," ")
predict = predict.lower().strip().replace("\n"," ")
if answer in predict:
data[i]['result'] = 1
else:
answers = answers.lower().strip().replace("\n"," ")
predict = predict.lower().strip().replace("\n"," ")
if answers in predict:
data[i]['result'] = 1
save_json(data, os.path.join(args.output_folder,f"{args.save_name}.json"))
if len(data)==1000:
for i in range(len(data)):
if data[i].get("result",100)==100:
continue
OCRBench_score[data[i]['type']] += data[i]['result']
recognition_score = OCRBench_score['Regular Text Recognition']+OCRBench_score['Irregular Text Recognition']+OCRBench_score['Artistic Text Recognition']+OCRBench_score['Handwriting Recognition']+OCRBench_score['Digit String Recognition']+OCRBench_score['Non-Semantic Text Recognition']
Final_score = recognition_score+OCRBench_score['Scene Text-centric VQA']+OCRBench_score['Doc-oriented VQA']+OCRBench_score['Key Information Extraction']+OCRBench_score['Handwritten Mathematical Expression Recognition']
print("###########################OCRBench##############################")
print(f"Text Recognition(Total 300):{recognition_score}")
print("------------------Details of Recognition Score-------------------")
print(f"Regular Text Recognition(Total 50): {OCRBench_score['Regular Text Recognition']}")
print(f"Irregular Text Recognition(Total 50): {OCRBench_score['Irregular Text Recognition']}")
print(f"Artistic Text Recognition(Total 50): {OCRBench_score['Artistic Text Recognition']}")
print(f"Handwriting Recognition(Total 50): {OCRBench_score['Handwriting Recognition']}")
print(f"Digit String Recognition(Total 50): {OCRBench_score['Digit String Recognition']}")
print(f"Non-Semantic Text Recognition(Total 50): {OCRBench_score['Non-Semantic Text Recognition']}")
print("----------------------------------------------------------------")
print(f"Scene Text-centric VQA(Total 200): {OCRBench_score['Scene Text-centric VQA']}")
print("----------------------------------------------------------------")
print(f"Doc-oriented VQA(Total 200): {OCRBench_score['Doc-oriented VQA']}")
print("----------------------------------------------------------------")
print(f"Key Information Extraction(Total 200): {OCRBench_score['Key Information Extraction']}")
print("----------------------------------------------------------------")
print(f"Handwritten Mathematical Expression Recognition(Total 100): {OCRBench_score['Handwritten Mathematical Expression Recognition']}")
print("----------------------Final Score-------------------------------")
print(f"Final Score(Total 1000): {Final_score}")
else:
for i in range(len(data)):
num_all[data[i]['dataset_name']] += 1
if data[i].get("result",100)==100:
continue
AllDataset_score[data[i]['dataset_name']] += data[i]['result']
for key in AllDataset_score.keys():
print(f"{key}: {AllDataset_score[key]/float(num_all[key])}")

182
scripts/monkey.py Normal file
View File

@@ -0,0 +1,182 @@
import json
from argparse import ArgumentParser
import torch
import os
import json
from tqdm import tqdm
from PIL import Image
import math
import multiprocessing
from multiprocessing import Pool, Queue, Manager
from transformers import AutoModelForCausalLM, AutoTokenizer
# https://github.com/Yuliang-Liu/Monkey
def split_list(lst, n):
length = len(lst)
avg = length // n # size of each chunk
result = [] # holds the split sublists
for i in range(n - 1):
result.append(lst[i*avg:(i+1)*avg])
result.append(lst[(n-1)*avg:])
return result
def save_json(json_list,save_path):
with open(save_path, 'w') as file:
json.dump(json_list, file,indent=4)
def _get_args():
parser = ArgumentParser()
parser.add_argument("--image_folder", type=str, default="./data")
parser.add_argument("--output_folder", type=str, default="./results")
parser.add_argument("--OCRBench_file", type=str, default="./OCRBench/OCRBench.json")
parser.add_argument("--model_path", type=str, default="echo840/Monkey")
parser.add_argument("--save_name", type=str, default="monkey")
parser.add_argument("--num_workers", type=int, default=8)
args = parser.parse_args()
return args
OCRBench_score = {"Regular Text Recognition":0,"Irregular Text Recognition":0,"Artistic Text Recognition":0,"Handwriting Recognition":0,
"Digit String Recognition":0,"Non-Semantic Text Recognition":0,"Scene Text-centric VQA":0,"Doc-oriented VQA":0,"Doc-oriented VQA":0,
"Key Information Extraction":0,"Handwritten Mathematical Expression Recognition":0}
AllDataset_score = {"IIIT5K":0,"svt":0,"IC13_857":0,"IC15_1811":0,"svtp":0,"ct80":0,"cocotext":0,"ctw":0,"totaltext":0,"HOST":0,"WOST":0,"WordArt":0,"IAM":0,"ReCTS":0,"ORAND":0,"NonSemanticText":0,"SemanticText":0,
"STVQA":0,"textVQA":0,"ocrVQA":0,"ESTVQA":0,"ESTVQA_cn":0,"docVQA":0,"infographicVQA":0,"ChartQA":0,"ChartQA_Human":0,"FUNSD":0,"SROIE":0,"POIE":0,"HME100k":0}
num_all = {"IIIT5K":0,"svt":0,"IC13_857":0,"IC15_1811":0,"svtp":0,"ct80":0,"cocotext":0,"ctw":0,"totaltext":0,"HOST":0,"WOST":0,"WordArt":0,"IAM":0,"ReCTS":0,"ORAND":0,"NonSemanticText":0,"SemanticText":0,
"STVQA":0,"textVQA":0,"ocrVQA":0,"ESTVQA":0,"ESTVQA_cn":0,"docVQA":0,"infographicVQA":0,"ChartQA":0,"ChartQA_Human":0,"FUNSD":0,"SROIE":0,"POIE":0,"HME100k":0}
def eval_worker(args, data, eval_id, output_queue):
print(f"Process {eval_id} start.")
checkpoint = args.model_path
model = AutoModelForCausalLM.from_pretrained(checkpoint, device_map='cuda', trust_remote_code=True).eval()
tokenizer = AutoTokenizer.from_pretrained(checkpoint, trust_remote_code=True)
tokenizer.padding_side = 'left'
tokenizer.pad_token_id = tokenizer.eod_id
for i in tqdm(range(len(data))):
img_path = os.path.join(args.image_folder, data[i]['image_path'])
qs = data[i]['question']
if data[i].get("predict", 0)!=0:
print(f"{img_path} prediction already exists, skipping.")
continue
query = f'<img>{img_path}</img> {qs} Answer: '
input_ids = tokenizer(query, return_tensors='pt', padding='longest')
attention_mask = input_ids.attention_mask
input_ids = input_ids.input_ids
pred = model.generate(
input_ids=input_ids.to(f'cuda:{eval_id}'),
attention_mask=attention_mask.to(f'cuda:{eval_id}'),
do_sample=False,
num_beams=1,
max_new_tokens=100,
min_new_tokens=1,
length_penalty=1,
num_return_sequences=1,
output_hidden_states=True,
use_cache=True,
pad_token_id=tokenizer.eod_id,
eos_token_id=tokenizer.eod_id,
)
response = tokenizer.decode(pred[0][input_ids.size(1):].cpu(), skip_special_tokens=True).strip()
data[i]['predict'] = response
output_queue.put({eval_id: data})
print(f"Process {eval_id} has completed.")
if __name__=="__main__":
multiprocessing.set_start_method('spawn')
args = _get_args()
if os.path.exists(os.path.join(args.output_folder,f"{args.save_name}.json")):
data_path = os.path.join(args.output_folder,f"{args.save_name}.json")
print(f"output_path:{data_path} exist! Only generate the results that were not generated in {data_path}.")
else:
data_path = args.OCRBench_file
with open(data_path, "r") as f:
data = json.load(f)
data_list = split_list(data, args.num_workers)
output_queue = Manager().Queue()
pool = Pool(processes=args.num_workers)
for i in range(len(data_list)):
pool.apply_async(eval_worker, args=(args, data_list[i], i, output_queue))
pool.close()
pool.join()
results = {}
while not output_queue.empty():
result = output_queue.get()
results.update(result)
data = []
for i in range(len(data_list)):
data.extend(results[i])
for i in range(len(data)):
data_type = data[i]["type"]
dataset_name = data[i]["dataset_name"]
answers = data[i]["answers"]
if data[i].get('predict',0)==0:
continue
predict = data[i]['predict']
data[i]['result'] = 0
if dataset_name == "HME100k":
if type(answers)==list:
for j in range(len(answers)):
answer = answers[j].strip().replace("\n"," ").replace(" ","")
predict = predict.strip().replace("\n"," ").replace(" ","")
if answer in predict:
data[i]['result'] = 1
else:
answers = answers.strip().replace("\n"," ").replace(" ","")
predict = predict.strip().replace("\n"," ").replace(" ","")
if answers in predict:
data[i]['result'] = 1
else:
if type(answers)==list:
for j in range(len(answers)):
answer = answers[j].lower().strip().replace("\n"," ")
predict = predict.lower().strip().replace("\n"," ")
if answer in predict:
data[i]['result'] = 1
else:
answers = answers.lower().strip().replace("\n"," ")
predict = predict.lower().strip().replace("\n"," ")
if answers in predict:
data[i]['result'] = 1
save_json(data, os.path.join(args.output_folder,f"{args.save_name}.json"))
if len(data)==1000:
for i in range(len(data)):
if data[i].get("result",100)==100:
continue
OCRBench_score[data[i]['type']] += data[i]['result']
recognition_score = OCRBench_score['Regular Text Recognition']+OCRBench_score['Irregular Text Recognition']+OCRBench_score['Artistic Text Recognition']+OCRBench_score['Handwriting Recognition']+OCRBench_score['Digit String Recognition']+OCRBench_score['Non-Semantic Text Recognition']
Final_score = recognition_score+OCRBench_score['Scene Text-centric VQA']+OCRBench_score['Doc-oriented VQA']+OCRBench_score['Key Information Extraction']+OCRBench_score['Handwritten Mathematical Expression Recognition']
print("###########################OCRBench##############################")
print(f"Text Recognition(Total 300):{recognition_score}")
print("------------------Details of Recognition Score-------------------")
print(f"Regular Text Recognition(Total 50): {OCRBench_score['Regular Text Recognition']}")
print(f"Irregular Text Recognition(Total 50): {OCRBench_score['Irregular Text Recognition']}")
print(f"Artistic Text Recognition(Total 50): {OCRBench_score['Artistic Text Recognition']}")
print(f"Handwriting Recognition(Total 50): {OCRBench_score['Handwriting Recognition']}")
print(f"Digit String Recognition(Total 50): {OCRBench_score['Digit String Recognition']}")
print(f"Non-Semantic Text Recognition(Total 50): {OCRBench_score['Non-Semantic Text Recognition']}")
print("----------------------------------------------------------------")
print(f"Scene Text-centric VQA(Total 200): {OCRBench_score['Scene Text-centric VQA']}")
print("----------------------------------------------------------------")
print(f"Doc-oriented VQA(Total 200): {OCRBench_score['Doc-oriented VQA']}")
print("----------------------------------------------------------------")
print(f"Key Information Extraction(Total 200): {OCRBench_score['Key Information Extraction']}")
print("----------------------------------------------------------------")
print(f"Handwritten Mathematical Expression Recognition(Total 100): {OCRBench_score['Handwritten Mathematical Expression Recognition']}")
print("----------------------Final Score-------------------------------")
print(f"Final Score(Total 1000): {Final_score}")
else:
for i in range(len(data)):
num_all[data[i]['dataset_name']] += 1
if data[i].get("result",100)==100:
continue
AllDataset_score[data[i]['dataset_name']] += data[i]['result']
for key in AllDataset_score.keys():
print(f"{key}: {AllDataset_score[key]/float(num_all[key])}")